diff --git a/cmd/kueuectl/app/list/list.go b/cmd/kueuectl/app/list/list.go index 85902849b0..6dc4f21027 100644 --- a/cmd/kueuectl/app/list/list.go +++ b/cmd/kueuectl/app/list/list.go @@ -25,7 +25,7 @@ import ( ) const ( - listExample = ` # List LocalQueue + listExample = ` # List LocalQueue kueuectl list localqueue` ) @@ -41,6 +41,7 @@ func NewListCmd(clientGetter util.ClientGetter, streams genericiooptions.IOStrea cmd.AddCommand(NewClusterQueueCmd(clientGetter, streams, clock)) cmd.AddCommand(NewWorkloadCmd(clientGetter, streams, clock)) cmd.AddCommand(NewResourceFlavorCmd(clientGetter, streams, clock)) + cmd.AddCommand(NewPodCmd(clientGetter, streams)) return cmd } diff --git a/cmd/kueuectl/app/list/list_pods.go b/cmd/kueuectl/app/list/list_pods.go new file mode 100644 index 0000000000..9d9b65fc35 --- /dev/null +++ b/cmd/kueuectl/app/list/list_pods.go @@ -0,0 +1,395 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package list + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/spf13/cobra" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + k8s "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + kubectlget "k8s.io/kubectl/pkg/cmd/get" + "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" + + "sigs.k8s.io/kueue/client-go/clientset/versioned/scheme" + "sigs.k8s.io/kueue/cmd/kueuectl/app/util" + kueuejob "sigs.k8s.io/kueue/pkg/controller/jobs/job" + kueuejobset "sigs.k8s.io/kueue/pkg/controller/jobs/jobset" + kueuemxjob "sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/jobs/mxjob" + kueuepaddlejob "sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/jobs/paddlejob" + kueuepytorchjob "sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/jobs/pytorchjob" + kueuetfjob "sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/jobs/tfjob" + kueuexgboostjob "sigs.k8s.io/kueue/pkg/controller/jobs/kubeflow/jobs/xgboostjob" + kueuempijob "sigs.k8s.io/kueue/pkg/controller/jobs/mpijob" + "sigs.k8s.io/kueue/pkg/controller/jobs/pod" + kueueraycluster "sigs.k8s.io/kueue/pkg/controller/jobs/raycluster" + kueuerayjob "sigs.k8s.io/kueue/pkg/controller/jobs/rayjob" +) + +const ( + podLong = `Lists all pods that match the given criteria: should be part of the specified Job kind, +belong to the specified namespace, and match +the label selector or the field selector.` + podExample = ` # List Pods +kueuectl list pods --for job/job-name` +) + +var jobsWithPodLabelSelector = []JobWithPodLabelSelector{ + &kueuejob.Job{}, + &kueuejobset.JobSet{}, + &kueuemxjob.JobControl{}, + &kueuepaddlejob.JobControl{}, + &kueuetfjob.JobControl{}, + &kueuepytorchjob.JobControl{}, + &kueuexgboostjob.JobControl{}, + &kueuempijob.MPIJob{}, + &pod.Pod{}, + &kueueraycluster.RayCluster{}, + &kueuerayjob.RayJob{}, +}
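Every entry in this registry implements the JobWithPodLabelSelector interface declared below. For orientation, here is a minimal sketch of what one more integration would have to provide; the ExampleJob type, its API group, and its label key are illustrative assumptions, not part of this change:

// Hypothetical integration, shown only to illustrate the contract.
// Appending it to jobsWithPodLabelSelector above would let
// `kueuectl list pods --for examplejob/NAME` resolve the matching pods.
type ExampleJob struct {
	batchv1.Job // embeds a concrete client.Object purely for the sketch
}

func (j *ExampleJob) Object() client.Object { return &j.Job }

func (j *ExampleJob) GVK() schema.GroupVersionKind {
	return schema.GroupVersionKind{Group: "example.io", Version: "v1", Kind: "ExampleJob"}
}

// PodLabelSelector returns a selector matching exactly the pods created
// for this object, mirroring the controller implementations further below.
func (j *ExampleJob) PodLabelSelector() string {
	return fmt.Sprintf("example.io/job-name=%s", j.Name)
}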
+ +type PodOptions struct { + PrintFlags *genericclioptions.PrintFlags + + Limit int64 + AllNamespaces bool + ServerPrint bool + Namespace string + LabelSelector string + FieldSelector string + UserSpecifiedForObject string + ForName string + ForGVK schema.GroupVersionKind + ForObject *unstructured.Unstructured + PodLabelSelector string + + Clientset k8s.Interface + + genericiooptions.IOStreams +} + +type JobWithPodLabelSelector interface { + Object() client.Object + GVK() schema.GroupVersionKind + PodLabelSelector() string +} + +func NewPodOptions(streams genericiooptions.IOStreams) *PodOptions { + return &PodOptions{ + PrintFlags: genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme), + IOStreams: streams, + } +} + +func NewPodCmd(clientGetter util.ClientGetter, streams genericiooptions.IOStreams) *cobra.Command { + o := NewPodOptions(streams) + + cmd := &cobra.Command{ + Use: "pods --for TYPE[.API-GROUP]/NAME", + DisableFlagsInUseLine: true, + Aliases: []string{"po"}, + Short: "List Pods belonging to a Job Kind", + Long: podLong, + Example: podExample, + RunE: func(cmd *cobra.Command, args []string) error { + cmd.SilenceUsage = true + err := o.Complete(cmd, clientGetter) + if err != nil { + return err + } + if o.ForObject == nil { + return nil + } + if len(o.PodLabelSelector) == 0 { + return nil + } + return o.Run(cmd.Context(), clientGetter) + }, + } + + o.PrintFlags.AddFlags(cmd) + + addAllNamespacesFlagVar(cmd, &o.AllNamespaces) + addFieldSelectorFlagVar(cmd, &o.FieldSelector) + addLabelSelectorFlagVar(cmd, &o.LabelSelector) + addForObjectFlagVar(cmd, &o.UserSpecifiedForObject) + + _ = cmd.MarkFlagRequired("for") + + return cmd +} + +// Complete takes the command arguments and infers any remaining options. +func (o *PodOptions) Complete(cmd *cobra.Command, clientGetter util.ClientGetter) error { + var err error + + o.Limit, err = listRequestLimit() + if err != nil { + return err + } + + outputOption := ptr.Deref(o.PrintFlags.OutputFormat, "") + if outputOption == "" || strings.Contains(outputOption, "wide") { + o.ServerPrint = true + } + + o.Namespace, _, err = clientGetter.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + + o.Clientset, err = clientGetter.K8sClientSet() + if err != nil { + return err + } + + mapper, err := clientGetter.ToRESTMapper() + if err != nil { + return err + } + var found bool + o.ForGVK, o.ForName, found, err = decodeResourceTypeName(mapper, o.UserSpecifiedForObject) + if err != nil { + return err + } + if !found { + return fmt.Errorf("invalid value '%s' used in --for flag; value must be in the format TYPE[.API-GROUP]/NAME", o.UserSpecifiedForObject) + } + + infos, err := o.getForObjectInfos(clientGetter) + if err != nil { + return err + } + + if len(infos) == 0 { + o.printNoResourcesFound() + return nil + } + + o.ForObject, err = o.getForObject(infos) + if err != nil { + return err + } + + o.PodLabelSelector, err = o.getPodLabelSelector() + if err != nil { + return err + } + + if len(o.PodLabelSelector) == 0 { + o.printNoResourcesFound() + return nil + } + + return nil +}
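To make the --for contract concrete: Complete resolves TYPE[.API-GROUP]/NAME through the REST mapper into a GVK plus an object name. Based on the test cases in this change (an illustration, not an exhaustive specification):

//   --for job/test-job                    -> batch/v1, Kind=Job, name "test-job"
//   --for pytorchjob/test-job             -> kubeflow.org/v1, Kind=PyTorchJob, name "test-job"
//   --for jobset.jobset.x-k8s.io/test-job -> jobset.x-k8s.io/v1alpha2, Kind=JobSet, name "test-job"
//
// Any value the mapper cannot resolve surfaces as the "value must be in the
// format TYPE[.API-GROUP]/NAME" error returned above.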
+ +// getForObjectInfos builds and executes a dynamic client query for the resource specified in --for +func (o *PodOptions) getForObjectInfos(clientGetter util.ClientGetter) ([]*resource.Info, error) { + r := clientGetter.NewResourceBuilder(). + Unstructured(). + NamespaceParam(o.Namespace). + DefaultNamespace(). + AllNamespaces(o.AllNamespaces). + FieldSelectorParam(fmt.Sprintf("metadata.name=%s", o.ForName)). + ResourceTypeOrNameArgs(true, o.ForGVK.Kind). + ContinueOnError(). + Latest(). + Flatten(). + Do() + + if r == nil { + return nil, fmt.Errorf("error building client for: %s/%s", o.ForGVK.Kind, o.ForName) + } + + if err := r.Err(); err != nil { + return nil, err + } + + infos, err := r.Infos() + if err != nil { + return nil, err + } + + return infos, nil +} + +func (o *PodOptions) getForObject(infos []*resource.Info) (*unstructured.Unstructured, error) { + job, ok := infos[0].Object.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("invalid object %+v: unexpected type %T", infos[0].Object, infos[0].Object) + } + + return job, nil +} + +func (o *PodOptions) getJobController() JobWithPodLabelSelector { + for _, jobController := range jobsWithPodLabelSelector { + if jobController.GVK() == o.ForGVK { + return jobController + } + } + return nil +} + +// getPodLabelSelector returns the pod label selector that the matched job controller derives for the --for object +func (o *PodOptions) getPodLabelSelector() (string, error) { + jobController := o.getJobController() + if jobController == nil { + return "", fmt.Errorf("unsupported kind: %s", o.ForObject.GetKind()) + } + + err := runtime.DefaultUnstructuredConverter.FromUnstructured(o.ForObject.UnstructuredContent(), jobController.Object()) + if err != nil { + return "", fmt.Errorf("failed to convert unstructured object: %w", err) + } + + return jobController.PodLabelSelector(), nil +} + +type trackingWriterWrapper struct { + Delegate io.Writer + Written int +} + +func (t *trackingWriterWrapper) Write(p []byte) (n int, err error) { + t.Written += len(p) + return t.Delegate.Write(p) +} + +// Run prints the pods for a specific Job +func (o *PodOptions) Run(ctx context.Context, clientGetter util.ClientGetter) error { + trackingWriter := &trackingWriterWrapper{Delegate: o.Out} + tabWriter := printers.GetNewTabWriter(trackingWriter) + + infos, err := o.getPodsInfos(clientGetter) + if err != nil { + return err + } + + printer, err := o.ToPrinter() + if err != nil { + return err + } + + for _, pod := range infos { + if err = printer.PrintObj(pod.Object, tabWriter); err != nil { + return err + } + } + + if err = tabWriter.Flush(); err != nil { + return err + } + + if trackingWriter.Written == 0 { + o.printNoResourcesFound() + } + + return nil +} + +func (o *PodOptions) ToPrinter() (printers.ResourcePrinterFunc, error) { + if o.ServerPrint { + tablePrinter := printers.NewTablePrinter(printers.PrintOptions{ + NoHeaders: false, + WithNamespace: o.AllNamespaces, + WithKind: false, + Wide: ptr.Deref(o.PrintFlags.OutputFormat, "") == "wide", + ShowLabels: false, + ColumnLabels: nil, + }) + + printer := &kubectlget.TablePrinter{Delegate: tablePrinter} + + return printer.PrintObj, nil + } + + printer, err := o.PrintFlags.ToPrinter() + if err != nil { + return nil, err + } + + return printer.PrintObj, nil +} + +// getPodsInfos gets the raw pod infos directly from the API server +func (o *PodOptions) getPodsInfos(clientGetter util.ClientGetter) ([]*resource.Info, error) { + namespace := o.Namespace + if o.AllNamespaces { + namespace = "" + } + + podLabelSelector := o.PodLabelSelector + if len(o.LabelSelector) != 0 { + podLabelSelector = "," + o.PodLabelSelector + } + + r := clientGetter.NewResourceBuilder().Unstructured(). + NamespaceParam(namespace).DefaultNamespace().AllNamespaces(o.AllNamespaces). + FieldSelectorParam(o.FieldSelector). + LabelSelectorParam(o.LabelSelector+podLabelSelector). + ResourceTypeOrNameArgs(true, "pods"). + ContinueOnError(). + RequestChunksOf(o.Limit). + Latest(). + Flatten(). + TransformRequests(o.transformRequests). + Do() + + if err := r.Err(); err != nil { + return nil, err + } + + infos, err := r.Infos() + if err != nil { + return nil, err + } + + return infos, nil +} + +func (o *PodOptions) transformRequests(req *rest.Request) { + if !o.ServerPrint { + return + } + req.SetHeader("Accept", strings.Join([]string{ + fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName), + "application/json", + }, ",")) +} + +// printNoResourcesFound handles output when no objects are found in the target namespace(s) +func (o *PodOptions) printNoResourcesFound() { + if !o.AllNamespaces { + fmt.Fprintf(o.ErrOut, "No resources found in %s namespace.\n", o.Namespace) + } else { + fmt.Fprintln(o.ErrOut, "No resources found.") + } +}
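One subtlety in getPodsInfos above: when -l is given, the user's selector is comma-joined in front of the controller-derived selector. Assuming a batch/v1 Job named test-job, whose PodLabelSelector is built from batchv1.JobNameLabel, the effective selector would be:

//   without -l:            batch.kubernetes.io/job-name=test-job
//   with -l tier=critical: tier=critical,batch.kubernetes.io/job-name=test-job
//
// A user-supplied selector can therefore only narrow the listing; it can
// never widen it beyond the pods belonging to the --for object.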
diff --git a/cmd/kueuectl/app/list/list_pods_test.go b/cmd/kueuectl/app/list/list_pods_test.go new file mode 100644 index 0000000000..195ecb28c1 --- /dev/null +++ b/cmd/kueuectl/app/list/list_pods_test.go @@ -0,0 +1,905 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package list + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" + rayutils "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" + + kueuecmdtesting "sigs.k8s.io/kueue/cmd/kueuectl/app/testing" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/resource" + restfake "k8s.io/client-go/rest/fake" + "k8s.io/utils/strings/slices" + + jobsetapi "sigs.k8s.io/jobset/api/jobset/v1alpha2" +) + +type podTestCase struct { + name string + job runtime.Object + pods []corev1.Pod + mapperGVKs []schema.GroupVersionKind + args []string + wantOut string + wantOutErr string + wantErr error +} + +func TestPodCmd(t *testing.T) { + testStartTime := time.Now() + + testCases := []podTestCase{ + { + name: "list pods of batch/job with wide output", + job: &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-2", + Namespace:
metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "batch", + Version: "v1", + Kind: "Job", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "job/test-job", "-o", "wide"}, + wantOut: `NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES +valid-pod-1 1/1 Running 0 +valid-pod-2 1/1 Running 0 +`, + }, { + name: "list pods for valid batch/job type", + job: &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-2", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "batch", + Version: "v1", + Kind: "Job", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "job/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +valid-pod-2 1/1 Running 0 +`, + }, { + name: "no valid pods for batch/job type in current namespace", + job: &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{}, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "batch", + Version: "v1", + Kind: "Job", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "job/test-job"}, + wantOut: "", + wantOutErr: `No resources found in default namespace. +`, + }, { + name: "no valid pods for batch/job type in all namespaces", + job: &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{}, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "batch", + Version: "v1", + Kind: "Job", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "job/test-job", "-A"}, + wantOut: "", + wantOutErr: `No resources found. 
+`, + }, { + name: "valid pods for batch/job type in all namespaces", + job: &batchv1.Job{ + TypeMeta: metav1.TypeMeta{ + Kind: "Job", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "sample-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + batchv1.JobNameLabel: "sample-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: "dev-team-a", + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + batchv1.JobNameLabel: "sample-job", + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-2", + Namespace: "dev-team-b", + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + batchv1.JobNameLabel: "sample-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "batch", + Version: "v1", + Kind: "Job", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "job/sample-job", "-A"}, + wantOut: `NAMESPACE NAME READY STATUS RESTARTS AGE +dev-team-a valid-pod-1 1/1 Running 0 +dev-team-b valid-pod-2 1/1 Running 0 +`, + }, { + name: "list pods for kubeflow.org/PyTorchJob type", + job: &kftraining.PyTorchJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "PyTorchJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "pytorchjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "pytorchjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "kubeflow.org", + Version: "v1", + Kind: "PyTorchJob", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "pytorchjob/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for kubeflow.org/MXjob type", + job: &kftraining.MXJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "MXJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "mxjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "mxjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "kubeflow.org", + Version: "v1", + Kind: "MXJob", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "mxjob/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for kubeflow.org/paddlejob type", + job: &kftraining.PaddleJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "PaddleJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + 
kftraining.OperatorNameLabel: "paddlejob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "paddlejob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "kubeflow.org", + Version: "v1", + Kind: "PaddleJob", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "paddlejob/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for kubeflow.org/tfjob type", + job: &kftraining.TFJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "TFJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "tfjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "tfjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "kubeflow.org", + Version: "v1", + Kind: "TFJob", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "tfjob/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for kubeflow.org/mpijob type", + job: &kftraining.MPIJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "MPIJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "mpijob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "mpijob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "kubeflow.org", + Version: "v2beta1", + Kind: "MPIJob", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "mpijob/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for kubeflow.org/xgboostjob type", + job: &kftraining.XGBoostJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "XGBoostJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "xgboostjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + kftraining.OperatorNameLabel: "xgboostjob-controller", + kftraining.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: 
[]schema.GroupVersionKind{ + { + Group: "kubeflow.org", + Version: "v1", + Kind: "XGBoostJob", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "xgboostjob/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for ray.io/rayjob type", + job: &rayv1.RayJob{ + TypeMeta: metav1.TypeMeta{ + Kind: "RayJob", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + Status: rayv1.RayJobStatus{RayClusterName: "test-cluster"}, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + batchv1.JobNameLabel: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "ray.io", + Version: "v1", + Kind: "RayJob", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "rayjob/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for ray.io/raycluster type", + job: &rayv1.RayCluster{ + TypeMeta: metav1.TypeMeta{ + Kind: "RayCluster", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-cluster", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + rayutils.RayClusterLabelKey: "test-cluster", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + rayutils.RayClusterLabelKey: "test-cluster", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "ray.io", + Version: "v1", + Kind: "RayCluster", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "raycluster/test-cluster"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods for jobset.x-k8s.io/jobset type", + job: &jobsetapi.JobSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "JobSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + jobsetapi.JobSetNameKey: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + jobsetapi.JobSetNameKey: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "jobset.x-k8s.io", + Version: "v1alpha2", + Kind: "JobSet", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "jobset/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, { + name: "list pods with api-group filter", + job: &jobsetapi.JobSet{ + TypeMeta: metav1.TypeMeta{ + Kind: "JobSet", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-job", + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{ + jobsetapi.JobSetNameKey: "test-job", + }, + }, + }, + pods: []corev1.Pod{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "valid-pod-1", + Namespace: metav1.NamespaceDefault, + CreationTimestamp: metav1.Time{ + Time: 
testStartTime.Add(-time.Hour).Truncate(time.Second), + }, + Labels: map[string]string{ + jobsetapi.JobSetNameKey: "test-job", + }, + }, + }, + }, + mapperGVKs: []schema.GroupVersionKind{ + { + Group: "jobset.x-k8s.io", + Version: "v1alpha2", + Kind: "JobSet", + }, { + Group: "", + Version: "v1", + Kind: "Pod", + }, + }, + args: []string{"--for", "jobset.jobset.x-k8s.io/test-job"}, + wantOut: `NAME READY STATUS RESTARTS AGE +valid-pod-1 1/1 Running 0 +`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + streams, _, out, outErr := genericiooptions.NewTestIOStreams() + + mapper := func() *meta.DefaultRESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{}) + for _, gvk := range tc.mapperGVKs { + m.Add(gvk, meta.RESTScopeNamespace) + } + return m + }() + + tf := kueuecmdtesting.NewTestClientGetter() + tf.WithNamespace(metav1.NamespaceDefault) + tf.WithRESTMapper(mapper) + + scheme, err := buildTestRuntimeScheme() + if err != nil { + t.Errorf("Unexpected error\n%s", err) + } + + codec := serializer.NewCodecFactory(scheme).LegacyCodec(scheme.PrioritizedVersionsAllGroups()...) + + tf.UnstructuredClient, err = mockRESTClient(codec, tc) + if err != nil { + t.Fatal(err) + } + + cmd := NewPodCmd(tf, streams) + cmd.SetArgs(tc.args) + + gotErr := cmd.Execute() + + if diff := cmp.Diff(tc.wantErr, gotErr, cmpopts.EquateErrors()); diff != "" { + t.Errorf("Unexpected error (-want/+got)\n%s", diff) + } + + gotOut := out.String() + if diff := cmp.Diff(tc.wantOut, gotOut); diff != "" { + t.Errorf("Unexpected output (-want/+got)\n%s", diff) + } + + gotOutErr := outErr.String() + if diff := cmp.Diff(tc.wantOutErr, gotOutErr); diff != "" { + t.Errorf("Unexpected output (-want/+got)\n%s", diff) + } + }) + } +} + +func buildTestRuntimeScheme() (*runtime.Scheme, error) { + scheme := runtime.NewScheme() + + scheme.AddKnownTypes(metav1.SchemeGroupVersion, &metav1.Table{}) + if err := corev1.AddToScheme(scheme); err != nil { + return nil, err + } + if err := batchv1.AddToScheme(scheme); err != nil { + return nil, err + } + if err := rayv1.AddToScheme(scheme); err != nil { + return nil, err + } + if err := kftraining.AddToScheme(scheme); err != nil { + return nil, err + } + if err := jobsetapi.AddToScheme(scheme); err != nil { + return nil, err + } + + return scheme, nil +} + +func mockRESTClient(codec runtime.Codec, tc podTestCase) (*restfake.RESTClient, error) { + var podRespBody io.ReadCloser + + podList := &corev1.PodList{Items: tc.pods} + if len(podList.Items) == 0 { + podRespBody = emptyTableObjBody(codec) + } else { + podRespBody = podTableObjBody(codec, podList.Items...) 
+ } + + reqPathPrefix := fmt.Sprintf("/namespaces/%s", metav1.NamespaceDefault) + if slices.Contains(tc.args, "-A") || slices.Contains(tc.args, "--all-namespaces") { + reqPathPrefix = "" + } + + reqJobKind := strings.ToLower(tc.job.GetObjectKind().GroupVersionKind().Kind) + "s" + + var err error + mockRestClient := &restfake.RESTClient{ + NegotiatedSerializer: resource.UnstructuredPlusDefaultContentConfig().NegotiatedSerializer, + Client: restfake.CreateHTTPClient(func(request *http.Request) (*http.Response, error) { + switch request.URL.Path { + case fmt.Sprintf("%s/%s", reqPathPrefix, reqJobKind): + return &http.Response{ + StatusCode: http.StatusOK, + Header: getDefaultHeader(), + Body: io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, tc.job)))), + }, nil + case fmt.Sprintf("%s/pods", reqPathPrefix): + return &http.Response{ + StatusCode: http.StatusOK, + Header: getDefaultHeader(), + Body: podRespBody, + }, nil + default: + err = fmt.Errorf("unexpected request URL: %#v, and request: %#v", request.URL, request) + return nil, err + } + }), + } + + return mockRestClient, err +} + +func getDefaultHeader() http.Header { + header := http.Header{} + header.Set("Content-Type", runtime.ContentTypeJSON) + return header +} + +var podColumns = []metav1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name"}, + {Name: "Ready", Type: "string", Format: ""}, + {Name: "Status", Type: "string", Format: ""}, + {Name: "Restarts", Type: "integer", Format: ""}, + {Name: "Age", Type: "string", Format: ""}, + {Name: "IP", Type: "string", Format: "", Priority: 1}, + {Name: "Node", Type: "string", Format: "", Priority: 1}, + {Name: "Nominated Node", Type: "string", Format: "", Priority: 1}, + {Name: "Readiness Gates", Type: "string", Format: "", Priority: 1}, +} + +// podTableObjBody builds a table response with the given list of pods +func podTableObjBody(codec runtime.Codec, pods ...corev1.Pod) io.ReadCloser { + table := &metav1.Table{ + TypeMeta: metav1.TypeMeta{APIVersion: "meta.k8s.io/v1", Kind: "Table"}, + ColumnDefinitions: podColumns, + } + + for i := range pods { + b := bytes.NewBuffer(nil) + _ = codec.Encode(&pods[i], b) + table.Rows = append(table.Rows, metav1.TableRow{ + Object: runtime.RawExtension{Raw: b.Bytes()}, + Cells: []interface{}{pods[i].Name, "1/1", "Running", int64(0), "", "", "", "", ""}, + }) + } + + data, err := json.Marshal(table) + if err != nil { + panic(err) + } + return io.NopCloser(bytes.NewReader(data)) +} + +// emptyTableObjBody builds an empty table response +func emptyTableObjBody(codec runtime.Codec) io.ReadCloser { + table := &metav1.Table{ + ColumnDefinitions: podColumns, + } + return io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, table)))) +} diff --git a/go.mod b/go.mod index d87c2cb3bd..7736c4ba7f 100644 --- a/go.mod +++ b/go.mod @@ -26,6 +26,7 @@ require ( k8s.io/component-helpers v0.29.7 k8s.io/klog/v2 v2.120.1 k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a + k8s.io/kubectl v0.29.7 k8s.io/metrics v0.29.7 k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 sigs.k8s.io/controller-runtime v0.17.3 @@ -36,6 +37,7 @@ require ( github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 // indirect github.com/asaskevich/govalidator
v0.0.0-20230301143203-a9d515a09cc2 // indirect @@ -43,13 +45,16 @@ require ( github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect github.com/coreos/go-semver v0.3.1 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.8.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fvbommel/sortorder v1.1.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect @@ -78,16 +83,20 @@ require ( github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect diff --git a/go.sum b/go.sum index f2d152d5b1..adf8971412 100644 --- a/go.sum +++ b/go.sum @@ -6,10 +6,14 @@ cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1 h1:X8MJ0fnN5FPdcGF5Ij2/OW+HgiJrRg3AfHAx1PJtIzM= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230321174746-8dcc6526cfb1/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 
h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -21,6 +25,8 @@ github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -49,10 +55,14 @@ github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lSh github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.8.0 h1:lRj6N9Nci7MvzrXuX6HFzU8XjmhPiXPlsKEy1u0KQro= github.com/evanphx/json-patch/v5 v5.8.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= @@ -115,6 +125,7 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaU github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= @@ -131,6 +142,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 
h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc= +github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -156,6 +169,10 @@ github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -167,6 +184,8 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= @@ -194,6 +213,7 @@ github.com/ray-project/kuberay/ray-operator v1.1.1 h1:mVOA1ddS9aAsPvhhHrpf0ZXgTz github.com/ray-project/kuberay/ray-operator v1.1.1/go.mod h1:ZqyKKvMP5nKDldQoKmur+Wcx7wVlV9Q98phFqHzr+KY= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -416,6 +436,8 @@ k8s.io/kube-aggregator v0.28.1 h1:rvG4llYnQKHjj6YjjoBPEJxfD1uH0DJwkrJTNKGAaCs= k8s.io/kube-aggregator v0.28.1/go.mod 
h1:JaLizMe+AECSpO2OmrWVsvnG0V3dX1RpW+Wq/QHbu18= k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/kubectl v0.29.7 h1:D+Jheug9M++zlt67cROZgxaIjrDdLqp9jkW/EYrXAoM= +k8s.io/kubectl v0.29.7/go.mod h1:VOEJkcfKTO/X8xSSB6d2JXP/Qni6xtjuI3CUP52T9bM= k8s.io/metrics v0.29.7 h1:/oMPdVL7dt+lF8W6lXTg9gIKz1dDKgVBfDnJwgyJrhk= k8s.io/metrics v0.29.7/go.mod h1:5AiYPn1Crd25wtTh7OxHg9Rm2t9THSXJVp3Lb2k7MB8= k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= diff --git a/hack/internal/tools/go.mod b/hack/internal/tools/go.mod index 963733562d..139c123cc9 100644 --- a/hack/internal/tools/go.mod +++ b/hack/internal/tools/go.mod @@ -25,6 +25,7 @@ replace sigs.k8s.io/kueue => ../../.. require ( github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/BurntSushi/toml v1.4.0 // indirect + github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect github.com/Masterminds/semver/v3 v3.2.1 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect @@ -36,6 +37,7 @@ require ( github.com/bitfield/gotestdox v0.2.2 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect github.com/common-nighthawk/go-figure v0.0.0-20210622060536-734e95fb86be // indirect github.com/cyphar/filepath-securejoin v0.2.4 // indirect github.com/davecgh/go-spew v1.1.1 // indirect @@ -45,8 +47,10 @@ require ( github.com/emicklei/go-restful/v3 v3.12.1 // indirect github.com/evanphx/json-patch v5.9.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/fatih/color v1.17.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/fvbommel/sortorder v1.1.0 // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect @@ -67,6 +71,7 @@ require ( github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/huandu/xstrings v1.4.0 // indirect github.com/imdario/mergo v0.3.16 // indirect @@ -74,6 +79,8 @@ require ( github.com/jinzhu/copier v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kubeflow/mpi-operator v0.5.0 // indirect + github.com/kubeflow/training-operator v1.8.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -81,13 +88,16 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/mmarkdown/mmark v2.0.40+incompatible // indirect + github.com/moby/spdystream v0.2.0 // indirect github.com/moby/term v0.5.0 // indirect 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/pkg/errors v0.9.1 // indirect @@ -95,7 +105,10 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/ray-project/kuberay/ray-operator v1.1.1 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect github.com/spf13/afero v1.11.0 // indirect github.com/spf13/cast v1.7.0 // indirect github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect @@ -131,8 +144,10 @@ require ( k8s.io/gengo/v2 v2.0.0-20240404160639-a0386bf69313 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a // indirect + k8s.io/kubectl v0.30.3 // indirect k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 // indirect sigs.k8s.io/controller-runtime v0.18.4 // indirect + sigs.k8s.io/jobset v0.5.2 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect diff --git a/hack/internal/tools/go.sum b/hack/internal/tools/go.sum index 28f121ed89..856f034ebc 100644 --- a/hack/internal/tools/go.sum +++ b/hack/internal/tools/go.sum @@ -4,6 +4,8 @@ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg6 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.4.0 h1:kuoIxZQy2WRRk1pttg9asf+WVv6tWQuBNVmK8+nqPr0= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= +github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= +github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= @@ -19,6 +21,8 @@ github.com/alecthomas/participle/v2 v2.1.1 h1:hrjKESvSqGHzRb4yW1ciisFJ4p3MGYih6i github.com/alecthomas/participle/v2 v2.1.1/go.mod h1:Y1+hAs8DHPmc3YUFzqllV+eSQ9ljPTk0ZkPMtEdAx2c= github.com/alecthomas/repr v0.4.0 h1:GhI2A8MACjfegCPVq9f1FLvIBS+DrQ2KQBFZP1iFzXc= github.com/alecthomas/repr v0.4.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/beorn7/perks v1.0.1 
h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -30,6 +34,8 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= @@ -58,6 +64,8 @@ github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lSh github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= @@ -66,6 +74,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fvbommel/sortorder v1.1.0 h1:fUmoe+HLsBTctBDoaBwpQo5N+nrCp8g/BjKb/6ZQmYw= +github.com/fvbommel/sortorder v1.1.0/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= @@ -140,6 +150,9 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= @@ -152,6 
+165,8 @@ github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jarcoal/httpmock v1.2.0 h1:gSvTxxFR/MEMfsGrvRbdfpRUMBStovlSRLw0Ep1bwwc= +github.com/jarcoal/httpmock v1.2.0/go.mod h1:oCoTsnAz4+UoOUIf5lJOWV2QQIW5UoeUI6aM2YnWAZk= github.com/jinzhu/copier v0.4.0 h1:w3ciUoD19shMCRargcpm0cm91ytaBhDvuRpz1ODO/U8= github.com/jinzhu/copier v0.4.0/go.mod h1:DfbEm0FYsaqBcKcFuvmOZb218JkPGtvSHsKg8S8hyyg= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -164,6 +179,8 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kubeflow/mpi-operator v0.5.0 h1:XvBwyXXQ9103DNMa22sxsaQlaktvaT2LY/g0UniGn5U= +github.com/kubeflow/mpi-operator v0.5.0/go.mod h1:SeZQJW8KJxSTWD++eQYKRFpoDg1v8yrdC6fjx2/3mG0= github.com/kubeflow/training-operator v1.8.0 h1:cHXIz7BV3Ayp7W5Rqe20/ukmVEzraI+O/XRYKBHQcrg= github.com/kubeflow/training-operator v1.8.0/go.mod h1:T6I15h1S09ncH5C6St/QEC7Dy6dpHZA5sPFo+VoJAvE= github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= @@ -188,11 +205,15 @@ github.com/mikefarah/yq/v4 v4.44.3/go.mod h1:1pm9sJoyZLDql3OqgklvRCkD0XIIHMZV38j github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mmarkdown/mmark v2.0.40+incompatible h1:vMeUeDzBK3H+/mU0oMVfMuhSXJlIA+DE/DMPQNAj5C4= github.com/mmarkdown/mmark v2.0.40+incompatible/go.mod h1:Uvmoz7tvsWpr7bMVxIpqZPyN3FbOtzDmnsJDFp7ltJs= +github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -204,6 +225,8 @@ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/ github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= @@ -236,6 +259,7 @@ github.com/ray-project/kuberay/ray-operator v1.1.1/go.mod h1:ZqyKKvMP5nKDldQoKmu github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= @@ -493,6 +517,8 @@ k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a h1:zD1uj3Jf+mD4zmA7W+goE5TxDkI7OGJjBNBzq5fJtLA= k8s.io/kube-openapi v0.0.0-20240521193020-835d969ad83a/go.mod h1:UxDHUPsUwTOOxSU+oXURfFBcAS6JwiRXTYqYwfuGowc= +k8s.io/kubectl v0.30.3 h1:YIBBvMdTW0xcDpmrOBzcpUVsn+zOgjMYIu7kAq+yqiI= +k8s.io/kubectl v0.30.3/go.mod h1:IcR0I9RN2+zzTRUa1BzZCm4oM0NLOawE6RzlDvd1Fpo= k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0 h1:jgGTlFYnhF1PM1Ax/lAlxUPE+KfCIXHaathvJg1C3ak= k8s.io/utils v0.0.0-20240502163921-fe8a2dddb1d0/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= @@ -501,6 +527,8 @@ sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240605185440-12cc8d5 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20240605185440-12cc8d59fabe/go.mod h1:4+4tM2Es0ycqPedATtzPer5RTrUq3Xab59BYogt0mCE= sigs.k8s.io/controller-tools v0.15.0 h1:4dxdABXGDhIa68Fiwaif0vcu32xfwmgQ+w8p+5CxoAI= sigs.k8s.io/controller-tools v0.15.0/go.mod h1:8zUSS2T8Hx0APCNRhJWbS3CAQEbIxLa07khzh7pZmXM= +sigs.k8s.io/jobset v0.5.2 h1:276q5Pi/ErLYj+GQ0ydEXR6tx3LwBhEzHLQv+k8bYF4= +sigs.k8s.io/jobset v0.5.2/go.mod h1:Vg99rj/6OoGvy1uvywGEHOcVLCWWJYkJtisKqdWzcFw= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.24.0 h1:g4y4eu0qa+SCeKESLpESgMmVFBebL0BDa6f777OIWrg= diff --git a/pkg/controller/jobs/job/job_controller.go b/pkg/controller/jobs/job/job_controller.go index 138164e5ab..9f490d157f 100644 --- a/pkg/controller/jobs/job/job_controller.go +++ b/pkg/controller/jobs/job/job_controller.go @@ -208,6 +208,10 @@ func (j *Job) GVK() schema.GroupVersionKind { return gvk } +func (j *Job) PodLabelSelector() string { + return fmt.Sprintf("%s=%s", batchv1.JobNameLabel, j.Name) +} + func (j *Job) ReclaimablePods() ([]kueue.ReclaimablePod, error) { parallelism := ptr.Deref(j.Spec.Parallelism, 1) if parallelism == 1 || j.Status.Succeeded == 0 { diff --git a/pkg/controller/jobs/jobset/jobset_controller.go 
b/pkg/controller/jobs/jobset/jobset_controller.go index 57a388e090..2310f01609 100644 --- a/pkg/controller/jobs/jobset/jobset_controller.go +++ b/pkg/controller/jobs/jobset/jobset_controller.go @@ -18,6 +18,7 @@ package jobset import ( "context" + "fmt" "strings" apimeta "k8s.io/apimachinery/pkg/api/meta" @@ -104,6 +105,10 @@ func (j *JobSet) GVK() schema.GroupVersionKind { return gvk } +func (j *JobSet) PodLabelSelector() string { + return fmt.Sprintf("%s=%s", jobsetapi.JobSetNameKey, j.Name) +} + func (j *JobSet) PodSets() []kueue.PodSet { podSets := make([]kueue.PodSet, len(j.Spec.ReplicatedJobs)) for index, replicatedJob := range j.Spec.ReplicatedJobs { diff --git a/pkg/controller/jobs/kubeflow/jobs/mxjob/mxjob_controller.go b/pkg/controller/jobs/kubeflow/jobs/mxjob/mxjob_controller.go index 789622530a..652964d17b 100644 --- a/pkg/controller/jobs/kubeflow/jobs/mxjob/mxjob_controller.go +++ b/pkg/controller/jobs/kubeflow/jobs/mxjob/mxjob_controller.go @@ -18,6 +18,7 @@ package mxjob import ( "context" + "fmt" "strings" kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" @@ -83,6 +84,10 @@ func (j *JobControl) GVK() schema.GroupVersionKind { return gvk } +func (j *JobControl) PodLabelSelector() string { + return fmt.Sprintf("%s=%s,%s=%s", kftraining.JobNameLabel, j.Name, kftraining.OperatorNameLabel, "mxjob-controller") +} + func (j *JobControl) RunPolicy() *kftraining.RunPolicy { return &j.Spec.RunPolicy } diff --git a/pkg/controller/jobs/kubeflow/jobs/paddlejob/paddlejob_controller.go b/pkg/controller/jobs/kubeflow/jobs/paddlejob/paddlejob_controller.go index eb091ba7a6..36acbeb510 100644 --- a/pkg/controller/jobs/kubeflow/jobs/paddlejob/paddlejob_controller.go +++ b/pkg/controller/jobs/kubeflow/jobs/paddlejob/paddlejob_controller.go @@ -18,6 +18,7 @@ package paddlejob import ( "context" + "fmt" "strings" kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" @@ -84,6 +85,10 @@ func (j *JobControl) GVK() schema.GroupVersionKind { return gvk } +func (j *JobControl) PodLabelSelector() string { + return fmt.Sprintf("%s=%s,%s=%s", kftraining.JobNameLabel, j.Name, kftraining.OperatorNameLabel, "paddlejob-controller") +} + func (j *JobControl) RunPolicy() *kftraining.RunPolicy { return &j.Spec.RunPolicy } diff --git a/pkg/controller/jobs/kubeflow/jobs/pytorchjob/pytorchjob_controller.go b/pkg/controller/jobs/kubeflow/jobs/pytorchjob/pytorchjob_controller.go index b63f17cbc0..47ce0ff166 100644 --- a/pkg/controller/jobs/kubeflow/jobs/pytorchjob/pytorchjob_controller.go +++ b/pkg/controller/jobs/kubeflow/jobs/pytorchjob/pytorchjob_controller.go @@ -18,6 +18,7 @@ package pytorchjob import ( "context" + "fmt" "strings" kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" @@ -84,6 +85,10 @@ func (j *JobControl) GVK() schema.GroupVersionKind { return gvk } +func (j *JobControl) PodLabelSelector() string { + return fmt.Sprintf("%s=%s,%s=%s", kftraining.JobNameLabel, j.Name, kftraining.OperatorNameLabel, "pytorchjob-controller") +} + func (j *JobControl) RunPolicy() *kftraining.RunPolicy { return &j.Spec.RunPolicy } diff --git a/pkg/controller/jobs/kubeflow/jobs/tfjob/tfjob_controller.go b/pkg/controller/jobs/kubeflow/jobs/tfjob/tfjob_controller.go index 02f9896a19..6f8de13b81 100644 --- a/pkg/controller/jobs/kubeflow/jobs/tfjob/tfjob_controller.go +++ b/pkg/controller/jobs/kubeflow/jobs/tfjob/tfjob_controller.go @@ -18,6 +18,7 @@ package tfjob import ( "context" + "fmt" "strings" kftraining 
"github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" @@ -84,6 +85,10 @@ func (j *JobControl) GVK() schema.GroupVersionKind { return gvk } +func (j *JobControl) PodLabelSelector() string { + return fmt.Sprintf("%s=%s,%s=%s", kftraining.JobNameLabel, j.Name, kftraining.OperatorNameLabel, "tfjob-controller") +} + func (j *JobControl) RunPolicy() *kftraining.RunPolicy { return &j.Spec.RunPolicy } diff --git a/pkg/controller/jobs/kubeflow/jobs/xgboostjob/xgboostjob_controller.go b/pkg/controller/jobs/kubeflow/jobs/xgboostjob/xgboostjob_controller.go index 8ddd0dae14..fe45ba4fe3 100644 --- a/pkg/controller/jobs/kubeflow/jobs/xgboostjob/xgboostjob_controller.go +++ b/pkg/controller/jobs/kubeflow/jobs/xgboostjob/xgboostjob_controller.go @@ -18,6 +18,7 @@ package xgboostjob import ( "context" + "fmt" "strings" kftraining "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" @@ -84,6 +85,10 @@ func (j *JobControl) GVK() schema.GroupVersionKind { return gvk } +func (j *JobControl) PodLabelSelector() string { + return fmt.Sprintf("%s=%s,%s=%s", kftraining.JobNameLabel, j.Name, kftraining.OperatorNameLabel, "xgboostjob-controller") +} + func (j *JobControl) RunPolicy() *kftraining.RunPolicy { return &j.Spec.RunPolicy } diff --git a/pkg/controller/jobs/kubeflow/kubeflowjob/interface.go b/pkg/controller/jobs/kubeflow/kubeflowjob/interface.go index f2d391a9df..8c9dc42c69 100644 --- a/pkg/controller/jobs/kubeflow/kubeflowjob/interface.go +++ b/pkg/controller/jobs/kubeflow/kubeflowjob/interface.go @@ -35,4 +35,6 @@ type KFJobControl interface { JobStatus() *kftraining.JobStatus // OrderedReplicaTypes returns the ordered list of ReplicaTypes for the KFJob. OrderedReplicaTypes() []kftraining.ReplicaType + // PodLabelSelector returns the label selector used by pods for the job. + PodLabelSelector() string } diff --git a/pkg/controller/jobs/kubeflow/kubeflowjob/kubeflowjob_controller.go b/pkg/controller/jobs/kubeflow/kubeflowjob/kubeflowjob_controller.go index f2746ffcd0..a6a1564e90 100644 --- a/pkg/controller/jobs/kubeflow/kubeflowjob/kubeflowjob_controller.go +++ b/pkg/controller/jobs/kubeflow/kubeflowjob/kubeflowjob_controller.go @@ -129,6 +129,10 @@ func (j *KubeflowJob) GVK() schema.GroupVersionKind { return j.KFJobControl.GVK() } +func (j *KubeflowJob) PodLabelSelector() string { + return j.KFJobControl.PodLabelSelector() +} + // PriorityClass calculates the priorityClass name needed for workload according to the following priorities: // 1. .spec.runPolicy.schedulingPolicy.priorityClass // 2. 
.spec.replicaSpecs[OrderedReplicaTypes[0]].template.spec.priorityClassName diff --git a/pkg/controller/jobs/mpijob/mpijob_controller.go b/pkg/controller/jobs/mpijob/mpijob_controller.go index df1e1e8ca5..3a0929fb79 100644 --- a/pkg/controller/jobs/mpijob/mpijob_controller.go +++ b/pkg/controller/jobs/mpijob/mpijob_controller.go @@ -18,6 +18,7 @@ package mpijob import ( "context" + "fmt" "strings" kubeflow "github.com/kubeflow/mpi-operator/pkg/apis/kubeflow/v2beta1" @@ -103,6 +104,10 @@ func (j *MPIJob) GVK() schema.GroupVersionKind { return gvk } +func (j *MPIJob) PodLabelSelector() string { + return fmt.Sprintf("%s=%s,%s=%s", kubeflow.JobNameLabel, j.Name, kubeflow.OperatorNameLabel, kubeflow.OperatorName) +} + func (j *MPIJob) PodSets() []kueue.PodSet { replicaTypes := orderedReplicaTypes(&j.Spec) podSets := make([]kueue.PodSet, len(replicaTypes)) diff --git a/pkg/controller/jobs/pod/pod_controller.go b/pkg/controller/jobs/pod/pod_controller.go index 94d1f3b716..6fe38d95fc 100644 --- a/pkg/controller/jobs/pod/pod_controller.go +++ b/pkg/controller/jobs/pod/pod_controller.go @@ -427,6 +427,10 @@ func (p *Pod) GVK() schema.GroupVersionKind { return gvk } +func (p *Pod) PodLabelSelector() string { + return fmt.Sprintf("%s=%s", GroupNameLabel, p.pod.Labels[GroupNameLabel]) +} + func (p *Pod) Stop(ctx context.Context, c client.Client, _ []podset.PodSetInfo, stopReason jobframework.StopReason, eventMsg string) ([]client.Object, error) { var podsInGroup []corev1.Pod diff --git a/pkg/controller/jobs/raycluster/raycluster_controller.go b/pkg/controller/jobs/raycluster/raycluster_controller.go index ed093fe686..814770c88f 100644 --- a/pkg/controller/jobs/raycluster/raycluster_controller.go +++ b/pkg/controller/jobs/raycluster/raycluster_controller.go @@ -15,9 +15,11 @@ package raycluster import ( "context" + "fmt" "strings" rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" + rayutils "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" @@ -87,6 +89,10 @@ func (j *RayCluster) GVK() schema.GroupVersionKind { return gvk } +func (j *RayCluster) PodLabelSelector() string { + return fmt.Sprintf("%s=%s", rayutils.RayClusterLabelKey, j.Name) +} + func (j *RayCluster) PodSets() []kueue.PodSet { // len = workerGroups + head podSets := make([]kueue.PodSet, len(j.Spec.WorkerGroupSpecs)+1) diff --git a/pkg/controller/jobs/rayjob/rayjob_controller.go b/pkg/controller/jobs/rayjob/rayjob_controller.go index 9ef4770c83..00d4d7f81e 100644 --- a/pkg/controller/jobs/rayjob/rayjob_controller.go +++ b/pkg/controller/jobs/rayjob/rayjob_controller.go @@ -18,9 +18,11 @@ package rayjob import ( "context" + "fmt" "strings" rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" + rayutils "github.com/ray-project/kuberay/ray-operator/controllers/ray/utils" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" @@ -88,6 +90,13 @@ func (j *RayJob) GVK() schema.GroupVersionKind { return gvk } +func (j *RayJob) PodLabelSelector() string { + if j.Status.RayClusterName != "" { + return fmt.Sprintf("%s=%s", rayutils.RayClusterLabelKey, j.Status.RayClusterName) + } + return "" +} + func (j *RayJob) PodSets() []kueue.PodSet { // len = workerGroups + head podSets := make([]kueue.PodSet, len(j.Spec.RayClusterSpec.WorkerGroupSpecs)+1) diff --git 
a/site/content/en/docs/reference/kubectl-kueue/commands/kueuectl_list/_index.md b/site/content/en/docs/reference/kubectl-kueue/commands/kueuectl_list/_index.md
index 7ffa8fe94a..a307addf05 100644
--- a/site/content/en/docs/reference/kubectl-kueue/commands/kueuectl_list/_index.md
+++ b/site/content/en/docs/reference/kubectl-kueue/commands/kueuectl_list/_index.md
@@ -19,7 +19,7 @@ Display resources
 ## Examples
 
 ```
- # List LocalQueue
+ # List LocalQueue
 kueuectl list localqueue
 ```
 
@@ -226,6 +226,7 @@ Display resources
 * [kueuectl](../kueuectl/) - Controls Kueue queueing manager
 * [kueuectl list clusterqueue](kueuectl_list_clusterqueue/) - List ClusterQueues
 * [kueuectl list localqueue](kueuectl_list_localqueue/) - List LocalQueue
+* [kueuectl list pods](kueuectl_list_pods/) - List Pods belonging to a Job Kind
 * [kueuectl list resourceflavor](kueuectl_list_resourceflavor/) - List ResourceFlavor
 * [kueuectl list workload](kueuectl_list_workload/) - List Workload
 
diff --git a/site/content/en/docs/reference/kubectl-kueue/commands/kueuectl_list/kueuectl_list_pods.md b/site/content/en/docs/reference/kubectl-kueue/commands/kueuectl_list/kueuectl_list_pods.md
new file mode 100644
index 0000000000..affb0fbe08
--- /dev/null
+++ b/site/content/en/docs/reference/kubectl-kueue/commands/kueuectl_list/kueuectl_list_pods.md
@@ -0,0 +1,305 @@
+---
+title: kueuectl list pods
+content_type: tool-reference
+auto_generated: true
+no_list: false
+---
+
+## Synopsis
+
+Lists all pods that match the given criteria: they should be part of the specified Job kind,
+belong to the specified namespace, and match
+the label selector or the field selector.
+
+```
+kueuectl list pods --for TYPE[.API-GROUP]/NAME
+```
+
+## Examples
+
+```
+ # List Pods
+kueuectl list pods --for job/job-name
+```
+
+## Options
+
+| Flag | Description |
+| --- | --- |
+| `-A, --all-namespaces` | If present, list the requested object(s) across all namespaces. Namespace in current context is ignored even if specified with --namespace. |
+| `--allow-missing-template-keys` (Default: `true`) | If true, ignore any errors in templates when a field or map key is missing in the template. Only applies to golang and jsonpath output formats. |
+| `--field-selector string` | Selector (field query) to filter on, supports '=', '==', and '!='. (e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type. |
+| `--for string` | Filter workloads to only those pertaining to the specified resource. |
+| `-h, --help` | help for pods |
+| `-o, --output string` | Output format. One of: (json, yaml, name, go-template, go-template-file, template, templatefile, jsonpath, jsonpath-as-json, jsonpath-file). |
+| `-l, --selector string` | Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints. |
+| `--show-managed-fields` | If true, keep the managedFields when printing objects in JSON or YAML format. |
+| `--template string` | Template string or path to template file to use when -o=go-template, -o=go-template-file. The template format is golang templates [http://golang.org/pkg/text/template/#pkg-overview]. |
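For example, the command-specific flags above compose with the inherited kubectl-style flags listed below. A hypothetical invocation (the namespace `team-a`, job name `sample-job`, and both selectors are placeholders, not values from this patch):

```
kueuectl list pods --for job/sample-job -n team-a -l app=trainer --field-selector status.phase=Running
```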
+
+## Options inherited from parent commands
+
+| Flag | Description |
+| --- | --- |
+| `--as string` | Username to impersonate for the operation. User could be a regular user or a service account in a namespace. |
+| `--as-group strings` | Group to impersonate for the operation, this flag can be repeated to specify multiple groups. |
+| `--as-uid string` | UID to impersonate for the operation. |
+| `--cache-dir string` (Default: `"$HOME/.kube/cache"`) | Default cache directory |
+| `--certificate-authority string` | Path to a cert file for the certificate authority |
+| `--client-certificate string` | Path to a client certificate file for TLS |
+| `--client-key string` | Path to a client key file for TLS |
+| `--cluster string` | The name of the kubeconfig cluster to use |
+| `--context string` | The name of the kubeconfig context to use |
+| `--disable-compression` | If true, opt-out of response compression for all requests to the server |
+| `--insecure-skip-tls-verify` | If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure |
+| `--kubeconfig string` | Path to the kubeconfig file to use for CLI requests. |
+| `-n, --namespace string` | If present, the namespace scope for this CLI request |
+| `--request-timeout string` (Default: `"0"`) | The length of time to wait before giving up on a single server request. Non-zero values should contain a corresponding time unit (e.g. 1s, 2m, 3h). A value of zero means don't timeout requests. |
+| `-s, --server string` | The address and port of the Kubernetes API server |
+| `--tls-server-name string` | Server name to use for server certificate validation. If it is not provided, the hostname used to contact the server is used |
+| `--token string` | Bearer token for authentication to the API server |
+| `--user string` | The name of the kubeconfig user to use |
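To make the mechanics concrete: the command resolves the `--for` object to a pod label selector (via the `PodLabelSelector()` methods added to each job integration in this patch) and then lists pods with that selector. Below is a minimal client-go sketch of that final step for a `batch/v1` Job, not the command's actual implementation; the namespace `team-a` and job name `sample-job` are placeholders, and error handling is reduced to panics.

```go
package main

import (
	"context"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from the default kubeconfig. kueuectl goes through
	// its ClientGetter; this is the plain client-go equivalent.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	// Same selector shape as Job.PodLabelSelector() in this patch:
	// "batch.kubernetes.io/job-name=<name>". Other integrations add a second
	// constraint, e.g. the kubeflow operators also match an operator-name label.
	selector := fmt.Sprintf("%s=%s", batchv1.JobNameLabel, "sample-job")

	pods, err := clientset.CoreV1().Pods("team-a").List(context.TODO(),
		metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		panic(err)
	}
	for _, p := range pods.Items {
		fmt.Println(p.Name, string(p.Status.Phase))
	}
}
```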
+ + + +## See Also + +* [kueuectl list](../) - Display resources + diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE new file mode 100644 index 0000000000..6d0eb9d5d6 --- /dev/null +++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014-2019 TSUYUSATO Kitsune + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md new file mode 100644 index 0000000000..e9924d2974 --- /dev/null +++ b/vendor/github.com/MakeNowJust/heredoc/README.md @@ -0,0 +1,52 @@ +# heredoc + +[![Build Status](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![GoDoc](https://godoc.org/github.com/MakeNowJusti/heredoc?status.svg)](https://godoc.org/github.com/MakeNowJust/heredoc) + +## About + +Package heredoc provides the here-document with keeping indent. + +## Install + +```console +$ go get github.com/MakeNowJust/heredoc +``` + +## Import + +```go +// usual +import "github.com/MakeNowJust/heredoc" +``` + +## Example + +```go +package main + +import ( + "fmt" + "github.com/MakeNowJust/heredoc" +) + +func main() { + fmt.Println(heredoc.Doc(` + Lorem ipsum dolor sit amet, consectetur adipisicing elit, + sed do eiusmod tempor incididunt ut labore et dolore magna + aliqua. Ut enim ad minim veniam, ... + `)) + // Output: + // Lorem ipsum dolor sit amet, consectetur adipisicing elit, + // sed do eiusmod tempor incididunt ut labore et dolore magna + // aliqua. Ut enim ad minim veniam, ... + // +} +``` + +## API Document + + - [heredoc - GoDoc](https://godoc.org/github.com/MakeNowJust/heredoc) + +## License + +This software is released under the MIT License, see LICENSE. diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go new file mode 100644 index 0000000000..1fc0469555 --- /dev/null +++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go @@ -0,0 +1,105 @@ +// Copyright (c) 2014-2019 TSUYUSATO Kitsune +// This software is released under the MIT License. +// http://opensource.org/licenses/mit-license.php + +// Package heredoc provides creation of here-documents from raw strings. +// +// Golang supports raw-string syntax. +// +// doc := ` +// Foo +// Bar +// ` +// +// But raw-string cannot recognize indentation. Thus such content is an indented string, equivalent to +// +// "\n\tFoo\n\tBar\n" +// +// I dont't want this! 
+// +// However this problem is solved by package heredoc. +// +// doc := heredoc.Doc(` +// Foo +// Bar +// `) +// +// Is equivalent to +// +// "Foo\nBar\n" +package heredoc + +import ( + "fmt" + "strings" + "unicode" +) + +const maxInt = int(^uint(0) >> 1) + +// Doc returns un-indented string as here-document. +func Doc(raw string) string { + skipFirstLine := false + if len(raw) > 0 && raw[0] == '\n' { + raw = raw[1:] + } else { + skipFirstLine = true + } + + lines := strings.Split(raw, "\n") + + minIndentSize := getMinIndent(lines, skipFirstLine) + lines = removeIndentation(lines, minIndentSize, skipFirstLine) + + return strings.Join(lines, "\n") +} + +// getMinIndent calculates the minimum indentation in lines, excluding empty lines. +func getMinIndent(lines []string, skipFirstLine bool) int { + minIndentSize := maxInt + + for i, line := range lines { + if i == 0 && skipFirstLine { + continue + } + + indentSize := 0 + for _, r := range []rune(line) { + if unicode.IsSpace(r) { + indentSize += 1 + } else { + break + } + } + + if len(line) == indentSize { + if i == len(lines)-1 && indentSize < minIndentSize { + lines[i] = "" + } + } else if indentSize < minIndentSize { + minIndentSize = indentSize + } + } + return minIndentSize +} + +// removeIndentation removes n characters from the front of each line in lines. +// Skips first line if skipFirstLine is true, skips empty lines. +func removeIndentation(lines []string, n int, skipFirstLine bool) []string { + for i, line := range lines { + if i == 0 && skipFirstLine { + continue + } + + if len(lines[i]) >= n { + lines[i] = line[n:] + } + } + return lines +} + +// Docf returns unindented and formatted string as here-document. +// Formatting is done as for fmt.Printf(). +func Docf(raw string, args ...interface{}) string { + return fmt.Sprintf(Doc(raw), args...) +} diff --git a/vendor/github.com/chai2010/gettext-go/.travis.yml b/vendor/github.com/chai2010/gettext-go/.travis.yml new file mode 100644 index 0000000000..4eac3982bc --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - "1.14" + - tip diff --git a/vendor/github.com/chai2010/gettext-go/LICENSE b/vendor/github.com/chai2010/gettext-go/LICENSE new file mode 100644 index 0000000000..8f39408250 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2013 ChaiShushan . All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/chai2010/gettext-go/README.md b/vendor/github.com/chai2010/gettext-go/README.md new file mode 100644 index 0000000000..9381bd1522 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/README.md @@ -0,0 +1,191 @@ +- *赞助 BTC: 1Cbd6oGAUUyBi7X7MaR4np4nTmQZXVgkCW* +- *赞助 ETH: 0x623A3C3a72186A6336C79b18Ac1eD36e1c71A8a6* +- *Go语言付费QQ群: 1055927514* + +---- + +# gettext-go: GNU gettext for Go ([Imported By Kubernetes](https://pkg.go.dev/github.com/chai2010/gettext-go@v0.1.0/gettext?tab=importedby)) + +- PkgDoc: [http://godoc.org/github.com/chai2010/gettext-go](http://godoc.org/github.com/chai2010/gettext-go) +- PkgDoc: [http://pkg.go.dev/github.com/chai2010/gettext-go](http://pkg.go.dev/github.com/chai2010/gettext-go) + +## Install + +1. `go get github.com/chai2010/gettext-go` +2. `go run hello.go` + +The godoc.org or go.dev has more information. + +## Examples + +```Go +package main + +import ( + "fmt" + + "github.com/chai2010/gettext-go" +) + +func main() { + gettext := gettext.New("hello", "./examples/locale").SetLanguage("zh_CN") + fmt.Println(gettext.Gettext("Hello, world!")) + + // Output: 你好, 世界! +} +``` + +```Go +package main + +import ( + "fmt" + + "github.com/chai2010/gettext-go" +) + +func main() { + gettext.SetLanguage("zh_CN") + gettext.BindLocale(gettext.New("hello", "locale")) + + // gettext.BindLocale("hello", "locale") // from locale dir + // gettext.BindLocale("hello", "locale.zip") // from locale zip file + // gettext.BindLocale("hello", "locale.zip", zipData) // from embedded zip data + + // translate source text + fmt.Println(gettext.Gettext("Hello, world!")) + // Output: 你好, 世界! + + // if no msgctxt in PO file (only msgid and msgstr), + // specify context as "" by + fmt.Println(gettext.PGettext("", "Hello, world!")) + // Output: 你好, 世界! + + // translate resource + fmt.Println(string(gettext.Getdata("poems.txt")))) + // Output: ... 
+} +``` + +Go file: [hello.go](https://github.com/chai2010/gettext-go/blob/master/examples/hello.go); PO file: [hello.po](https://github.com/chai2010/gettext-go/blob/master/examples/locale/default/LC_MESSAGES/hello.po); + +---- + +## API Changes (v0.1.0 vs v1.0.0) + +### Renamed package path + +| v0.1.0 (old) | v1.0.0 (new) | +| ----------------------------------------------- | --------------------------------------- | +| `github.com/chai2010/gettext-go/gettext` | `github.com/chai2010/gettext-go` | +| `github.com/chai2010/gettext-go/gettext/po` | `github.com/chai2010/gettext-go/po` | +| `github.com/chai2010/gettext-go/gettext/mo` | `github.com/chai2010/gettext-go/mo` | +| `github.com/chai2010/gettext-go/gettext/plural` | `github.com/chai2010/gettext-go/plural` | + +### Renamed functions + +| v0.1.0 (old) | v1.0.0 (new) | +| ---------------------------------- | --------------------------- | +| `gettext-go/gettext.*` | `gettext-go.*` | +| `gettext-go/gettext.DefaultLocal` | `gettext-go.DefaultLanguage`| +| `gettext-go/gettext.BindTextdomain`| `gettext-go.BindLocale` | +| `gettext-go/gettext.Textdomain` | `gettext-go.SetDomain` | +| `gettext-go/gettext.SetLocale` | `gettext-go.SetLanguage` | +| `gettext-go/gettext/po.Load` | `gettext-go/po.LoadFile` | +| `gettext-go/gettext/po.LoadData` | `gettext-go/po.Load` | +| `gettext-go/gettext/mo.Load` | `gettext-go/mo.LoadFile` | +| `gettext-go/gettext/mo.LoadData` | `gettext-go/mo.Load` | + +### Use empty string as the default context for `gettext.Gettext` + +```go +package main + +// v0.1.0 +// if the **context** missing, use `callerName(2)` as the context: + +// v1.0.0 +// if the **context** missing, use empty string as the context: + +func main() { + gettext.Gettext("hello") + // v0.1.0 => gettext.PGettext("main.main", "hello") + // v1.0.0 => gettext.PGettext("", "hello") + + gettext.DGettext("domain", "hello") + // v0.1.0 => gettext.DPGettext("domain", "main.main", "hello") + // v1.0.0 => gettext.DPGettext("domain", "", "hello") + + gettext.NGettext("domain", "hello", "hello2", n) + // v0.1.0 => gettext.PNGettext("domain", "main.main", "hello", "hello2", n) + // v1.0.0 => gettext.PNGettext("domain", "", "hello", "hello2", n) + + gettext.DNGettext("domain", "hello", "hello2", n) + // v0.1.0 => gettext.DPNGettext("domain", "main.main", "hello", "hello2", n) + // v1.0.0 => gettext.DPNGettext("domain", "", "hello", "hello2", n) +} +``` + +### `BindLocale` support `FileSystem` interface + +```go +// Use FileSystem: +// BindLocale(New("poedit", "name", OS("path/to/dir"))) // bind "poedit" domain +// BindLocale(New("poedit", "name", OS("path/to.zip"))) // bind "poedit" domain +``` + +## New API in v1.0.0 + +`Gettexter` interface: + +```go +type Gettexter interface { + FileSystem() FileSystem + + GetDomain() string + SetDomain(domain string) Gettexter + + GetLanguage() string + SetLanguage(lang string) Gettexter + + Gettext(msgid string) string + PGettext(msgctxt, msgid string) string + + NGettext(msgid, msgidPlural string, n int) string + PNGettext(msgctxt, msgid, msgidPlural string, n int) string + + DGettext(domain, msgid string) string + DPGettext(domain, msgctxt, msgid string) string + DNGettext(domain, msgid, msgidPlural string, n int) string + DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string + + Getdata(name string) []byte + DGetdata(domain, name string) []byte +} + +func New(domain, path string, data ...interface{}) Gettexter +``` + +`FileSystem` interface: + +```go +type FileSystem interface { + LocaleList() []string + 
LoadMessagesFile(domain, lang, ext string) ([]byte, error) + LoadResourceFile(domain, lang, name string) ([]byte, error) + String() string +} + +func NewFS(name string, x interface{}) FileSystem +func OS(root string) FileSystem +func ZipFS(r *zip.Reader, name string) FileSystem +func NilFS(name string) FileSystem +``` + +---- + +## BUGS + +Please report bugs to . + +Thanks! diff --git a/vendor/github.com/chai2010/gettext-go/doc.go b/vendor/github.com/chai2010/gettext-go/doc.go new file mode 100644 index 0000000000..50dfea3305 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/doc.go @@ -0,0 +1,67 @@ +// Copyright 2013 . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gettext implements a basic GNU's gettext library. + +Example: + import ( + "github.com/chai2010/gettext-go" + ) + + func main() { + gettext.SetLanguage("zh_CN") + + // gettext.BindLocale(gettext.New("hello", "locale")) // from locale dir + // gettext.BindLocale(gettext.New("hello", "locale.zip")) // from locale zip file + // gettext.BindLocale(gettext.New("hello", "locale.zip", zipData)) // from embedded zip data + + gettext.BindLocale(gettext.New("hello", "locale")) + + // translate source text + fmt.Println(gettext.Gettext("Hello, world!")) + // Output: 你好, 世界! + + // translate resource + fmt.Println(string(gettext.Getdata("poems.txt"))) + // Output: ... + } + +Translate directory struct("./examples/locale.zip"): + + Root: "path" or "file.zip/zipBaseName" + +-default # locale: $(LC_MESSAGES) or $(LANG) or "default" + | +-LC_MESSAGES # just for `gettext.Gettext` + | | +-hello.mo # $(Root)/$(lang)/LC_MESSAGES/$(domain).mo + | | +-hello.po # $(Root)/$(lang)/LC_MESSAGES/$(domain).po + | | \-hello.json # $(Root)/$(lang)/LC_MESSAGES/$(domain).json + | | + | \-LC_RESOURCE # just for `gettext.Getdata` + | +-hello # domain map a dir in resource translate + | +-favicon.ico # $(Root)/$(lang)/LC_RESOURCE/$(domain)/$(filename) + | \-poems.txt + | + \-zh_CN # simple chinese translate + +-LC_MESSAGES + | +-hello.po # try "$(domain).po" first + | +-hello.mo # try "$(domain).mo" second + | \-hello.json # try "$(domain).json" third + | + \-LC_RESOURCE + +-hello + +-favicon.ico # $(lang)/$(domain)/favicon.ico + \-poems.txt # $(lang)/$(domain)/poems.txt + +See: + http://en.wikipedia.org/wiki/Gettext + http://www.gnu.org/software/gettext/manual/html_node + http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html + http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html + http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html + http://www.poedit.net/ + +Please report bugs to . +Thanks! +*/ +package gettext diff --git a/vendor/github.com/chai2010/gettext-go/fs.go b/vendor/github.com/chai2010/gettext-go/fs.go new file mode 100644 index 0000000000..4e66fae7c6 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs.go @@ -0,0 +1,84 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "archive/zip" + "bytes" + "fmt" +) + +type FileSystem interface { + LocaleList() []string + LoadMessagesFile(domain, lang, ext string) ([]byte, error) + LoadResourceFile(domain, lang, name string) ([]byte, error) + String() string +} + +func NewFS(name string, x interface{}) FileSystem { + if x == nil { + if name != "" { + return OS(name) + } + return NilFS(name) + } + + switch x := x.(type) { + case []byte: + if len(x) == 0 { + return OS(name) + } + if r, err := zip.NewReader(bytes.NewReader(x), int64(len(x))); err == nil { + return ZipFS(r, name) + } + if fs, err := newJson(x, name); err == nil { + return fs + } + case string: + if len(x) == 0 { + return OS(name) + } + if r, err := zip.NewReader(bytes.NewReader([]byte(x)), int64(len(x))); err == nil { + return ZipFS(r, name) + } + if fs, err := newJson([]byte(x), name); err == nil { + return fs + } + case FileSystem: + return x + } + + return NilFS(name) +} + +func OS(root string) FileSystem { + return newOsFS(root) +} + +func ZipFS(r *zip.Reader, name string) FileSystem { + return newZipFS(r, name) +} + +func NilFS(name string) FileSystem { + return &nilFS{name} +} + +type nilFS struct { + name string +} + +func (p *nilFS) LocaleList() []string { + return nil +} + +func (p *nilFS) LoadMessagesFile(domain, lang, ext string) ([]byte, error) { + return nil, fmt.Errorf("not found") +} +func (p *nilFS) LoadResourceFile(domain, lang, name string) ([]byte, error) { + return nil, fmt.Errorf("not found") +} +func (p *nilFS) String() string { + return "gettext.nilfs(" + p.name + ")" +} diff --git a/vendor/github.com/chai2010/gettext-go/fs_json.go b/vendor/github.com/chai2010/gettext-go/fs_json.go new file mode 100644 index 0000000000..c7138c9954 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs_json.go @@ -0,0 +1,66 @@ +// Copyright 2020 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "encoding/json" + "fmt" + "sort" +) + +type jsonFS struct { + name string + x map[string]struct { + LC_MESSAGES map[string][]struct { + MsgContext string `json:"msgctxt"` // msgctxt context + MsgId string `json:"msgid"` // msgid untranslated-string + MsgIdPlural string `json:"msgid_plural"` // msgid_plural untranslated-string-plural + MsgStr []string `json:"msgstr"` // msgstr translated-string + } + LC_RESOURCE map[string]map[string]string + } +} + +func isJsonData() bool { + return false +} + +func newJson(jsonData []byte, name string) (*jsonFS, error) { + p := &jsonFS{name: name} + if err := json.Unmarshal(jsonData, &p.x); err != nil { + return nil, err + } + + return p, nil +} + +func (p *jsonFS) LocaleList() []string { + var ss []string + for lang := range p.x { + ss = append(ss, lang) + } + sort.Strings(ss) + return ss +} + +func (p *jsonFS) LoadMessagesFile(domain, lang, ext string) ([]byte, error) { + if v, ok := p.x[lang]; ok { + if v, ok := v.LC_MESSAGES[domain+ext]; ok { + return json.Marshal(v) + } + } + return nil, fmt.Errorf("not found") +} +func (p *jsonFS) LoadResourceFile(domain, lang, name string) ([]byte, error) { + if v, ok := p.x[lang]; ok { + if v, ok := v.LC_RESOURCE[domain]; ok { + return []byte(v[name]), nil + } + } + return nil, fmt.Errorf("not found") +} +func (p *jsonFS) String() string { + return "gettext.nilfs(" + p.name + ")" +} diff --git a/vendor/github.com/chai2010/gettext-go/fs_os.go b/vendor/github.com/chai2010/gettext-go/fs_os.go new file mode 100644 index 0000000000..80d4f51bac --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs_os.go @@ -0,0 +1,91 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "archive/zip" + "bytes" + "fmt" + "io/ioutil" + "os" + "sort" + "strings" +) + +type osFS struct { + root string +} + +func newOsFS(root string) FileSystem { + // locale zip file + if fi, err := os.Stat(root); err == nil && !fi.IsDir() { + if strings.HasSuffix(strings.ToLower(root), ".zip") { + if x, err := ioutil.ReadFile(root); err == nil { + if r, err := zip.NewReader(bytes.NewReader(x), int64(len(x))); err == nil { + return ZipFS(r, root) + } + } + } + if strings.HasSuffix(strings.ToLower(root), ".json") { + if x, err := ioutil.ReadFile(root); err == nil { + if fs, err := newJson(x, root); err == nil { + return fs + } + } + } + } + + // locale dir + return &osFS{root: root} +} + +func (p *osFS) LocaleList() []string { + list, err := ioutil.ReadDir(p.root) + if err != nil { + return nil + } + ssMap := make(map[string]bool) + for _, dir := range list { + if dir.IsDir() { + ssMap[dir.Name()] = true + } + } + var locales = make([]string, 0, len(ssMap)) + for s := range ssMap { + locales = append(locales, s) + } + sort.Strings(locales) + return locales +} + +func (p *osFS) LoadMessagesFile(domain, locale, ext string) ([]byte, error) { + trName := p.makeMessagesFileName(domain, locale, ext) + rcData, err := ioutil.ReadFile(trName) + if err != nil { + return nil, err + } + return rcData, nil +} + +func (p *osFS) LoadResourceFile(domain, locale, name string) ([]byte, error) { + rcName := p.makeResourceFileName(domain, locale, name) + rcData, err := ioutil.ReadFile(rcName) + if err != nil { + return nil, err + } + return rcData, nil +} + +func (p *osFS) String() string { + return "gettext.localfs(" + p.root + ")" +} + +func (p *osFS) makeMessagesFileName(domain, lang, ext string) string { + return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.root, lang, domain, ext) +} + +func (p *osFS) makeResourceFileName(domain, lang, name string) string { + return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.root, lang, domain, name) +} diff --git a/vendor/github.com/chai2010/gettext-go/fs_zip.go b/vendor/github.com/chai2010/gettext-go/fs_zip.go new file mode 100644 index 0000000000..61eb8359da --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/fs_zip.go @@ -0,0 +1,142 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "archive/zip" + "fmt" + "io/ioutil" + "sort" + "strings" +) + +type zipFS struct { + root string + name string + r *zip.Reader +} + +func newZipFS(r *zip.Reader, name string) *zipFS { + fs := &zipFS{r: r, name: name} + fs.root = fs.zipRoot() + return fs +} + +func (p *zipFS) zipName() string { + name := p.name + if x := strings.LastIndexAny(name, `\/`); x != -1 { + name = name[x+1:] + } + name = strings.TrimSuffix(name, ".zip") + return name +} + +func (p *zipFS) zipRoot() string { + var somepath string + for _, f := range p.r.File { + if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 { + somepath = f.Name + } + if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 { + somepath = f.Name + } + } + if somepath == "" { + return p.zipName() + } + + ss := strings.Split(somepath, "/") + for i, s := range ss { + // $(root)/$(lang)/LC_MESSAGES + // $(root)/$(lang)/LC_RESOURCE + if (s == "LC_MESSAGES" || s == "LC_RESOURCE") && i >= 2 { + return strings.Join(ss[:i-1], "/") + } + } + + return p.zipName() +} + +func (p *zipFS) LocaleList() []string { + var locals []string + for s := range p.lsZip(p.r) { + locals = append(locals, s) + } + sort.Strings(locals) + return locals +} + +func (p *zipFS) LoadMessagesFile(domain, lang, ext string) ([]byte, error) { + trName := p.makeMessagesFileName(domain, lang, ext) + for _, f := range p.r.File { + if f.Name != trName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") +} + +func (p *zipFS) LoadResourceFile(domain, lang, name string) ([]byte, error) { + rcName := p.makeResourceFileName(domain, lang, name) + for _, f := range p.r.File { + if f.Name != rcName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") +} + +func (p *zipFS) String() string { + return "gettext.zipfs(" + p.name + ")" +} + +func (p *zipFS) makeMessagesFileName(domain, lang, ext string) string { + return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.root, lang, domain, ext) +} + +func (p *zipFS) makeResourceFileName(domain, lang, name string) string { + return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.root, lang, domain, name) +} + +func (p *zipFS) lsZip(r *zip.Reader) map[string]bool { + ssMap := make(map[string]bool) + for _, f := range r.File { + if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } + if s != "" { + ssMap[s] = true + } + continue + } + if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } + if s != "" { + ssMap[s] = true + } + continue + } + } + return ssMap +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext.go b/vendor/github.com/chai2010/gettext-go/gettext.go new file mode 100644 index 0000000000..7747188ab4 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext.go @@ -0,0 +1,219 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +var ( + DefaultLanguage string = getDefaultLanguage() // use $(LC_MESSAGES) or $(LANG) or "default" +) + +type Gettexter interface { + FileSystem() FileSystem + + GetDomain() string + SetDomain(domain string) Gettexter + + GetLanguage() string + SetLanguage(lang string) Gettexter + + Gettext(msgid string) string + PGettext(msgctxt, msgid string) string + + NGettext(msgid, msgidPlural string, n int) string + PNGettext(msgctxt, msgid, msgidPlural string, n int) string + + DGettext(domain, msgid string) string + DPGettext(domain, msgctxt, msgid string) string + DNGettext(domain, msgid, msgidPlural string, n int) string + DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string + + Getdata(name string) []byte + DGetdata(domain, name string) []byte +} + +// New create Interface use default language. +func New(domain, path string, data ...interface{}) Gettexter { + return newLocale(domain, path, data...) +} + +var defaultGettexter struct { + lang string + domain string + Gettexter +} + +func init() { + defaultGettexter.lang = getDefaultLanguage() + defaultGettexter.domain = "default" + defaultGettexter.Gettexter = newLocale("", "") +} + +// BindLocale sets and queries program's domains. +// +// Examples: +// BindLocale(New("poedit", "locale")) // bind "poedit" domain +// +// Use zip file: +// BindLocale(New("poedit", "locale.zip")) // bind "poedit" domain +// BindLocale(New("poedit", "locale.zip", zipData)) // bind "poedit" domain +// +// Use FileSystem: +// BindLocale(New("poedit", "name", OS("path/to/dir"))) // bind "poedit" domain +// BindLocale(New("poedit", "name", OS("path/to.zip"))) // bind "poedit" domain +// +func BindLocale(g Gettexter) { + if g != nil { + defaultGettexter.Gettexter = g + defaultGettexter.SetLanguage(defaultGettexter.lang) + } else { + defaultGettexter.Gettexter = newLocale("", "") + defaultGettexter.SetLanguage(defaultGettexter.lang) + } +} + +// SetLanguage sets and queries the program's current lang. +// +// If the lang is not empty string, set the new locale. +// +// If the lang is empty string, don't change anything. +// +// Returns is the current locale. +// +// Examples: +// SetLanguage("") // get locale: return DefaultLocale +// SetLanguage("zh_CN") // set locale: return zh_CN +// SetLanguage("") // get locale: return zh_CN +func SetLanguage(lang string) string { + defaultGettexter.SetLanguage(lang) + return defaultGettexter.GetLanguage() +} + +// SetDomain sets and retrieves the current message domain. +// +// If the domain is not empty string, set the new domains. +// +// If the domain is empty string, don't change anything. +// +// Returns is the all used domains. +// +// Examples: +// SetDomain("poedit") // set domain: poedit +// SetDomain("") // get domain: return poedit +func SetDomain(domain string) string { + defaultGettexter.SetDomain(domain) + return defaultGettexter.GetDomain() +} + +// Gettext attempt to translate a text string into the user's native language, +// by looking up the translation in a message catalog. +// +// It use the caller's function name as the msgctxt. +// +// Examples: +// func Foo() { +// msg := gettext.Gettext("Hello") // msgctxt is "" +// } +func Gettext(msgid string) string { + return defaultGettexter.Gettext(msgid) +} + +// Getdata attempt to translate a resource file into the user's native language, +// by looking up the translation in a message catalog. 
+// +// Examples: +// func Foo() { +// Textdomain("hello") +// BindLocale("hello", "locale.zip", nilOrZipData) +// poems := gettext.Getdata("poems.txt") +// } +func Getdata(name string) []byte { + return defaultGettexter.Getdata(name) +} + +// NGettext attempt to translate a text string into the user's native language, +// by looking up the appropriate plural form of the translation in a message +// catalog. +// +// It use the caller's function name as the msgctxt. +// +// Examples: +// func Foo() { +// msg := gettext.NGettext("%d people", "%d peoples", 2) +// } +func NGettext(msgid, msgidPlural string, n int) string { + return defaultGettexter.NGettext(msgid, msgidPlural, n) +} + +// PGettext attempt to translate a text string into the user's native language, +// by looking up the translation in a message catalog. +// +// Examples: +// func Foo() { +// msg := gettext.PGettext("gettext-go.example", "Hello") // msgctxt is "gettext-go.example" +// } +func PGettext(msgctxt, msgid string) string { + return defaultGettexter.PGettext(msgctxt, msgid) +} + +// PNGettext attempt to translate a text string into the user's native language, +// by looking up the appropriate plural form of the translation in a message +// catalog. +// +// Examples: +// func Foo() { +// msg := gettext.PNGettext("gettext-go.example", "%d people", "%d peoples", 2) +// } +func PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + return defaultGettexter.PNGettext(msgctxt, msgid, msgidPlural, n) +} + +// DGettext like Gettext(), but looking up the message in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DGettext("poedit", "Hello") +// } +func DGettext(domain, msgid string) string { + return defaultGettexter.DGettext(domain, msgid) +} + +// DNGettext like NGettext(), but looking up the message in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.PNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2) +// } +func DNGettext(domain, msgid, msgidPlural string, n int) string { + return defaultGettexter.DNGettext(domain, msgid, msgidPlural, n) +} + +// DPGettext like PGettext(), but looking up the message in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DPGettext("poedit", "gettext-go.example", "Hello") +// } +func DPGettext(domain, msgctxt, msgid string) string { + return defaultGettexter.DPGettext(domain, msgctxt, msgid) +} + +// DPNGettext like PNGettext(), but looking up the message in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DPNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2) +// } +func DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + return defaultGettexter.DPNGettext(domain, msgctxt, msgid, msgidPlural, n) +} + +// DGetdata like Getdata(), but looking up the resource in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DGetdata("hello", "poems.txt") +// } +func DGetdata(domain, name string) []byte { + return defaultGettexter.DGetdata(domain, name) +} diff --git a/vendor/github.com/chai2010/gettext-go/locale.go b/vendor/github.com/chai2010/gettext-go/locale.go new file mode 100644 index 0000000000..e7a2d4b37b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/locale.go @@ -0,0 +1,205 @@ +// Copyright 2020 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "fmt" + "sync" +) + +type _Locale struct { + mutex sync.Mutex + fs FileSystem + lang string + domain string + trMap map[string]*translator + trCurrent *translator +} + +var _ Gettexter = (*_Locale)(nil) + +func newLocale(domain, path string, data ...interface{}) *_Locale { + if domain == "" { + domain = "default" + } + p := &_Locale{ + lang: DefaultLanguage, + domain: domain, + } + if len(data) > 0 { + p.fs = NewFS(path, data[0]) + } else { + p.fs = NewFS(path, nil) + } + + p.syncTrMap() + return p +} + +func (p *_Locale) makeTrMapKey(domain, _Locale string) string { + return domain + "_$$$_" + _Locale +} + +func (p *_Locale) FileSystem() FileSystem { + return p.fs +} + +func (p *_Locale) GetLanguage() string { + p.mutex.Lock() + defer p.mutex.Unlock() + + return p.lang +} +func (p *_Locale) SetLanguage(lang string) Gettexter { + p.mutex.Lock() + defer p.mutex.Unlock() + + if lang == "" { + lang = DefaultLanguage + } + if lang == p.lang { + return p + } + + p.lang = lang + p.syncTrMap() + return p +} + +func (p *_Locale) GetDomain() string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.domain +} + +func (p *_Locale) SetDomain(domain string) Gettexter { + p.mutex.Lock() + defer p.mutex.Unlock() + + if domain == "" || domain == p.domain { + return p + } + + p.domain = domain + p.syncTrMap() + return p +} + +func (p *_Locale) syncTrMap() { + p.trMap = make(map[string]*translator) + trMapKey := p.makeTrMapKey(p.domain, p.lang) + + if tr, ok := p.trMap[trMapKey]; ok { + p.trCurrent = tr + return + } + + // try load po file + if data, err := p.fs.LoadMessagesFile(p.domain, p.lang, ".po"); err == nil { + if tr, err := newPoTranslator(fmt.Sprintf("%s_%s.po", p.domain, p.lang), data); err == nil { + p.trMap[trMapKey] = tr + p.trCurrent = tr + return + } + } + + // try load mo file + if data, err := p.fs.LoadMessagesFile(p.domain, p.lang, ".mo"); err == nil { + if tr, err := newMoTranslator(fmt.Sprintf("%s_%s.mo", p.domain, p.lang), data); err == nil { + p.trMap[trMapKey] = tr + p.trCurrent = tr + return + } + } + + // try load json file + if data, err := p.fs.LoadMessagesFile(p.domain, p.lang, ".json"); err == nil { + if tr, err := newJsonTranslator(p.lang, fmt.Sprintf("%s_%s.json", p.domain, p.lang), data); err == nil { + p.trMap[trMapKey] = tr + p.trCurrent = tr + return + } + } + + // no po/mo file + p.trMap[trMapKey] = nilTranslator + p.trCurrent = nilTranslator + return +} + +func (p *_Locale) Gettext(msgid string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PGettext("", msgid) +} + +func (p *_Locale) PGettext(msgctxt, msgid string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PGettext(msgctxt, msgid) +} + +func (p *_Locale) NGettext(msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PNGettext("", msgid, msgidPlural, n) +} + +func (p *_Locale) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.trCurrent.PNGettext(msgctxt, msgid, msgidPlural, n) +} + +func (p *_Locale) DGettext(domain, msgid string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, "", msgid, "", 0) +} + +func (p *_Locale) DNGettext(domain, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, "", msgid, msgidPlural, n) +} + +func (p *_Locale) DPGettext(domain, msgctxt, msgid string) string { + p.mutex.Lock() + defer 
p.mutex.Unlock() + return p.gettext(domain, msgctxt, msgid, "", 0) +} + +func (p *_Locale) DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, msgctxt, msgid, msgidPlural, n) +} + +func (p *_Locale) Getdata(name string) []byte { + return p.getdata(p.domain, name) +} + +func (p *_Locale) DGetdata(domain, name string) []byte { + return p.getdata(domain, name) +} + +func (p *_Locale) gettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + if f, ok := p.trMap[p.makeTrMapKey(domain, p.lang)]; ok { + return f.PNGettext(msgctxt, msgid, msgidPlural, n) + } + return msgid +} + +func (p *_Locale) getdata(domain, name string) []byte { + if data, err := p.fs.LoadResourceFile(domain, p.lang, name); err == nil { + return data + } + if p.lang != "default" { + if data, err := p.fs.LoadResourceFile(domain, "default", name); err == nil { + return data + } + } + return nil +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/doc.go b/vendor/github.com/chai2010/gettext-go/mo/doc.go new file mode 100644 index 0000000000..5fefc18930 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/doc.go @@ -0,0 +1,74 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mo provides support for reading and writing GNU MO file. + +Examples: + import ( + "github.com/chai2010/gettext-go/mo" + ) + + func main() { + moFile, err := mo.LoadFile("test.mo") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%v", moFile) + } + +GNU MO file struct: + + byte + +------------------------------------------+ + 0 | magic number = 0x950412de | + | | + 4 | file format revision = 0 | + | | + 8 | number of strings | == N + | | + 12 | offset of table with original strings | == O + | | + 16 | offset of table with translation strings | == T + | | + 20 | size of hashing table | == S + | | + 24 | offset of hashing table | == H + | | + . . + . (possibly more entries later) . + . . + | | + O | length & offset 0th string ----------------. + O + 8 | length & offset 1st string ------------------. + ... ... | | + O + ((N-1)*8)| length & offset (N-1)th string | | | + | | | | + T | length & offset 0th translation ---------------. + T + 8 | length & offset 1st translation -----------------. + ... ... | | | | + T + ((N-1)*8)| length & offset (N-1)th translation | | | | | + | | | | | | + H | start hash table | | | | | + ... ... | | | | + H + S * 4 | end hash table | | | | | + | | | | | | + | NUL terminated 0th string <----------------' | | | + | | | | | + | NUL terminated 1st string <------------------' | | + | | | | + ... ... | | + | | | | + | NUL terminated 0th translation <---------------' | + | | | + | NUL terminated 1st translation <-----------------' + | | + ... ... + | | + +------------------------------------------+ + +The GNU MO file specification is at +http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html. +*/ +package mo diff --git a/vendor/github.com/chai2010/gettext-go/mo/encoder.go b/vendor/github.com/chai2010/gettext-go/mo/encoder.go new file mode 100644 index 0000000000..f953fd3cb8 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/encoder.go @@ -0,0 +1,105 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mo + +import ( + "bytes" + "encoding/binary" + "sort" + "strings" +) + +type moHeader struct { + MagicNumber uint32 + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 +} + +type moStrPos struct { + Size uint32 // must keep fields order + Addr uint32 +} + +func encodeFile(f *File) []byte { + hdr := &moHeader{ + MagicNumber: MoMagicLittleEndian, + } + data := encodeData(hdr, f) + data = append(encodeHeader(hdr), data...) + return data +} + +// encode data and init moHeader +func encodeData(hdr *moHeader, f *File) []byte { + msgList := []Message{f.MimeHeader.toMessage()} + for _, v := range f.Messages { + if len(v.MsgId) == 0 { + continue + } + if len(v.MsgStr) == 0 && len(v.MsgStrPlural) == 0 { + continue + } + msgList = append(msgList, v) + } + sort.Slice(msgList, func(i, j int) bool { + return msgList[i].less(&msgList[j]) + }) + + var buf bytes.Buffer + var msgIdPosList = make([]moStrPos, len(msgList)) + var msgStrPosList = make([]moStrPos, len(msgList)) + for i, v := range msgList { + // write msgid + msgId := encodeMsgId(v) + msgIdPosList[i].Addr = uint32(buf.Len() + MoHeaderSize) + msgIdPosList[i].Size = uint32(len(msgId)) + buf.WriteString(msgId) + // write msgstr + msgStr := encodeMsgStr(v) + msgStrPosList[i].Addr = uint32(buf.Len() + MoHeaderSize) + msgStrPosList[i].Size = uint32(len(msgStr)) + buf.WriteString(msgStr) + } + + hdr.MsgIdOffset = uint32(buf.Len() + MoHeaderSize) + binary.Write(&buf, binary.LittleEndian, msgIdPosList) + hdr.MsgStrOffset = uint32(buf.Len() + MoHeaderSize) + binary.Write(&buf, binary.LittleEndian, msgStrPosList) + + hdr.MsgIdCount = uint32(len(msgList)) + return buf.Bytes() +} + +// must called after encodeData +func encodeHeader(hdr *moHeader) []byte { + var buf bytes.Buffer + binary.Write(&buf, binary.LittleEndian, hdr) + return buf.Bytes() +} + +func encodeMsgId(v Message) string { + if v.MsgContext != "" && v.MsgIdPlural != "" { + return v.MsgContext + EotSeparator + v.MsgId + NulSeparator + v.MsgIdPlural + } + if v.MsgContext != "" && v.MsgIdPlural == "" { + return v.MsgContext + EotSeparator + v.MsgId + } + if v.MsgContext == "" && v.MsgIdPlural != "" { + return v.MsgId + NulSeparator + v.MsgIdPlural + } + return v.MsgId +} + +func encodeMsgStr(v Message) string { + if v.MsgIdPlural != "" { + return strings.Join(v.MsgStrPlural, NulSeparator) + } + return v.MsgStr +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/file.go b/vendor/github.com/chai2010/gettext-go/mo/file.go new file mode 100644 index 0000000000..6f7ed161c1 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/file.go @@ -0,0 +1,197 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "encoding/binary" + "fmt" + "io/ioutil" + "strings" +) + +const ( + MoHeaderSize = 28 + MoMagicLittleEndian = 0x950412de + MoMagicBigEndian = 0xde120495 + + EotSeparator = "\x04" // msgctxt and msgid separator + NulSeparator = "\x00" // msgid and msgstr separator +) + +// File represents an MO File. +// +// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html +type File struct { + MagicNumber uint32 + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 + MimeHeader Header + Messages []Message +} + +// Load loads mo file format data. 
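+//
+// A minimal usage sketch (error handling elided):
+//
+//	f, err := mo.Load(data) // data holds raw MO bytes
+//	if err == nil {
+//		for _, m := range f.Messages {
+//			fmt.Println(m.MsgId, "=>", m.MsgStr)
+//		}
+//	}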
+func Load(data []byte) (*File, error) { + return loadData(data) +} + +// Load loads a named mo file. +func LoadFile(path string) (*File, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return loadData(data) +} + +func loadData(data []byte) (*File, error) { + r := bytes.NewReader(data) + + var magicNumber uint32 + if err := binary.Read(r, binary.LittleEndian, &magicNumber); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + var bo binary.ByteOrder + switch magicNumber { + case MoMagicLittleEndian: + bo = binary.LittleEndian + case MoMagicBigEndian: + bo = binary.BigEndian + default: + return nil, fmt.Errorf("gettext: %v", "invalid magic number") + } + + var header struct { + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 + } + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if v := header.MajorVersion; v != 0 && v != 1 { + return nil, fmt.Errorf("gettext: %v", "invalid version number") + } + if v := header.MinorVersion; v != 0 && v != 1 { + return nil, fmt.Errorf("gettext: %v", "invalid version number") + } + + msgIdStart := make([]uint32, header.MsgIdCount) + msgIdLen := make([]uint32, header.MsgIdCount) + if _, err := r.Seek(int64(header.MsgIdOffset), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + for i := 0; i < int(header.MsgIdCount); i++ { + if err := binary.Read(r, bo, &msgIdLen[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if err := binary.Read(r, bo, &msgIdStart[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + } + + msgStrStart := make([]int32, header.MsgIdCount) + msgStrLen := make([]int32, header.MsgIdCount) + if _, err := r.Seek(int64(header.MsgStrOffset), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + for i := 0; i < int(header.MsgIdCount); i++ { + if err := binary.Read(r, bo, &msgStrLen[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if err := binary.Read(r, bo, &msgStrStart[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + } + + file := &File{ + MagicNumber: magicNumber, + MajorVersion: header.MajorVersion, + MinorVersion: header.MinorVersion, + MsgIdCount: header.MsgIdCount, + MsgIdOffset: header.MsgIdOffset, + MsgStrOffset: header.MsgStrOffset, + HashSize: header.HashSize, + HashOffset: header.HashOffset, + } + for i := 0; i < int(header.MsgIdCount); i++ { + if _, err := r.Seek(int64(msgIdStart[i]), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + msgIdData := make([]byte, msgIdLen[i]) + if _, err := r.Read(msgIdData); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + + if _, err := r.Seek(int64(msgStrStart[i]), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + msgStrData := make([]byte, msgStrLen[i]) + if _, err := r.Read(msgStrData); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + + if len(msgIdData) == 0 { + var msg = Message{ + MsgId: string(msgIdData), + MsgStr: string(msgStrData), + } + file.MimeHeader.fromMessage(&msg) + } else { + var msg = Message{ + MsgId: string(msgIdData), + MsgStr: string(msgStrData), + } + // Is this a context message? + if idx := strings.Index(msg.MsgId, EotSeparator); idx != -1 { + msg.MsgContext, msg.MsgId = msg.MsgId[:idx], msg.MsgId[idx+1:] + } + // Is this a plural message? 
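+			// Plural entries pack msgid and msgid_plural around a NUL byte,
+			// and the msgstr side carries all NUL-joined plural translations.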
+ if idx := strings.Index(msg.MsgId, NulSeparator); idx != -1 { + msg.MsgId, msg.MsgIdPlural = msg.MsgId[:idx], msg.MsgId[idx+1:] + msg.MsgStrPlural = strings.Split(msg.MsgStr, NulSeparator) + msg.MsgStr = "" + } + file.Messages = append(file.Messages, msg) + } + } + + return file, nil +} + +// Save saves a mo file. +func (f *File) Save(name string) error { + return ioutil.WriteFile(name, f.Data(), 0666) +} + +// Save returns a mo file format data. +func (f *File) Data() []byte { + return encodeFile(f) +} + +// String returns the po format file string. +func (f *File) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "# version: %d.%d\n", f.MajorVersion, f.MinorVersion) + fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String()) + fmt.Fprintf(&buf, "\n") + + for k, v := range f.Messages { + fmt.Fprintf(&buf, `msgid "%v"`+"\n", k) + fmt.Fprintf(&buf, `msgstr "%s"`+"\n", v.MsgStr) + fmt.Fprintf(&buf, "\n") + } + + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/header.go b/vendor/github.com/chai2010/gettext-go/mo/header.go new file mode 100644 index 0000000000..d8c7a5e3a3 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/header.go @@ -0,0 +1,109 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "fmt" + "strings" +) + +// Header is the initial comments "SOME DESCRIPTIVE TITLE", "YEAR" +// and "FIRST AUTHOR , YEAR" ought to be replaced by sensible information. +// +// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry +type Header struct { + ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION + ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR + POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE + PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE + LastTranslator string // Last-Translator: FIRST AUTHOR + LanguageTeam string // Language-Team: golang-china + Language string // Language: zh_CN + MimeVersion string // MIME-Version: 1.0 + ContentType string // Content-Type: text/plain; charset=UTF-8 + ContentTransferEncoding string // Content-Transfer-Encoding: 8bit + PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 
0 : 1; + XGenerator string // X-Generator: Poedit 1.5.5 + UnknowFields map[string]string +} + +func (p *Header) fromMessage(msg *Message) { + if msg.MsgId != "" || msg.MsgStr == "" { + return + } + lines := strings.Split(msg.MsgStr, "\n") + for i := 0; i < len(lines); i++ { + idx := strings.Index(lines[i], ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(lines[i][:idx]) + val := strings.TrimSpace(lines[i][idx+1:]) + switch strings.ToUpper(key) { + case strings.ToUpper("Project-Id-Version"): + p.ProjectIdVersion = val + case strings.ToUpper("Report-Msgid-Bugs-To"): + p.ReportMsgidBugsTo = val + case strings.ToUpper("POT-Creation-Date"): + p.POTCreationDate = val + case strings.ToUpper("PO-Revision-Date"): + p.PORevisionDate = val + case strings.ToUpper("Last-Translator"): + p.LastTranslator = val + case strings.ToUpper("Language-Team"): + p.LanguageTeam = val + case strings.ToUpper("Language"): + p.Language = val + case strings.ToUpper("MIME-Version"): + p.MimeVersion = val + case strings.ToUpper("Content-Type"): + p.ContentType = val + case strings.ToUpper("Content-Transfer-Encoding"): + p.ContentTransferEncoding = val + case strings.ToUpper("Plural-Forms"): + p.PluralForms = val + case strings.ToUpper("X-Generator"): + p.XGenerator = val + default: + if p.UnknowFields == nil { + p.UnknowFields = make(map[string]string) + } + p.UnknowFields[key] = val + } + } +} + +func (p *Header) toMessage() Message { + return Message{ + MsgStr: p.String(), + } +} + +// String returns the po format header string. +func (p Header) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, `msgid ""`+"\n") + fmt.Fprintf(&buf, `msgstr ""`+"\n") + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language) + if p.MimeVersion != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion) + } + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding) + if p.XGenerator != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator) + } + for k, v := range p.UnknowFields { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/message.go b/vendor/github.com/chai2010/gettext-go/mo/message.go new file mode 100644 index 0000000000..b67bde0b70 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/message.go @@ -0,0 +1,52 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "fmt" +) + +// A MO file is made up of many entries, +// each entry holding the relation between an original untranslated string +// and its corresponding translation. 
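+// A context entry stores msgctxt and msgid joined by an EOT byte, and a plural
+// entry stores msgid and msgid_plural joined by a NUL byte, mirroring the
+// on-disk encoding handled by encoder.go and file.go.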
+// +// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html +type Message struct { + MsgContext string // msgctxt context + MsgId string // msgid untranslated-string + MsgIdPlural string // msgid_plural untranslated-string-plural + MsgStr string // msgstr translated-string + MsgStrPlural []string // msgstr[0] translated-string-case-0 +} + +// String returns the po format entry string. +func (p Message) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId)) + if p.MsgIdPlural != "" { + fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural)) + } + if p.MsgStr != "" { + fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr)) + } + for i := 0; i < len(p.MsgStrPlural); i++ { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i])) + } + return buf.String() +} + +func (m_i *Message) less(m_j *Message) bool { + if a, b := m_i.MsgContext, m_j.MsgContext; a != b { + return a < b + } + if a, b := m_i.MsgId, m_j.MsgId; a != b { + return a < b + } + if a, b := m_i.MsgIdPlural, m_j.MsgIdPlural; a != b { + return a < b + } + return false +} diff --git a/vendor/github.com/chai2010/gettext-go/mo/util.go b/vendor/github.com/chai2010/gettext-go/mo/util.go new file mode 100644 index 0000000000..3804511053 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/mo/util.go @@ -0,0 +1,110 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "strings" +) + +func decodePoString(text string) string { + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + left := strings.Index(lines[i], `"`) + right := strings.LastIndex(lines[i], `"`) + if left < 0 || right < 0 || left == right { + lines[i] = "" + continue + } + line := lines[i][left+1 : right] + data := make([]byte, 0, len(line)) + for i := 0; i < len(line); i++ { + if line[i] != '\\' { + data = append(data, line[i]) + continue + } + if i+1 >= len(line) { + break + } + switch line[i+1] { + case 'n': // \\n -> \n + data = append(data, '\n') + i++ + case 't': // \\t -> \n + data = append(data, '\t') + i++ + case '\\': // \\\ -> ? 
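+			// an escaped backslash decodes to a single literal backslash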
+ data = append(data, '\\') + i++ + } + } + lines[i] = string(data) + } + return strings.Join(lines, "") +} + +func encodePoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + if lines[i] == "" { + if i != len(lines)-1 { + buf.WriteString(`"\n"` + "\n") + } + continue + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + buf.WriteString(`\n"` + "\n") + } + return buf.String() +} + +func encodeCommentPoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + if len(lines) > 1 { + buf.WriteString(`""` + "\n") + } + for i := 0; i < len(lines); i++ { + if len(lines) > 0 { + buf.WriteString("#| ") + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + if i < len(lines)-1 { + buf.WriteString(`\n"` + "\n") + } else { + buf.WriteString(`"`) + } + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/plural/doc.go b/vendor/github.com/chai2010/gettext-go/plural/doc.go new file mode 100644 index 0000000000..31cb8fae9f --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/plural/doc.go @@ -0,0 +1,36 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package plural provides standard plural formulas. + +Examples: + import ( + "github.com/chai2010/gettext-go/plural" + ) + + func main() { + enFormula := plural.Formula("en_US") + xxFormula := plural.Formula("zh_CN") + + fmt.Printf("%s: %d\n", "en", enFormula(0)) + fmt.Printf("%s: %d\n", "en", enFormula(1)) + fmt.Printf("%s: %d\n", "en", enFormula(2)) + fmt.Printf("%s: %d\n", "??", xxFormula(0)) + fmt.Printf("%s: %d\n", "??", xxFormula(1)) + fmt.Printf("%s: %d\n", "??", xxFormula(2)) + fmt.Printf("%s: %d\n", "??", xxFormula(9)) + // Output: + // en: 0 + // en: 0 + // en: 1 + // ??: 0 + // ??: 0 + // ??: 1 + // ??: 8 + } + +See http://www.gnu.org/software/gettext/manual/html_node/Plural-forms.html +*/ +package plural diff --git a/vendor/github.com/chai2010/gettext-go/plural/formula.go b/vendor/github.com/chai2010/gettext-go/plural/formula.go new file mode 100644 index 0000000000..679a1cd50d --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/plural/formula.go @@ -0,0 +1,181 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +import ( + "strings" +) + +// Formula provides the language's standard plural formula. 
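+//
+// A minimal usage sketch (Russian has three plural forms):
+//
+//	f := plural.Formula("ru")
+//	_ = f(1) // 0: first form (1, 21, 31, ...)
+//	_ = f(3) // 1: second form (2-4, 22-24, ...)
+//	_ = f(5) // 2: third form (0, 5-20, ...)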
+func Formula(lang string) func(n int) int { + if idx := index(lang); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + if idx := index("??"); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + return func(n int) int { + return n + } +} + +func index(lang string) int { + for i := 0; i < len(FormsTable); i++ { + if strings.HasPrefix(lang, FormsTable[i].Lang) { + return i + } + } + return -1 +} + +func fmtForms(forms string) string { + forms = strings.TrimSpace(forms) + forms = strings.Replace(forms, " ", "", -1) + return forms +} + +var formulaTable = map[string]func(n int) int{ + fmtForms("nplurals=n; plural=n-1;"): func(n int) int { + if n > 0 { + return n - 1 + } + return 0 + }, + fmtForms("nplurals=1; plural=0;"): func(n int) int { + return 0 + }, + fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=2; plural=(n > 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n != 0 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 2 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 0 || (n%100 > 0 && n%100 < 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 
1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n == 1 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"): func(n int) int { + if n%100 == 1 { + return 0 + } + if n%100 == 2 { + return 1 + } + if n%100 == 3 || n%100 == 4 { + return 2 + } + return 3 + }, +} diff --git a/vendor/github.com/chai2010/gettext-go/plural/table.go b/vendor/github.com/chai2010/gettext-go/plural/table.go new file mode 100644 index 0000000000..cdc50d2110 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/plural/table.go @@ -0,0 +1,55 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +// FormsTable are standard hard-coded plural rules. +// The application developers and the translators need to understand them. +// +// See GNU's gettext library source code: gettext/gettext-tools/src/plural-table.c +var FormsTable = []struct { + Lang string + Language string + Value string +}{ + {"??", "Unknown", "nplurals=1; plural=0;"}, + {"ja", "Japanese", "nplurals=1; plural=0;"}, + {"vi", "Vietnamese", "nplurals=1; plural=0;"}, + {"ko", "Korean", "nplurals=1; plural=0;"}, + {"en", "English", "nplurals=2; plural=(n != 1);"}, + {"de", "German", "nplurals=2; plural=(n != 1);"}, + {"nl", "Dutch", "nplurals=2; plural=(n != 1);"}, + {"sv", "Swedish", "nplurals=2; plural=(n != 1);"}, + {"da", "Danish", "nplurals=2; plural=(n != 1);"}, + {"no", "Norwegian", "nplurals=2; plural=(n != 1);"}, + {"nb", "Norwegian Bokmal", "nplurals=2; plural=(n != 1);"}, + {"nn", "Norwegian Nynorsk", "nplurals=2; plural=(n != 1);"}, + {"fo", "Faroese", "nplurals=2; plural=(n != 1);"}, + {"es", "Spanish", "nplurals=2; plural=(n != 1);"}, + {"pt", "Portuguese", "nplurals=2; plural=(n != 1);"}, + {"it", "Italian", "nplurals=2; plural=(n != 1);"}, + {"bg", "Bulgarian", "nplurals=2; plural=(n != 1);"}, + {"el", "Greek", "nplurals=2; plural=(n != 1);"}, + {"fi", "Finnish", "nplurals=2; plural=(n != 1);"}, + {"et", "Estonian", "nplurals=2; plural=(n != 1);"}, + {"he", "Hebrew", "nplurals=2; plural=(n != 1);"}, + {"eo", "Esperanto", "nplurals=2; plural=(n != 1);"}, + {"hu", "Hungarian", "nplurals=2; plural=(n != 1);"}, + {"tr", "Turkish", "nplurals=2; plural=(n != 1);"}, + {"pt_BR", "Brazilian", "nplurals=2; plural=(n > 1);"}, + {"fr", "French", "nplurals=2; plural=(n > 1);"}, + {"lv", "Latvian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"}, + {"ga", "Irish", "nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"}, + {"ro", "Romanian", "nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"}, + {"lt", "Lithuanian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"ru", "Russian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"uk", "Ukrainian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"be", "Belarusian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"sr", "Serbian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 
0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"hr", "Croatian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"cs", "Czech", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"}, + {"sk", "Slovak", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"}, + {"pl", "Polish", "nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"sl", "Slovenian", "nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"}, +} diff --git a/vendor/github.com/chai2010/gettext-go/po/comment.go b/vendor/github.com/chai2010/gettext-go/po/comment.go new file mode 100644 index 0000000000..d4abe7c106 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/comment.go @@ -0,0 +1,270 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// Comment represents every message's comments. +type Comment struct { + StartLine int // comment start line + TranslatorComment string // # translator-comments // TrimSpace + ExtractedComment string // #. extracted-comments + ReferenceFile []string // #: src/msgcmp.c:338 src/po-lex.c:699 + ReferenceLine []int // #: src/msgcmp.c:338 src/po-lex.c:699 + Flags []string // #, fuzzy,c-format,range:0..10 + PrevMsgContext string // #| msgctxt previous-context + PrevMsgId string // #| msgid previous-untranslated-string +} + +func (p *Comment) less(q *Comment) bool { + if p.StartLine != 0 || q.StartLine != 0 { + return p.StartLine < q.StartLine + } + if a, b := len(p.ReferenceFile), len(q.ReferenceFile); a != b { + return a < b + } + for i := 0; i < len(p.ReferenceFile); i++ { + if a, b := p.ReferenceFile[i], q.ReferenceFile[i]; a != b { + return a < b + } + if a, b := p.ReferenceLine[i], q.ReferenceLine[i]; a != b { + return a < b + } + } + return false +} + +func (p *Comment) readPoComment(r *lineReader) (err error) { + *p = Comment{} + if err = r.skipBlankLine(); err != nil { + return err + } + defer func(oldPos int) { + newPos := r.currentPos() + if newPos != oldPos && err == io.EOF { + err = nil + } + }(r.currentPos()) + + p.StartLine = r.currentPos() + 1 + for { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if len(s) == 0 || s[0] != '#' { + return + } + + if err = p.readTranslatorComment(r); err != nil { + return + } + if err = p.readExtractedComment(r); err != nil { + return + } + if err = p.readReferenceComment(r); err != nil { + return + } + if err = p.readFlagsComment(r); err != nil { + return + } + if err = p.readPrevMsgContext(r); err != nil { + return + } + if err = p.readPrevMsgId(r); err != nil { + return + } + } +} + +func (p *Comment) readTranslatorComment(r *lineReader) (err error) { + const prefix = "# " // .,:| + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < 1 || s[0] != '#' { + r.unreadLine() + return nil + } + if len(s) >= 2 { + switch s[1] { + case '.', ',', ':', '|': + r.unreadLine() + return nil + } + } + if p.TranslatorComment != "" { + p.TranslatorComment += "\n" + } + p.TranslatorComment += strings.TrimSpace(s[1:]) + } +} + +func (p *Comment) readExtractedComment(r *lineReader) (err error) { + const prefix = "#." 
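+	// gather consecutive "#." lines, joining their payloads with newlines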
+ for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + if p.ExtractedComment != "" { + p.ExtractedComment += "\n" + } + p.ExtractedComment += strings.TrimSpace(s[len(prefix):]) + } +} + +func (p *Comment) readReferenceComment(r *lineReader) (err error) { + const prefix = "#:" + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + ss := strings.Split(strings.TrimSpace(s[len(prefix):]), " ") + for i := 0; i < len(ss); i++ { + idx := strings.Index(ss[i], ":") + if idx <= 0 { + continue + } + name := strings.TrimSpace(ss[i][:idx]) + line, _ := strconv.Atoi(strings.TrimSpace(ss[i][idx+1:])) + p.ReferenceFile = append(p.ReferenceFile, name) + p.ReferenceLine = append(p.ReferenceLine, line) + } + } +} + +func (p *Comment) readFlagsComment(r *lineReader) (err error) { + const prefix = "#," + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + ss := strings.Split(strings.TrimSpace(s[len(prefix):]), ",") + for i := 0; i < len(ss); i++ { + p.Flags = append(p.Flags, strings.TrimSpace(ss[i])) + } + } +} + +func (p *Comment) readPrevMsgContext(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !rePrevMsgContextComments.MatchString(s) { + return + } + p.PrevMsgContext, err = p.readString(r) + return +} + +func (p *Comment) readPrevMsgId(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !rePrevMsgIdComments.MatchString(s) { + return + } + p.PrevMsgId, err = p.readString(r) + return +} + +func (p *Comment) readString(r *lineReader) (msg string, err error) { + var s string + if s, _, err = r.readLine(); err != nil { + return + } + msg += decodePoString(s) + for { + if s, _, err = r.readLine(); err != nil { + return + } + if !reStringLineComments.MatchString(s) { + r.unreadLine() + break + } + msg += decodePoString(s) + } + return +} + +// GetFuzzy gets the fuzzy flag. +func (p *Comment) GetFuzzy() bool { + for _, s := range p.Flags { + if s == "fuzzy" { + return true + } + } + return false +} + +// SetFuzzy sets the fuzzy flag. +func (p *Comment) SetFuzzy(fuzzy bool) { + // +} + +// String returns the po format comment string. +func (p Comment) String() string { + var buf bytes.Buffer + if p.TranslatorComment != "" { + ss := strings.Split(p.TranslatorComment, "\n") + for i := 0; i < len(ss); i++ { + fmt.Fprintf(&buf, "# %s\n", ss[i]) + } + } + if p.ExtractedComment != "" { + ss := strings.Split(p.ExtractedComment, "\n") + for i := 0; i < len(ss); i++ { + fmt.Fprintf(&buf, "#. 
%s\n", ss[i]) + } + } + if a, b := len(p.ReferenceFile), len(p.ReferenceLine); a != 0 && a == b { + fmt.Fprintf(&buf, "#:") + for i := 0; i < len(p.ReferenceFile); i++ { + fmt.Fprintf(&buf, " %s:%d", p.ReferenceFile[i], p.ReferenceLine[i]) + } + fmt.Fprintf(&buf, "\n") + } + if len(p.Flags) != 0 { + fmt.Fprintf(&buf, "#, %s", p.Flags[0]) + for i := 1; i < len(p.Flags); i++ { + fmt.Fprintf(&buf, ", %s", p.Flags[i]) + } + fmt.Fprintf(&buf, "\n") + } + if p.PrevMsgContext != "" { + s := encodeCommentPoString(p.PrevMsgContext) + fmt.Fprintf(&buf, "#| msgctxt %s\n", s) + } + if p.PrevMsgId != "" { + s := encodeCommentPoString(p.PrevMsgId) + fmt.Fprintf(&buf, "#| msgid %s\n", s) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/po/doc.go b/vendor/github.com/chai2010/gettext-go/po/doc.go new file mode 100644 index 0000000000..6cfa2a24be --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/doc.go @@ -0,0 +1,24 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package po provides support for reading and writing GNU PO file. + +Examples: + import ( + "github.com/chai2010/gettext-go/po" + ) + + func main() { + poFile, err := po.LoadFile("test.po") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%v", poFile) + } + +The GNU PO file specification is at +http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html. +*/ +package po diff --git a/vendor/github.com/chai2010/gettext-go/po/file.go b/vendor/github.com/chai2010/gettext-go/po/file.go new file mode 100644 index 0000000000..4a122eeb8b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/file.go @@ -0,0 +1,81 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sort" +) + +// File represents an PO File. +// +// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html +type File struct { + MimeHeader Header + Messages []Message +} + +// Load loads po file format data. +func Load(data []byte) (*File, error) { + return loadData(data) +} + +// LoadFile loads a named po file. +func LoadFile(path string) (*File, error) { + data, err := ioutil.ReadFile(path) + if err != nil { + return nil, err + } + return loadData(data) +} + +func loadData(data []byte) (*File, error) { + r := newLineReader(string(data)) + var file File + for { + var msg Message + if err := msg.readPoEntry(r); err != nil { + if err == io.EOF { + return &file, nil + } + return nil, err + } + if msg.MsgId == "" { + file.MimeHeader.parseHeader(&msg) + continue + } + file.Messages = append(file.Messages, msg) + } +} + +// Save saves a po file. +func (f *File) Save(name string) error { + return ioutil.WriteFile(name, []byte(f.String()), 0666) +} + +// Save returns a po file format data. +func (f *File) Data() []byte { + // sort the massge as ReferenceFile/ReferenceLine field + var messages []Message + messages = append(messages, f.Messages...) + sort.Slice(messages, func(i, j int) bool { + return messages[i].less(&messages[j]) + }) + + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String()) + for i := 0; i < len(messages); i++ { + fmt.Fprintf(&buf, "%s\n", messages[i].String()) + } + return buf.Bytes() +} + +// String returns the po format file string. 
+func (f *File) String() string { + return string(f.Data()) +} diff --git a/vendor/github.com/chai2010/gettext-go/po/header.go b/vendor/github.com/chai2010/gettext-go/po/header.go new file mode 100644 index 0000000000..a9b5b6671b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/header.go @@ -0,0 +1,106 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "strings" +) + +// Header is the initial comments "SOME DESCRIPTIVE TITLE", "YEAR" +// and "FIRST AUTHOR , YEAR" ought to be replaced by sensible information. +// +// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry +type Header struct { + Comment // Header Comments + ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION + ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR + POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE + PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE + LastTranslator string // Last-Translator: FIRST AUTHOR + LanguageTeam string // Language-Team: golang-china + Language string // Language: zh_CN + MimeVersion string // MIME-Version: 1.0 + ContentType string // Content-Type: text/plain; charset=UTF-8 + ContentTransferEncoding string // Content-Transfer-Encoding: 8bit + PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1; + XGenerator string // X-Generator: Poedit 1.5.5 + UnknowFields map[string]string +} + +func (p *Header) parseHeader(msg *Message) { + if msg.MsgId != "" || msg.MsgStr == "" { + return + } + lines := strings.Split(msg.MsgStr, "\n") + for i := 0; i < len(lines); i++ { + idx := strings.Index(lines[i], ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(lines[i][:idx]) + val := strings.TrimSpace(lines[i][idx+1:]) + switch strings.ToUpper(key) { + case strings.ToUpper("Project-Id-Version"): + p.ProjectIdVersion = val + case strings.ToUpper("Report-Msgid-Bugs-To"): + p.ReportMsgidBugsTo = val + case strings.ToUpper("POT-Creation-Date"): + p.POTCreationDate = val + case strings.ToUpper("PO-Revision-Date"): + p.PORevisionDate = val + case strings.ToUpper("Last-Translator"): + p.LastTranslator = val + case strings.ToUpper("Language-Team"): + p.LanguageTeam = val + case strings.ToUpper("Language"): + p.Language = val + case strings.ToUpper("MIME-Version"): + p.MimeVersion = val + case strings.ToUpper("Content-Type"): + p.ContentType = val + case strings.ToUpper("Content-Transfer-Encoding"): + p.ContentTransferEncoding = val + case strings.ToUpper("Plural-Forms"): + p.PluralForms = val + case strings.ToUpper("X-Generator"): + p.XGenerator = val + default: + if p.UnknowFields == nil { + p.UnknowFields = make(map[string]string) + } + p.UnknowFields[key] = val + } + } + p.Comment = msg.Comment +} + +// String returns the po format header string. 
+func (p Header) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + fmt.Fprintf(&buf, `msgid ""`+"\n") + fmt.Fprintf(&buf, `msgstr ""`+"\n") + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language) + if p.MimeVersion != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion) + } + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding) + if p.XGenerator != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator) + } + for k, v := range p.UnknowFields { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/po/line_reader.go b/vendor/github.com/chai2010/gettext-go/po/line_reader.go new file mode 100644 index 0000000000..8597273a2b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/line_reader.go @@ -0,0 +1,62 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "io" + "strings" +) + +type lineReader struct { + lines []string + pos int +} + +func newLineReader(data string) *lineReader { + data = strings.Replace(data, "\r", "", -1) + lines := strings.Split(data, "\n") + return &lineReader{lines: lines} +} + +func (r *lineReader) skipBlankLine() error { + for ; r.pos < len(r.lines); r.pos++ { + if strings.TrimSpace(r.lines[r.pos]) != "" { + break + } + } + if r.pos >= len(r.lines) { + return io.EOF + } + return nil +} + +func (r *lineReader) currentPos() int { + return r.pos +} + +func (r *lineReader) currentLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + return +} + +func (r *lineReader) readLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + r.pos++ + return +} + +func (r *lineReader) unreadLine() { + if r.pos >= 0 { + r.pos-- + } +} diff --git a/vendor/github.com/chai2010/gettext-go/po/message.go b/vendor/github.com/chai2010/gettext-go/po/message.go new file mode 100644 index 0000000000..39936dcc7b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/message.go @@ -0,0 +1,193 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// A PO file is made up of many entries, +// each entry holding the relation between an original untranslated string +// and its corresponding translation. 
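+//
+// For example, a single translated entry with a context looks like:
+//
+//	msgctxt "menu"
+//	msgid "Open"
+//	msgstr "Ouvrir"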
+// +// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html +type Message struct { + Comment // Coments + MsgContext string // msgctxt context + MsgId string // msgid untranslated-string + MsgIdPlural string // msgid_plural untranslated-string-plural + MsgStr string // msgstr translated-string + MsgStrPlural []string // msgstr[0] translated-string-case-0 +} + +func (p *Message) less(q *Message) bool { + if p.Comment.less(&q.Comment) { + return true + } + if a, b := p.MsgContext, q.MsgContext; a != b { + return a < b + } + if a, b := p.MsgId, q.MsgId; a != b { + return a < b + } + if a, b := p.MsgIdPlural, q.MsgIdPlural; a != b { + return a < b + } + return false +} + +func (p *Message) readPoEntry(r *lineReader) (err error) { + *p = Message{} + if err = r.skipBlankLine(); err != nil { + return + } + defer func(oldPos int) { + newPos := r.currentPos() + if newPos != oldPos && err == io.EOF { + err = nil + } + }(r.currentPos()) + + if err = p.Comment.readPoComment(r); err != nil { + return + } + for { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + + if p.isInvalidLine(s) { + err = fmt.Errorf("gettext: line %d, %v", r.currentPos(), "invalid line") + return + } + if reComment.MatchString(s) || reBlankLine.MatchString(s) { + return + } + + if err = p.readMsgContext(r); err != nil { + return + } + if err = p.readMsgId(r); err != nil { + return + } + if err = p.readMsgIdPlural(r); err != nil { + return + } + if err = p.readMsgStrOrPlural(r); err != nil { + return + } + } +} + +func (p *Message) readMsgContext(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgContext.MatchString(s) { + return + } + p.MsgContext, err = p.readString(r) + return +} + +func (p *Message) readMsgId(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgId.MatchString(s) { + return + } + p.MsgId, err = p.readString(r) + return +} + +func (p *Message) readMsgIdPlural(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgIdPlural.MatchString(s) { + return + } + p.MsgIdPlural, err = p.readString(r) + return nil +} + +func (p *Message) readMsgStrOrPlural(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgStr.MatchString(s) && !reMsgStrPlural.MatchString(s) { + return + } + if reMsgStrPlural.MatchString(s) { + left, right := strings.Index(s, `[`), strings.LastIndex(s, `]`) + idx, _ := strconv.Atoi(s[left+1 : right]) + s, err = p.readString(r) + if n := len(p.MsgStrPlural); (idx + 1) > n { + p.MsgStrPlural = append(p.MsgStrPlural, make([]string, (idx+1)-n)...) + } + p.MsgStrPlural[idx] = s + } else { + p.MsgStr, err = p.readString(r) + } + return nil +} + +func (p *Message) readString(r *lineReader) (msg string, err error) { + var s string + if s, _, err = r.readLine(); err != nil { + return + } + msg += decodePoString(s) + for { + if s, _, err = r.readLine(); err != nil { + return + } + if !reStringLine.MatchString(s) { + r.unreadLine() + break + } + msg += decodePoString(s) + } + return +} + +// String returns the po format entry string. 
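+//
+// Plural entries render one msgstr[i] line per plural form, and an empty
+// translation is emitted as "" so the entry stays parseable.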
+func (p Message) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + if p.MsgContext != "" { + fmt.Fprintf(&buf, "msgctxt %s", encodePoString(p.MsgContext)) + } + fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId)) + if p.MsgIdPlural != "" { + fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural)) + } + if len(p.MsgStrPlural) == 0 { + if p.MsgStr != "" { + fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr)) + } else { + fmt.Fprintf(&buf, "msgstr %s", `""`+"\n") + } + } else { + for i := 0; i < len(p.MsgStrPlural); i++ { + if p.MsgStrPlural[i] != "" { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i])) + } else { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, `""`+"\n") + } + } + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/po/re.go b/vendor/github.com/chai2010/gettext-go/po/re.go new file mode 100644 index 0000000000..67c240a57b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/re.go @@ -0,0 +1,58 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "regexp" +) + +var ( + reComment = regexp.MustCompile(`^#`) // # + reExtractedComments = regexp.MustCompile(`^#\.`) // #. + reReferenceComments = regexp.MustCompile(`^#:`) // #: + reFlagsComments = regexp.MustCompile(`^#,`) // #, fuzzy,c-format + rePrevMsgContextComments = regexp.MustCompile(`^#\|\s+msgctxt`) // #| msgctxt + rePrevMsgIdComments = regexp.MustCompile(`^#\|\s+msgid`) // #| msgid + reStringLineComments = regexp.MustCompile(`^#\|\s+".*"\s*$`) // #| "message" + + reMsgContext = regexp.MustCompile(`^msgctxt\s+".*"\s*$`) // msgctxt + reMsgId = regexp.MustCompile(`^msgid\s+".*"\s*$`) // msgid + reMsgIdPlural = regexp.MustCompile(`^msgid_plural\s+".*"\s*$`) // msgid_plural + reMsgStr = regexp.MustCompile(`^msgstr\s*".*"\s*$`) // msgstr + reMsgStrPlural = regexp.MustCompile(`^msgstr\s*(\[\d+\])\s*".*"\s*$`) // msgstr[0] + reStringLine = regexp.MustCompile(`^\s*".*"\s*$`) // "message" + reBlankLine = regexp.MustCompile(`^\s*$`) // +) + +func (p *Message) isInvalidLine(s string) bool { + if reComment.MatchString(s) { + return false + } + if reBlankLine.MatchString(s) { + return false + } + + if reMsgContext.MatchString(s) { + return false + } + if reMsgId.MatchString(s) { + return false + } + if reMsgIdPlural.MatchString(s) { + return false + } + if reMsgStr.MatchString(s) { + return false + } + if reMsgStrPlural.MatchString(s) { + return false + } + + if reStringLine.MatchString(s) { + return false + } + + return true +} diff --git a/vendor/github.com/chai2010/gettext-go/po/util.go b/vendor/github.com/chai2010/gettext-go/po/util.go new file mode 100644 index 0000000000..d8b3b0e254 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/po/util.go @@ -0,0 +1,114 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
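+// decodePoString and encodePoString below are near-inverses over PO string
+// quoting; a hedged sketch:
+//
+//	s := decodePoString(`"a\tb"`) // expands escapes: a<TAB>b
+//	_ = encodePoString(s)         // re-quotes as "a\tb" plus a newline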
+ +package po + +import ( + "bytes" + "strings" +) + +func decodePoString(text string) string { + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + left := strings.Index(lines[i], `"`) + right := strings.LastIndex(lines[i], `"`) + if left < 0 || right < 0 || left == right { + lines[i] = "" + continue + } + line := lines[i][left+1 : right] + data := make([]byte, 0, len(line)) + for i := 0; i < len(line); i++ { + if line[i] != '\\' { + data = append(data, line[i]) + continue + } + if i+1 >= len(line) { + break + } + switch line[i+1] { + case 'n': // \\n -> \n + data = append(data, '\n') + i++ + case 't': // \\t -> \n + data = append(data, '\t') + i++ + case '\\': // \\\ -> ? + data = append(data, '\\') + i++ + } + } + lines[i] = string(data) + } + return strings.Join(lines, "") +} + +func encodePoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + if lines[i] == "" { + if i != len(lines)-1 { + buf.WriteString(`"\n"` + "\n") + } + continue + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + if i < len(lines)-1 { + buf.WriteString(`\n"` + "\n") + } else { + buf.WriteString(`"` + "\n") + } + } + return buf.String() +} + +func encodeCommentPoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + if len(lines) > 1 { + buf.WriteString(`""` + "\n") + } + for i := 0; i < len(lines); i++ { + if len(lines) > 0 { + buf.WriteString("#| ") + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + if i < len(lines)-1 { + buf.WriteString(`\n"` + "\n") + } else { + buf.WriteString(`"`) + } + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/tr.go b/vendor/github.com/chai2010/gettext-go/tr.go new file mode 100644 index 0000000000..5b9d08f426 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/tr.go @@ -0,0 +1,175 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
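+// translator below keys messages by "msgctxt\x04msgid" and picks plural forms
+// via the language's formula; a hedged lookup sketch (the file name and poData
+// are assumptions):
+//
+//	tr, err := newPoTranslator("de_default.po", poData)
+//	if err == nil {
+//		_ = tr.PNGettext("", "%d file", "%d files", 3) // "de" selects form 1
+//	}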
+ +package gettext + +import ( + "encoding/json" + + "github.com/chai2010/gettext-go/mo" + "github.com/chai2010/gettext-go/plural" + "github.com/chai2010/gettext-go/po" +) + +var nilTranslator = &translator{ + MessageMap: make(map[string]mo.Message), + PluralFormula: plural.Formula("??"), +} + +type translator struct { + MessageMap map[string]mo.Message + PluralFormula func(n int) int +} + +func newMoTranslator(name string, data []byte) (*translator, error) { + var ( + f *mo.File + err error + ) + if len(data) != 0 { + f, err = mo.Load(data) + } else { + f, err = mo.LoadFile(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = v + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func newPoTranslator(name string, data []byte) (*translator, error) { + var ( + f *po.File + err error + ) + if len(data) != 0 { + f, err = po.Load(data) + } else { + f, err = po.LoadFile(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{ + MsgContext: v.MsgContext, + MsgId: v.MsgId, + MsgIdPlural: v.MsgIdPlural, + MsgStr: v.MsgStr, + MsgStrPlural: v.MsgStrPlural, + } + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func newJsonTranslator(lang, name string, jsonData []byte) (*translator, error) { + var msgList []struct { + MsgContext string `json:"msgctxt"` // msgctxt context + MsgId string `json:"msgid"` // msgid untranslated-string + MsgIdPlural string `json:"msgid_plural"` // msgid_plural untranslated-string-plural + MsgStr []string `json:"msgstr"` // msgstr translated-string + } + if err := json.Unmarshal(jsonData, &msgList); err != nil { + return nil, err + } + + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + PluralFormula: plural.Formula(lang), + } + + for _, v := range msgList { + var v_MsgStr string + var v_MsgStrPlural = v.MsgStr + + if len(v.MsgStr) != 0 { + v_MsgStr = v.MsgStr[0] + } + + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{ + MsgContext: v.MsgContext, + MsgId: v.MsgId, + MsgIdPlural: v.MsgIdPlural, + MsgStr: v_MsgStr, + MsgStrPlural: v_MsgStrPlural, + } + } + return tr, nil +} + +func (p *translator) PGettext(msgctxt, msgid string) string { + return p.findMsgStr(msgctxt, msgid) +} + +func (p *translator) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + n = p.PluralFormula(n) + if ss := p.findMsgStrPlural(msgctxt, msgid, msgidPlural); len(ss) != 0 { + if n >= len(ss) { + n = len(ss) - 1 + } + if ss[n] != "" { + return ss[n] + } + } + if msgidPlural != "" && n > 0 { + return msgidPlural + } + return msgid +} + +func (p *translator) findMsgStr(msgctxt, msgid string) string { + key := p.makeMapKey(msgctxt, msgid) + if v, ok := p.MessageMap[key]; ok { + if v.MsgStr != "" { + return v.MsgStr + } + } + return msgid +} + +func (p *translator) findMsgStrPlural(msgctxt, msgid, msgidPlural string) []string { + key := p.makeMapKey(msgctxt, msgid) + if v, ok := p.MessageMap[key]; ok { + if len(v.MsgIdPlural) != 0 { + if len(v.MsgStrPlural) != 0 { + return v.MsgStrPlural + } else { + 
return nil + } + } else { + if len(v.MsgStr) != 0 { + return []string{v.MsgStr} + } else { + return nil + } + } + } + return nil +} + +func (p *translator) makeMapKey(msgctxt, msgid string) string { + if msgctxt != "" { + return msgctxt + mo.EotSeparator + msgid + } + return msgid +} diff --git a/vendor/github.com/chai2010/gettext-go/util.go b/vendor/github.com/chai2010/gettext-go/util.go new file mode 100644 index 0000000000..b8269a605c --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/util.go @@ -0,0 +1,34 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "os" + "strings" +) + +func getDefaultLanguage() string { + if v := os.Getenv("LC_MESSAGES"); v != "" { + return simplifiedLanguage(v) + } + if v := os.Getenv("LANG"); v != "" { + return simplifiedLanguage(v) + } + return "default" +} + +func simplifiedLanguage(lang string) string { + // en_US/en_US.UTF-8/zh_CN/zh_TW/el_GR@euro/... + if idx := strings.Index(lang, ":"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "@"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "."); idx != -1 { + lang = lang[:idx] + } + return strings.TrimSpace(lang) +} diff --git a/vendor/github.com/exponent-io/jsonpath/.gitignore b/vendor/github.com/exponent-io/jsonpath/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml new file mode 100644 index 0000000000..f4f458a416 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.5 + - tip diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE new file mode 100644 index 0000000000..5419772507 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Exponent Labs LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md new file mode 100644 index 0000000000..382fb3138c --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/README.md @@ -0,0 +1,66 @@ +[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath) +[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath) + +# jsonpath + +This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder places where a json.Decoder would have been used. + +This Decoder has the following enhancements... + * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions). + * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path. + * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token. + * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string. + +## Installation + + go get -u github.com/exponent-io/jsonpath + +## Example Usage + +#### SeekTo + +```go +import "github.com/exponent-io/jsonpath" + +var j = []byte(`[ + {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}}, + {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}} +]`) + +w := json.NewDecoder(bytes.NewReader(j)) +var v interface{} + +w.SeekTo(1, "Point", "G") +w.Decode(&v) // v is 218 +``` + +#### Scan with PathActions + +```go +var j = []byte(`{"colors":[ + {"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}}, + {"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}} +]}`) + +var actions PathActions + +// Extract the value at Point.A +actions.Add(func(d *Decoder) error { + var alpha int + err := d.Decode(&alpha) + fmt.Printf("Alpha: %v\n", alpha) + return err +}, "Point", "A") + +w := NewDecoder(bytes.NewReader(j)) +w.SeekTo("colors", 0) + +var ok = true +var err error +for ok { + ok, err = w.Scan(&actions) + if err != nil && err != io.EOF { + panic(err) + } +} +``` diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go new file mode 100644 index 0000000000..31de46c738 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/decoder.go @@ -0,0 +1,210 @@ +package jsonpath + +import ( + "encoding/json" + "io" +) + +// KeyString is returned from Decoder.Token to represent each key in a JSON object value. +type KeyString string + +// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens. +type Decoder struct { + json.Decoder + + path JsonPath + context jsonContext +} + +// NewDecoder creates a new instance of the extended JSON Decoder. 
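+//
+// A minimal sketch (mirroring the README's SeekTo example; j holds sample JSON):
+//
+//	d := jsonpath.NewDecoder(bytes.NewReader(j))
+//	if ok, _ := d.SeekTo(1, "Point", "G"); ok {
+//		var v interface{}
+//		_ = d.Decode(&v) // v is 218 for the README input
+//	}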
+func NewDecoder(r io.Reader) *Decoder { + return &Decoder{Decoder: *json.NewDecoder(r)} +} + +// SeekTo causes the Decoder to move forward to a given path in the JSON structure. +// +// The path argument must consist of strings or integers. Each string specifies a JSON object key, and +// each integer specifies an index into a JSON array. +// +// Consider the JSON structure +// +// { "a": [0,"s",12e4,{"b":0,"v":35} ] } +// +// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object, +// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v". +// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35. +// +// SeekTo returns a boolean value indicating whether a match was found. +// +// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only. +func (d *Decoder) SeekTo(path ...interface{}) (bool, error) { + + if len(path) == 0 { + return len(d.path) == 0, nil + } + last := len(path) - 1 + if i, ok := path[last].(int); ok { + path[last] = i - 1 + } + + for { + if d.path.Equal(path) { + return true, nil + } + _, err := d.Token() + if err == io.EOF { + return false, nil + } else if err != nil { + return false, err + } + } +} + +// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is +// equivalent to encoding/json.Decode(). +func (d *Decoder) Decode(v interface{}) error { + switch d.context { + case objValue: + d.context = objKey + break + case arrValue: + d.path.incTop() + break + } + return d.Decoder.Decode(v) +} + +// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the +// position of the most-recently parsed token. +func (d *Decoder) Path() JsonPath { + p := make(JsonPath, len(d.path)) + copy(p, d.path) + return p +} + +// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes +// between strings that are keys and strings that are values. String tokens that are object keys are returned as a +// KeyString rather than as a native string. +func (d *Decoder) Token() (json.Token, error) { + t, err := d.Decoder.Token() + if err != nil { + return t, err + } + + if t == nil { + switch d.context { + case objValue: + d.context = objKey + break + case arrValue: + d.path.incTop() + break + } + return t, err + } + + switch t := t.(type) { + case json.Delim: + switch t { + case json.Delim('{'): + if d.context == arrValue { + d.path.incTop() + } + d.path.push("") + d.context = objKey + break + case json.Delim('}'): + d.path.pop() + d.context = d.path.inferContext() + break + case json.Delim('['): + if d.context == arrValue { + d.path.incTop() + } + d.path.push(-1) + d.context = arrValue + break + case json.Delim(']'): + d.path.pop() + d.context = d.path.inferContext() + break + } + case float64, json.Number, bool: + switch d.context { + case objValue: + d.context = objKey + break + case arrValue: + d.path.incTop() + break + } + break + case string: + switch d.context { + case objKey: + d.path.nameTop(t) + d.context = objValue + return KeyString(t), err + case objValue: + d.context = objKey + case arrValue: + d.path.incTop() + } + break + } + + return t, err +} + +// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array), +// invoking each matching PathAction along the way.
+// +// Scan returns true if there are more contiguous values to scan (for example in an array). +func (d *Decoder) Scan(ext *PathActions) (bool, error) { + + rootPath := d.Path() + + // If this is an array path, increment the root path in our local copy. + if rootPath.inferContext() == arrValue { + rootPath.incTop() + } + + for { + // advance the token position + _, err := d.Token() + if err != nil { + return false, err + } + + match: + var relPath JsonPath + + // capture the new JSON path + path := d.Path() + + if len(path) > len(rootPath) { + // capture the path relative to where the scan started + relPath = path[len(rootPath):] + } else { + // if the path is not longer than the root, then we are done with this scan + // return boolean flag indicating if there are more items to scan at the same level + return d.Decoder.More(), nil + } + + // match the relative path against the path actions + if node := ext.node.match(relPath); node != nil { + if node.action != nil { + // we have a match so execute the action + err = node.action(d) + if err != nil { + return d.Decoder.More(), err + } + // The action may have advanced the decoder. If we are in an array, advancing it further would + // skip tokens. So, if we are scanning an array, jump to the top without advancing the token. + if d.path.inferContext() == arrValue && d.Decoder.More() { + goto match + } + } + } + } +} diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go new file mode 100644 index 0000000000..d7db2ad336 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/path.go @@ -0,0 +1,71 @@ +// Package jsonpath extends the Go runtime's json.Decoder, enabling navigation of a stream of JSON tokens. +package jsonpath + +import "fmt" + +type jsonContext int + +const ( + none jsonContext = iota + objKey + objValue + arrValue +) + +// AnyIndex can be used in a pattern to match any array index. +const AnyIndex = -2 + +// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and +// each integer specifies an index into a JSON array. +type JsonPath []interface{} + +func (p *JsonPath) push(n interface{}) { *p = append(*p, n) } +func (p *JsonPath) pop() { *p = (*p)[:len(*p)-1] } + +// increment the index at the top of the stack (must be an array index) +func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 } + +// name the key at the top of the stack (must be an object key) +func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n } + +// infer the context from the item at the top of the stack +func (p *JsonPath) inferContext() jsonContext { + if len(*p) == 0 { + return none + } + t := (*p)[len(*p)-1] + switch t.(type) { + case string: + return objKey + case int: + return arrValue + default: + panic(fmt.Sprintf("Invalid stack type %T", t)) + } +} + +// Equal tests for equality between two JsonPath types.
+func (p *JsonPath) Equal(o JsonPath) bool { + if len(*p) != len(o) { + return false + } + for i, v := range *p { + if v != o[i] { + return false + } + } + return true +} + +// HasPrefix reports whether o is a prefix of p. +func (p *JsonPath) HasPrefix(o JsonPath) bool { + if len(o) > len(*p) { + return false + } + for i, v := range o { + if v != (*p)[i] { + return false + } + } + return true +} diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go new file mode 100644 index 0000000000..497ed686ca --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/pathaction.go @@ -0,0 +1,61 @@ +package jsonpath + +// pathNode is used to construct a trie of paths to be matched +type pathNode struct { + matchOn interface{} // string, or integer + childNodes []pathNode + action DecodeAction +} + +// match climbs the trie to find a node that matches the given JSON path. +func (n *pathNode) match(path JsonPath) *pathNode { + var node *pathNode = n + for _, ps := range path { + found := false + for i, n := range node.childNodes { + if n.matchOn == ps { + node = &node.childNodes[i] + found = true + break + } else if _, ok := ps.(int); ok && n.matchOn == AnyIndex { + node = &node.childNodes[i] + found = true + break + } + } + if !found { + return nil + } + } + return node +} + +// PathActions represents a collection of DecodeAction functions that should be called at certain path positions +// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams. +type PathActions struct { + node pathNode +} + +// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail. +type DecodeAction func(d *Decoder) error + +// Add specifies an action to call on the Decoder when the specified path is encountered.
+func (je *PathActions) Add(action DecodeAction, path ...interface{}) { + + var node *pathNode = &je.node + for _, ps := range path { + found := false + for i, n := range node.childNodes { + if n.matchOn == ps { + node = &node.childNodes[i] + found = true + break + } + } + if !found { + node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) + node = &node.childNodes[len(node.childNodes)-1] + } + } + node.action = action +} diff --git a/vendor/github.com/fvbommel/sortorder/.gitignore b/vendor/github.com/fvbommel/sortorder/.gitignore new file mode 100644 index 0000000000..c021733e25 --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/.gitignore @@ -0,0 +1,19 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so +# Folders +_obj +_test +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* +_testmain.go +*.exe +*.test +*.prof diff --git a/vendor/github.com/fvbommel/sortorder/LICENSE b/vendor/github.com/fvbommel/sortorder/LICENSE new file mode 100644 index 0000000000..5c695fb590 --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/LICENSE @@ -0,0 +1,17 @@ +The MIT License (MIT) +Copyright (c) 2015 Frits van Bommel +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/fvbommel/sortorder/README.md b/vendor/github.com/fvbommel/sortorder/README.md new file mode 100644 index 0000000000..06779c8852 --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/README.md @@ -0,0 +1,9 @@ +# sortorder [![PkgGoDev](https://pkg.go.dev/badge/github.com/fvbommel/sortorder)](https://pkg.go.dev/github.com/fvbommel/sortorder) + + import "github.com/fvbommel/sortorder" + +Sort orders and comparison functions. + +Case-insensitive sort orders are in the `casefolded` sub-package +because it pulls in the Unicode tables in the standard library, +which can add significantly to the size of binaries. diff --git a/vendor/github.com/fvbommel/sortorder/doc.go b/vendor/github.com/fvbommel/sortorder/doc.go new file mode 100644 index 0000000000..a7dd9585d0 --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/doc.go @@ -0,0 +1,5 @@ +// Package sortorder implements sort orders and comparison functions. +// +// Currently, it only implements so-called "natural order", where integers +// embedded in strings are compared by value. 
+package sortorder // import "github.com/fvbommel/sortorder" diff --git a/vendor/github.com/fvbommel/sortorder/natsort.go b/vendor/github.com/fvbommel/sortorder/natsort.go new file mode 100644 index 0000000000..e4f15110b8 --- /dev/null +++ b/vendor/github.com/fvbommel/sortorder/natsort.go @@ -0,0 +1,76 @@ +package sortorder + +// Natural implements sort.Interface to sort strings in natural order. This +// means that e.g. "abc2" < "abc12". +// +// Non-digit sequences and numbers are compared separately. The former are +// compared bytewise, while digits are compared numerically (except that +// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") +// +// Limitation: only ASCII digits (0-9) are considered. +type Natural []string + +func (n Natural) Len() int { return len(n) } +func (n Natural) Swap(i, j int) { n[i], n[j] = n[j], n[i] } +func (n Natural) Less(i, j int) bool { return NaturalLess(n[i], n[j]) } + +func isDigit(b byte) bool { return '0' <= b && b <= '9' } + +// NaturalLess compares two strings using natural ordering. This means that e.g. +// "abc2" < "abc12". +// +// Non-digit sequences and numbers are compared separately. The former are +// compared bytewise, while digits are compared numerically (except that +// the number of leading zeros is used as a tie-breaker, so e.g. "2" < "02") +// +// Limitation: only ASCII digits (0-9) are considered. +func NaturalLess(str1, str2 string) bool { + idx1, idx2 := 0, 0 + for idx1 < len(str1) && idx2 < len(str2) { + c1, c2 := str1[idx1], str2[idx2] + dig1, dig2 := isDigit(c1), isDigit(c2) + switch { + case dig1 != dig2: // Digits before other characters. + return dig1 // True if LHS is a digit, false if the RHS is one. + case !dig1: // && !dig2, because dig1 == dig2 + // UTF-8 compares bytewise-lexicographically, no need to decode + // codepoints. + if c1 != c2 { + return c1 < c2 + } + idx1++ + idx2++ + default: // Digits + // Eat zeros. + for ; idx1 < len(str1) && str1[idx1] == '0'; idx1++ { + } + for ; idx2 < len(str2) && str2[idx2] == '0'; idx2++ { + } + // Eat all digits. + nonZero1, nonZero2 := idx1, idx2 + for ; idx1 < len(str1) && isDigit(str1[idx1]); idx1++ { + } + for ; idx2 < len(str2) && isDigit(str2[idx2]); idx2++ { + } + // If lengths of numbers with non-zero prefix differ, the shorter + // one is less. + if len1, len2 := idx1-nonZero1, idx2-nonZero2; len1 != len2 { + return len1 < len2 + } + // If they're equally long, string comparison is correct. + if nr1, nr2 := str1[nonZero1:idx1], str2[nonZero2:idx2]; nr1 != nr2 { + return nr1 < nr2 + } + // Otherwise, the one with less zeros is less. + // Because everything up to the number is equal, comparing the index + // after the zeros is sufficient. + if nonZero1 != nonZero2 { + return nonZero1 < nonZero2 + } + } + // They're identical so far, so continue comparing. + } + // So far they are identical. At least one is ended. If the other continues, + // it sorts last. 
+ return len(str1) < len(str2) +} diff --git a/vendor/github.com/gorilla/websocket/.editorconfig b/vendor/github.com/gorilla/websocket/.editorconfig new file mode 100644 index 0000000000..2940ec92ac --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.editorconfig @@ -0,0 +1,20 @@ +; https://editorconfig.org/ + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = space +indent_size = 2 + +[{Makefile,go.mod,go.sum,*.go,.gitmodules}] +indent_style = tab +indent_size = 4 + +[*.md] +indent_size = 4 +trim_trailing_whitespace = false + +eclint_indent_style = unset diff --git a/vendor/github.com/gorilla/websocket/.gitignore b/vendor/github.com/gorilla/websocket/.gitignore new file mode 100644 index 0000000000..84039fec68 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.gitignore @@ -0,0 +1 @@ +coverage.coverprofile diff --git a/vendor/github.com/gorilla/websocket/.golangci.yml b/vendor/github.com/gorilla/websocket/.golangci.yml new file mode 100644 index 0000000000..34882139e1 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/.golangci.yml @@ -0,0 +1,3 @@ +run: + skip-dirs: + - examples/*.go diff --git a/vendor/github.com/gorilla/websocket/LICENSE b/vendor/github.com/gorilla/websocket/LICENSE new file mode 100644 index 0000000000..bb9d80bc9b --- /dev/null +++ b/vendor/github.com/gorilla/websocket/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2023 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
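Circling back to the fvbommel/sortorder files above: a minimal sketch of the natural ordering documented on `Natural` and `NaturalLess` (both real exported API; the sample slice is illustrative), showing that embedded integers compare by value, with leading zeros as a tie-break:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/fvbommel/sortorder"
)

func main() {
	s := []string{"z02", "abc12", "z2", "abc2"}
	// Natural implements sort.Interface via NaturalLess, so "abc2" sorts
	// before "abc12", and "z2" before "z02" (fewer leading zeros wins ties).
	sort.Sort(sortorder.Natural(s))
	fmt.Println(s) // [abc2 abc12 z2 z02]
}
```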
diff --git a/vendor/github.com/gorilla/websocket/Makefile b/vendor/github.com/gorilla/websocket/Makefile new file mode 100644 index 0000000000..603a63f50a --- /dev/null +++ b/vendor/github.com/gorilla/websocket/Makefile @@ -0,0 +1,34 @@ +GO_LINT=$(shell which golangci-lint 2> /dev/null || echo '') +GO_LINT_URI=github.com/golangci/golangci-lint/cmd/golangci-lint@latest + +GO_SEC=$(shell which gosec 2> /dev/null || echo '') +GO_SEC_URI=github.com/securego/gosec/v2/cmd/gosec@latest + +GO_VULNCHECK=$(shell which govulncheck 2> /dev/null || echo '') +GO_VULNCHECK_URI=golang.org/x/vuln/cmd/govulncheck@latest + +.PHONY: golangci-lint +golangci-lint: + $(if $(GO_LINT), ,go install $(GO_LINT_URI)) + @echo "##### Running golangci-lint" + golangci-lint run -v + +.PHONY: gosec +gosec: + $(if $(GO_SEC), ,go install $(GO_SEC_URI)) + @echo "##### Running gosec" + gosec -exclude-dir examples ./... + +.PHONY: govulncheck +govulncheck: + $(if $(GO_VULNCHECK), ,go install $(GO_VULNCHECK_URI)) + @echo "##### Running govulncheck" + govulncheck ./... + +.PHONY: verify +verify: golangci-lint gosec govulncheck + +.PHONY: test +test: + @echo "##### Running tests" + go test -race -cover -coverprofile=coverage.coverprofile -covermode=atomic -v ./... diff --git a/vendor/github.com/gorilla/websocket/README.md b/vendor/github.com/gorilla/websocket/README.md new file mode 100644 index 0000000000..1fd5e9c4e7 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/README.md @@ -0,0 +1,36 @@ +# gorilla/websocket + +![testing](https://github.com/gorilla/websocket/actions/workflows/test.yml/badge.svg) +[![codecov](https://codecov.io/github/gorilla/websocket/branch/main/graph/badge.svg)](https://codecov.io/github/gorilla/websocket) +[![godoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) +[![sourcegraph](https://sourcegraph.com/github.com/gorilla/websocket/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/websocket?badge) + +Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. + +![Gorilla Logo](https://github.com/gorilla/.github/assets/53367916/d92caabf-98e0-473e-bfbf-ab554ba435e5) + + +### Documentation + +* [API Reference](https://pkg.go.dev/github.com/gorilla/websocket?tab=doc) +* [Chat example](https://github.com/gorilla/websocket/tree/master/examples/chat) +* [Command example](https://github.com/gorilla/websocket/tree/master/examples/command) +* [Client and server example](https://github.com/gorilla/websocket/tree/master/examples/echo) +* [File watch example](https://github.com/gorilla/websocket/tree/master/examples/filewatch) +* [Write buffer pool example](https://github.com/gorilla/websocket/tree/master/examples/bufferpool) + +### Status + +The Gorilla WebSocket package provides a complete and tested implementation of +the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. The +package API is stable. + +### Installation + + go get github.com/gorilla/websocket + +### Protocol Compliance + +The Gorilla WebSocket package passes the server tests in the [Autobahn Test +Suite](https://github.com/crossbario/autobahn-testsuite) using the application in the [examples/autobahn +subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn). 
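Before the client implementation below, a minimal client sketch against gorilla/websocket's documented API (`DefaultDialer.Dial`, `WriteMessage`, and `ReadMessage` are real exported names; the endpoint URL is a placeholder):

```go
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

func main() {
	// Placeholder endpoint; any WebSocket echo server would do. DefaultDialer
	// applies a 45-second handshake timeout and honors proxy environment
	// variables via http.ProxyFromEnvironment.
	conn, _, err := websocket.DefaultDialer.Dial("wss://echo.example.com/ws", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	if err := conn.WriteMessage(websocket.TextMessage, []byte("hello")); err != nil {
		log.Fatalf("write: %v", err)
	}
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatalf("read: %v", err)
	}
	log.Printf("received: %s", msg)
}
```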
diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 0000000000..815b0ca5c8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,444 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "log" + + "net" + "net/http" + "net/http/httptrace" + "net/url" + "strings" + "time" + + "golang.org/x/net/proxy" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +// +// It is safe to call Dialer's methods concurrently. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // NetDialContext specifies the dial function for creating TCP connections. If + // NetDialContext is nil, NetDial is used. + NetDialContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // NetDialTLSContext specifies the dial function for creating TLS/TCP connections. If + // NetDialTLSContext is nil, NetDialContext is used. + // If NetDialTLSContext is set, Dial assumes the TLS handshake is done there and + // TLSClientConfig is ignored. + NetDialTLSContext func(ctx context.Context, network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + // If either NetDialTLS or NetDialTLSContext are set, Dial assumes the TLS handshake + // is done there and TLSClientConfig is ignored. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer + // size is zero, then a useful default size is used. 
The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // WriteBufferPool is a pool of buffers for write operations. If the value + // is not set, then write buffers are allocated to the connection for the + // lifetime of the connection. + // + // A pool is most useful when the application has a modest volume of writes + // across a large number of connections. + // + // Applications should use a single pool for each unique value of + // WriteBufferSize. + WriteBufferPool BufferPool + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +// Dial creates a new client connection by calling DialContext with a background context. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + return d.DialContext(context.Background(), urlStr, requestHeader) +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, + HandshakeTimeout: 45 * time.Second, +} + +// nilDialer is dialer to use when receiver is nil. +var nilDialer = *DefaultDialer + +// DialContext creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// The context will be used in the request and in the Dialer. +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) DialContext(ctx context.Context, urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + if d == nil { + d = &nilDialer + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := url.Parse(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. 
+ return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: http.MethodGet, + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + req = req.WithContext(ctx) + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. + req.Header["Upgrade"] = []string{"websocket"} + req.Header["Connection"] = []string{"Upgrade"} + req.Header["Sec-WebSocket-Key"] = []string{challengeKey} + req.Header["Sec-WebSocket-Version"] = []string{"13"} + if len(d.Subprotocols) > 0 { + req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")} + } + for k, vs := range requestHeader { + switch { + case k == "Host": + if len(vs) > 0 { + req.Host = vs[0] + } + case k == "Upgrade" || + k == "Connection" || + k == "Sec-Websocket-Key" || + k == "Sec-Websocket-Version" || + //#nosec G101 (CWE-798): Potential HTTP request smuggling via parameter pollution + k == "Sec-Websocket-Extensions" || + (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): + return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) + case k == "Sec-Websocket-Protocol": + req.Header["Sec-WebSocket-Protocol"] = vs + default: + req.Header[k] = vs + } + } + + if d.EnableCompression { + req.Header["Sec-WebSocket-Extensions"] = []string{"permessage-deflate; server_no_context_takeover; client_no_context_takeover"} + } + + if d.HandshakeTimeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, d.HandshakeTimeout) + defer cancel() + } + + // Get network dial function. + var netDial func(network, add string) (net.Conn, error) + + switch u.Scheme { + case "http": + if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + case "https": + if d.NetDialTLSContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialTLSContext(ctx, network, addr) + } + } else if d.NetDialContext != nil { + netDial = func(network, addr string) (net.Conn, error) { + return d.NetDialContext(ctx, network, addr) + } + } else if d.NetDial != nil { + netDial = d.NetDial + } + default: + return nil, nil, errMalformedURL + } + + if netDial == nil { + netDialer := &net.Dialer{} + netDial = func(network, addr string) (net.Conn, error) { + return netDialer.DialContext(ctx, network, addr) + } + } + + // If needed, wrap the dial function to set the connection deadline. + if deadline, ok := ctx.Deadline(); ok { + forwardDial := netDial + netDial = func(network, addr string) (net.Conn, error) { + c, err := forwardDial(network, addr) + if err != nil { + return nil, err + } + err = c.SetDeadline(deadline) + if err != nil { + if err := c.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + return c, nil + } + } + + // If needed, wrap the dial function to connect through a proxy. 
+ if d.Proxy != nil { + proxyURL, err := d.Proxy(req) + if err != nil { + return nil, nil, err + } + if proxyURL != nil { + dialer, err := proxy.FromURL(proxyURL, netDialerFunc(netDial)) + if err != nil { + return nil, nil, err + } + netDial = dialer.Dial + } + } + + hostPort, hostNoPort := hostPortNoPort(u) + trace := httptrace.ContextClientTrace(ctx) + if trace != nil && trace.GetConn != nil { + trace.GetConn(hostPort) + } + + netConn, err := netDial("tcp", hostPort) + if err != nil { + return nil, nil, err + } + if trace != nil && trace.GotConn != nil { + trace.GotConn(httptrace.GotConnInfo{ + Conn: netConn, + }) + } + + defer func() { + if netConn != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + } + }() + + if u.Scheme == "https" && d.NetDialTLSContext == nil { + // If NetDialTLSContext is set, assume that the TLS handshake has already been done + + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + + if trace != nil && trace.TLSHandshakeStart != nil { + trace.TLSHandshakeStart() + } + err := doHandshake(ctx, tlsConn, cfg) + if trace != nil && trace.TLSHandshakeDone != nil { + trace.TLSHandshakeDone(tlsConn.ConnectionState(), err) + } + + if err != nil { + return nil, nil, err + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize, d.WriteBufferPool, nil, nil) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + if trace != nil && trace.GotFirstResponseByte != nil { + if peek, err := conn.br.Peek(1); err == nil && len(peek) == 1 { + trace.GotFirstResponseByte() + } + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + if d.TLSClientConfig != nil { + for _, proto := range d.TLSClientConfig.NextProtos { + if proto != "http/1.1" { + return nil, nil, fmt.Errorf( + "websocket: protocol %q was given but is not supported;"+ + "sharing tls.Config with net/http Transport can cause this error: %w", + proto, err, + ) + } + } + } + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !tokenListContainsValue(resp.Header, "Upgrade", "websocket") || + !tokenListContainsValue(resp.Header, "Connection", "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = io.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = io.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + if err := netConn.SetDeadline(time.Time{}); err != nil { + return nil, nil, err + } + netConn = nil // to avoid close in defer. 
+ return conn, resp, nil +} + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{MinVersion: tls.VersionTLS12} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 0000000000..9fed0ef521 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,153 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "log" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + if err := fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil); err != nil { + panic(err) + } + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. + if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. 
This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + if err := r.Close(); err != nil { + log.Printf("websocket: flateReadWrapper.Close() returned error: %v", err) + } + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 0000000000..221e6cf798 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1267 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "crypto/rand" + "encoding/binary" + "errors" + "io" + "log" + "net" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. + CloseMessage = 8 + + // PingMessage denotes a ping control message. The optional message payload + // is UTF-8 encoded text. + PingMessage = 9 + + // PongMessage denotes a pong control message. The optional message payload + // is UTF-8 encoded text. + PongMessage = 10 +) + +// ErrCloseSent is returned when the application writes a message to the +// connection after sending a close message. +var ErrCloseSent = errors.New("websocket: close sent") + +// ErrReadLimit is returned when reading a message that is larger than the +// read limit set for the connection. +var ErrReadLimit = errors.New("websocket: read limit exceeded") + +// netError satisfies the net Error interface. +type netError struct { + msg string + temporary bool + timeout bool +} + +func (e *netError) Error() string { return e.msg } +func (e *netError) Temporary() bool { return e.temporary } +func (e *netError) Timeout() bool { return e.timeout } + +// CloseError represents a close message. +type CloseError struct { + // Code is defined in RFC 6455, section 11.7. + Code int + + // Text is the optional text payload. 
+ Text string +} + +func (e *CloseError) Error() string { + s := []byte("websocket: close ") + s = strconv.AppendInt(s, int64(e.Code), 10) + switch e.Code { + case CloseNormalClosure: + s = append(s, " (normal)"...) + case CloseGoingAway: + s = append(s, " (going away)"...) + case CloseProtocolError: + s = append(s, " (protocol error)"...) + case CloseUnsupportedData: + s = append(s, " (unsupported data)"...) + case CloseNoStatusReceived: + s = append(s, " (no status)"...) + case CloseAbnormalClosure: + s = append(s, " (abnormal closure)"...) + case CloseInvalidFramePayloadData: + s = append(s, " (invalid payload data)"...) + case ClosePolicyViolation: + s = append(s, " (policy violation)"...) + case CloseMessageTooBig: + s = append(s, " (message too big)"...) + case CloseMandatoryExtension: + s = append(s, " (mandatory extension missing)"...) + case CloseInternalServerErr: + s = append(s, " (internal server error)"...) + case CloseTLSHandshake: + s = append(s, " (TLS handshake error)"...) + } + if e.Text != "" { + s = append(s, ": "...) + s = append(s, e.Text...) + } + return string(s) +} + +// IsCloseError returns boolean indicating whether the error is a *CloseError +// with one of the specified codes. +func IsCloseError(err error, codes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range codes { + if e.Code == code { + return true + } + } + } + return false +} + +// IsUnexpectedCloseError returns boolean indicating whether the error is a +// *CloseError with a code not in the list of expected codes. +func IsUnexpectedCloseError(err error, expectedCodes ...int) bool { + if e, ok := err.(*CloseError); ok { + for _, code := range expectedCodes { + if e.Code == code { + return false + } + } + return true + } + return false +} + +var ( + errWriteTimeout = &netError{msg: "websocket: write timeout", timeout: true, temporary: true} + errUnexpectedEOF = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()} + errBadWriteOpCode = errors.New("websocket: bad write message type") + errWriteClosed = errors.New("websocket: write closed") + errInvalidControlFrame = errors.New("websocket: invalid control frame") +) + +// maskRand is an io.Reader for generating mask bytes. The reader is initialized +// to crypto/rand Reader. Tests swap the reader to a math/rand reader for +// reproducible results. +var maskRand = rand.Reader + +// newMaskKey returns a new 32 bit value for masking client frames. 
+func newMaskKey() [4]byte { + var k [4]byte + _, _ = io.ReadFull(maskRand, k[:]) + return k +} + +func hideTempErr(err error) error { + if e, ok := err.(net.Error); ok { + err = &netError{msg: e.Error(), timeout: e.Timeout()} + } + return err +} + +func isControl(frameType int) bool { + return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage +} + +func isData(frameType int) bool { + return frameType == TextMessage || frameType == BinaryMessage +} + +var validReceivedCloseCodes = map[int]bool{ + // see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number + + CloseNormalClosure: true, + CloseGoingAway: true, + CloseProtocolError: true, + CloseUnsupportedData: true, + CloseNoStatusReceived: false, + CloseAbnormalClosure: false, + CloseInvalidFramePayloadData: true, + ClosePolicyViolation: true, + CloseMessageTooBig: true, + CloseMandatoryExtension: true, + CloseInternalServerErr: true, + CloseServiceRestart: true, + CloseTryAgainLater: true, + CloseTLSHandshake: false, +} + +func isValidReceivedCloseCode(code int) bool { + return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) +} + +// BufferPool represents a pool of buffers. The *sync.Pool type satisfies this +// interface. The type of the value stored in a pool is not specified. +type BufferPool interface { + // Get gets a value from the pool or returns nil if the pool is empty. + Get() interface{} + // Put adds a value to the pool. + Put(interface{}) +} + +// writePoolData is the type added to the write buffer pool. This wrapper is +// used to prevent applications from peeking at and depending on the values +// added to the pool. +type writePoolData struct{ buf []byte } + +// The Conn type represents a WebSocket connection. +type Conn struct { + conn net.Conn + isServer bool + subprotocol string + + // Write fields + mu chan struct{} // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writePool BufferPool + writeBufSize int + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error + + enableWriteCompression bool + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser + + // Read fields + reader io.ReadCloser // the current reader returned to the application + readErr error + br *bufio.Reader + // bytes remaining in current frame. + // use setReadRemaining to safely update this value and prevent overflow + readRemaining int64 + readFinal bool // true if there are no more frames in the current message. + readLength int64 // Message size. + readLimit int64 // Maximum message size.
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, writeBufferPool BufferPool, br *bufio.Reader, writeBuf []byte) *Conn { + + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } else if readBufferSize < maxControlFramePayloadSize { + // must be large enough for control frame + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + if writeBufferSize <= 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBufferSize += maxFrameHeaderSize + + if writeBuf == nil && writeBufferPool == nil { + writeBuf = make([]byte, writeBufferSize) + } + + mu := make(chan struct{}, 1) + mu <- struct{}{} + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + writePool: writeBufferPool, + writeBufSize: writeBufferSize, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// setReadRemaining tracks the number of bytes remaining on the connection. If n +// overflows, an ErrReadLimit is returned. +func (c *Conn) setReadRemaining(n int64) error { + if n < 0 { + return ErrReadLimit + } + + c.readRemaining = n + return nil +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting +// for a close message. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. +func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) read(n int) ([]byte, error) { + p, err := c.br.Peek(n) + if err == io.EOF { + err = errUnexpectedEOF + } + if _, err := c.br.Discard(len(p)); err != nil { + return p, err + } + return p, err +} + +func (c *Conn) write(frameType int, deadline time.Time, buf0, buf1 []byte) error { + <-c.mu + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } + if len(buf1) == 0 { + _, err = c.conn.Write(buf0) + } else { + err = c.writeBufs(buf0, buf1) + } + if err != nil { + return c.writeFatal(err) + } + if frameType == CloseMessage { + _ = c.writeFatal(ErrCloseSent) + } + return nil +} + +func (c *Conn) writeBufs(bufs ...[]byte) error { + b := net.Buffers(bufs) + _, err := b.WriteTo(c.conn) + return err +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. 
+func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := 1000 * time.Hour + if !deadline.IsZero() { + d = time.Until(deadline) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- struct{}{} }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + if err := c.conn.SetWriteDeadline(deadline); err != nil { + return c.writeFatal(err) + } + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + _ = c.writeFatal(ErrCloseSent) + } + return err +} + +// beginMessage prepares a connection and message writer for a new message. +func (c *Conn) beginMessage(mw *messageWriter, messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + if err := c.writer.Close(); err != nil { + log.Printf("websocket: discarding writer close error: %v", err) + } + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + mw.c = c + mw.frameType = messageType + mw.pos = maxFrameHeaderSize + + if c.writeBuf == nil { + wpd, ok := c.writePool.Get().(writePoolData) + if ok { + c.writeBuf = wpd.buf + } else { + c.writeBuf = make([]byte, c.writeBufSize) + } + } + return nil +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +// +// All message types (TextMessage, BinaryMessage, CloseMessage, PingMessage and +// PongMessage) are supported. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return nil, err + } + c.writer = &mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. 
+ err error +} + +func (w *messageWriter) endMessage(err error) error { + if w.err != nil { + return err + } + c := w.c + w.err = err + c.writer = nil + if c.writePool != nil { + c.writePool.Put(writePoolData{buf: c.writeBuf}) + c.writeBuf = nil + } + return err +} + +// flushFrame writes buffered data and extra as a frame to the network. The +// final argument indicates that this is the last frame in the message. +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) + + // Check for invalid control frames. + if isControl(w.frameType) && + (!final || length > maxControlFramePayloadSize) { + return w.endMessage(errInvalidControlFrame) + } + + b0 := byte(w.frameType) + if final { + b0 |= finalBit + } + if w.compress { + b0 |= rsv1Bit + } + w.compress = false + + b1 := byte(0) + if !c.isServer { + b1 |= maskBit + } + + // Assume that the frame starts at beginning of c.writeBuf. + framePos := 0 + if c.isServer { + // Adjust up if mask not included in the header. + framePos = 4 + } + + switch { + case length >= 65536: + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 127 + binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length)) + case length > 125: + framePos += 6 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | 126 + binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length)) + default: + framePos += 8 + c.writeBuf[framePos] = b0 + c.writeBuf[framePos+1] = b1 | byte(length) + } + + if !c.isServer { + key := newMaskKey() + copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) + if len(extra) > 0 { + return w.endMessage(c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))) + } + } + + // Write the buffers to the connection with best-effort detection of + // concurrent writes. See the concurrency section in the package + // documentation for more info. + + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) + + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + + if err != nil { + return w.endMessage(err) + } + + if final { + _ = w.endMessage(errWriteClosed) + return nil + } + + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame + return nil +} + +func (w *messageWriter) ncopy(max int) (int, error) { + n := len(w.c.writeBuf) - w.pos + if n <= 0 { + if err := w.flushFrame(false, nil); err != nil { + return 0, err + } + n = len(w.c.writeBuf) - w.pos + } + if n > max { + n = max + } + return n, nil +} + +func (w *messageWriter) Write(p []byte) (int, error) { + if w.err != nil { + return 0, w.err + } + + if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { + // Don't buffer large messages. 
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + return w.flushFrame(true, nil) +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + var mw messageWriter + if err := c.beginMessage(&mw, messageType); err != nil { + return err + } + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(io.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. + // To aid debugging, collect and report all errors in the first two bytes + // of the header. 
+ + var errors []string + + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + frameType := int(p[0] & 0xf) + final := p[0]&finalBit != 0 + rsv1 := p[0]&rsv1Bit != 0 + rsv2 := p[0]&rsv2Bit != 0 + rsv3 := p[0]&rsv3Bit != 0 + mask := p[1]&maskBit != 0 + if err := c.setReadRemaining(int64(p[1] & 0x7f)); err != nil { + return noFrame, err + } + + c.readDecompress = false + if rsv1 { + if c.newDecompressionReader != nil { + c.readDecompress = true + } else { + errors = append(errors, "RSV1 set") + } + } + + if rsv2 { + errors = append(errors, "RSV2 set") + } + + if rsv3 { + errors = append(errors, "RSV3 set") + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + errors = append(errors, "len > 125 for control") + } + if !final { + errors = append(errors, "FIN not set on control") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + errors = append(errors, "data before FIN") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + errors = append(errors, "continuation after FIN") + } + c.readFinal = final + default: + errors = append(errors, "bad opcode "+strconv.Itoa(frameType)) + } + + if mask != c.isServer { + errors = append(errors, "bad MASK") + } + + if len(errors) > 0 { + return noFrame, c.handleProtocolError(strings.Join(errors, ", ")) + } + + // 3. Read and parse frame length as per + // https://tools.ietf.org/html/rfc6455#section-5.2 + // + // The length of the "Payload data", in bytes: if 0-125, that is the payload + // length. + // - If 126, the following 2 bytes interpreted as a 16-bit unsigned + // integer are the payload length. + // - If 127, the following 8 bytes interpreted as + // a 64-bit unsigned integer (the most significant bit MUST be 0) are the + // payload length. Multibyte length quantities are expressed in network byte + // order. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint16(p))); err != nil { + return noFrame, err + } + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + + if err := c.setReadRemaining(int64(binary.BigEndian.Uint64(p))); err != nil { + return noFrame, err + } + } + + // 4. Handle frame masking. + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + // Don't allow readLength to overflow in the presence of a large readRemaining + // counter. + if c.readLength < 0 { + return noFrame, ErrReadLimit + } + + if c.readLimit > 0 && c.readLength > c.readLimit { + if err := c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)); err != nil { + return noFrame, err + } + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + if err := c.setReadRemaining(0); err != nil { + return noFrame, err + } + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
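+	// Ping and pong payloads are delivered to the registered handlers; a
+	// close frame is validated (status code and UTF-8 reason) and then
+	// surfaced to the caller as a *CloseError.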
+
+	switch frameType {
+	case PongMessage:
+		if err := c.handlePong(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case PingMessage:
+		if err := c.handlePing(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case CloseMessage:
+		closeCode := CloseNoStatusReceived
+		closeText := ""
+		if len(payload) >= 2 {
+			closeCode = int(binary.BigEndian.Uint16(payload))
+			if !isValidReceivedCloseCode(closeCode) {
+				return noFrame, c.handleProtocolError("bad close code " + strconv.Itoa(closeCode))
+			}
+			closeText = string(payload[2:])
+			if !utf8.ValidString(closeText) {
+				return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+			}
+		}
+		if err := c.handleClose(closeCode, closeText); err != nil {
+			return noFrame, err
+		}
+		return noFrame, &CloseError{Code: closeCode, Text: closeText}
+	}
+
+	return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+	data := FormatCloseMessage(CloseProtocolError, message)
+	if len(data) > maxControlFramePayloadSize {
+		data = data[:maxControlFramePayloadSize]
+	}
+	if err := c.WriteControl(CloseMessage, data, time.Now().Add(writeWait)); err != nil {
+		return err
+	}
+	return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+	// Close previous reader, only relevant for decompression.
+	if c.reader != nil {
+		if err := c.reader.Close(); err != nil {
+			log.Printf("websocket: discarding reader close error: %v", err)
+		}
+		c.reader = nil
+	}
+
+	c.messageReader = nil
+	c.readLength = 0
+
+	for c.readErr == nil {
+		frameType, err := c.advanceFrame()
+		if err != nil {
+			c.readErr = hideTempErr(err)
+			break
+		}
+
+		if frameType == TextMessage || frameType == BinaryMessage {
+			c.messageReader = &messageReader{c}
+			c.reader = c.messageReader
+			if c.readDecompress {
+				c.reader = c.newDecompressionReader(c.reader)
+			}
+			return frameType, c.reader, nil
+		}
+	}
+
+	// Applications that do not handle the error returned from this method can
+	// spin in a tight loop on connection failure. To help application
+	// developers detect this error, panic on repeated reads to the failed
+	// connection.
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + rem := c.readRemaining + rem -= int64(n) + if err := c.setReadRemaining(rem); err != nil { + return 0, err + } + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = io.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size in bytes for a message read from the peer. If a +// message exceeds the limit, the connection sends a close message to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close +// message back to the peer. +// +// The handler function is called from the NextReader, ReadMessage and message +// reader Read methods. The application must read the connection to process +// close messages as described in the section on Control Messages above. +// +// The connection read methods return a CloseError when a close message is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close message back to +// the peer. 
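+//
+// For example, a handler that records the close reason before replying with
+// the default close message might look like the following (an illustrative
+// sketch, not part of the original documentation):
+//
+//	conn.SetCloseHandler(func(code int, text string) error {
+//		log.Printf("peer closed connection: %d %q", code, text)
+//		message := websocket.FormatCloseMessage(code, "")
+//		return conn.WriteControl(websocket.CloseMessage, message,
+//			time.Now().Add(time.Second))
+//	})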
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+	if h == nil {
+		h = func(code int, text string) error {
+			message := FormatCloseMessage(code, "")
+			if err := c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)); err != nil {
+				return err
+			}
+			return nil
+		}
+	}
+	c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+	return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING message application data. The default
+// ping handler sends a pong to the peer.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// ping messages as described in the section on Control Messages above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(message string) error {
+			err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+			if err == ErrCloseSent {
+				return nil
+			} else if _, ok := err.(net.Error); ok {
+				return nil
+			}
+			return err
+		}
+	}
+	c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+	return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG message application data. The default
+// pong handler does nothing.
+//
+// The handler function is called from the NextReader, ReadMessage and message
+// reader Read methods. The application must read the connection to process
+// pong messages as described in the section on Control Messages above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(string) error { return nil }
+	}
+	c.handlePong = h
+}
+
+// NetConn returns the underlying connection that is wrapped by c.
+// Note that writing to or reading from this connection directly will corrupt the
+// WebSocket connection.
+func (c *Conn) NetConn() net.Conn {
+	return c.conn
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used to make
+// further modifications to connection-specific flags.
+//
+// Deprecated: Use the NetConn method.
+func (c *Conn) UnderlyingConn() net.Conn {
+	return c.conn
+}
+
+// EnableWriteCompression enables or disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+	c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+	if !isValidCompressionLevel(level) {
+		return errors.New("websocket: invalid compression level")
+	}
+	c.compressionLevel = level
+	return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+// An empty message is returned for code CloseNoStatusReceived.
+func FormatCloseMessage(closeCode int, text string) []byte {
+	if closeCode == CloseNoStatusReceived {
+		// Return empty message because it's illegal to send
+		// CloseNoStatusReceived. Return non-nil value in case application
+		// checks for nil.
+		return []byte{}
+	}
+	buf := make([]byte, 2+len(text))
+	binary.BigEndian.PutUint16(buf, uint16(closeCode))
+	copy(buf[2:], text)
+	return buf
+}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 0000000000..8db0cef95a
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,227 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+//	var upgrader = websocket.Upgrader{
+//		ReadBufferSize:  1024,
+//		WriteBufferSize: 1024,
+//	}
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		conn, err := upgrader.Upgrade(w, r, nil)
+//		if err != nil {
+//			log.Println(err)
+//			return
+//		}
+//		... Use conn to send and receive messages.
+//	}
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+//	for {
+//		messageType, p, err := conn.ReadMessage()
+//		if err != nil {
+//			log.Println(err)
+//			return
+//		}
+//		if err := conn.WriteMessage(messageType, p); err != nil {
+//			log.Println(err)
+//			return
+//		}
+//	}
+//
+// In the above snippet, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+//	for {
+//		messageType, r, err := conn.NextReader()
+//		if err != nil {
+//			return err
+//		}
+//		w, err := conn.NextWriter(messageType)
+//		if err != nil {
+//			return err
+//		}
+//		if _, err := io.Copy(w, r); err != nil {
+//			return err
+//		}
+//		if err := w.Close(); err != nil {
+//			return err
+//		}
+//	}
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
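+//
+// For example, an application can send a ping with a bounded write time using
+// WriteControl (an illustrative sketch; the deadline value is an assumption):
+//
+//	deadline := time.Now().Add(5 * time.Second)
+//	err := conn.WriteControl(websocket.PingMessage, nil, deadline)
+//	if err != nil {
+//		log.Println(err)
+//	}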
+// +// Connections handle received close messages by calling the handler function +// set with the SetCloseHandler method and by returning a *CloseError from the +// NextReader, ReadMessage or the message Read method. The default close +// handler sends a close message to the peer. +// +// Connections handle received ping messages by calling the handler function +// set with the SetPingHandler method. The default ping handler sends a pong +// message to the peer. +// +// Connections handle received pong messages by calling the handler function +// set with the SetPongHandler method. The default pong handler does nothing. +// If an application sends ping messages, then the application should set a +// pong handler to receive the corresponding pong. +// +// The control message handler functions are called from the NextReader, +// ReadMessage and message reader Read methods. The default close and ping +// handlers can block these methods for a short time when the handler writes to +// the connection. +// +// The application must read the connection to process close, ping and pong +// messages sent from the peer. If the application is not otherwise interested +// in messages from the peer, then the application should start a goroutine to +// read and discard messages from the peer. A simple example is: +// +// func readLoop(c *websocket.Conn) { +// for { +// if _, _, err := c.NextReader(); err != nil { +// c.Close() +// break +// } +// } +// } +// +// Concurrency +// +// Connections support one concurrent reader and one concurrent writer. +// +// Applications are responsible for ensuring that no more than one goroutine +// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. +// +// The Close and WriteControl methods can be called concurrently with all other +// methods. +// +// Origin Considerations +// +// Web browsers allow Javascript applications to open a WebSocket connection to +// any host. It's up to the server to enforce an origin policy using the Origin +// request header sent by the browser. +// +// The Upgrader calls the function specified in the CheckOrigin field to check +// the origin. If the CheckOrigin function returns false, then the Upgrade +// method fails the WebSocket handshake with HTTP status 403. +// +// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail +// the handshake if the Origin request header is present and the Origin host is +// not equal to the Host request header. +// +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Buffers +// +// Connections buffer network input and output to reduce the number +// of system calls when reading or writing messages. +// +// Write buffers are also used for constructing WebSocket frames. See RFC 6455, +// Section 5 for a discussion of message framing. A WebSocket frame header is +// written to the network each time a write buffer is flushed to the network. +// Decreasing the size of the write buffer can increase the amount of framing +// overhead on the connection. +// +// The buffer sizes in bytes are specified by the ReadBufferSize and +// WriteBufferSize fields in the Dialer and Upgrader. 
The Dialer uses a default
+// size of 4096 when a buffer size field is set to zero. The Upgrader reuses
+// buffers created by the HTTP server when a buffer size field is set to zero.
+// The HTTP server buffers have a size of 4096 at the time of this writing.
+//
+// The buffer sizes do not limit the size of a message that can be read or
+// written by a connection.
+//
+// Buffers are held for the lifetime of the connection by default. If the
+// Dialer or Upgrader WriteBufferPool field is set, then a connection holds the
+// write buffer only when writing a message.
+//
+// Applications should tune the buffer sizes to balance memory use and
+// performance. Increasing the buffer size uses more memory, but can reduce the
+// number of system calls to read or write the network. In the case of writing,
+// increasing the buffer size can reduce the number of frame headers written to
+// the network.
+//
+// Some guidelines for setting buffer parameters are:
+//
+// Limit the buffer sizes to the maximum expected message size. Buffers larger
+// than the largest message do not provide any benefit.
+//
+// Depending on the distribution of message sizes, setting the buffer size to
+// a value less than the maximum expected message size can greatly reduce memory
+// use with a small impact on performance. Here's an example: If 99% of the
+// messages are smaller than 256 bytes and the maximum message size is 512
+// bytes, then a buffer size of 256 bytes will result in 1.01 times as many
+// system calls as a buffer size of 512 bytes. The memory savings is 50%.
+//
+// A write buffer pool is useful when the application has a modest number of
+// writes over a large number of connections. When buffers are pooled, a larger
+// buffer size has a reduced impact on total memory use and has the benefit of
+// reducing system calls and frame overhead.
+//
+// Compression EXPERIMENTAL
+//
+// Per message compression extensions (RFC 7692) are experimentally supported
+// by this package in a limited capacity. Setting the EnableCompression option
+// to true in Dialer or Upgrader will attempt to negotiate per message deflate
+// support.
+//
+//	var upgrader = websocket.Upgrader{
+//		EnableCompression: true,
+//	}
+//
+// If compression was successfully negotiated with the connection's peer, any
+// message received in compressed form will be automatically decompressed.
+// All Read methods will return uncompressed bytes.
+//
+// Per message compression of messages written to a connection can be enabled
+// or disabled by calling the corresponding Conn method:
+//
+//	conn.EnableWriteCompression(false)
+//
+// Currently this package does not support compression with "context takeover".
+// This means that messages must be compressed and decompressed in isolation,
+// without retaining sliding window or dictionary state across messages. For
+// more details refer to RFC 7692.
+//
+// Use of compression is experimental and may result in decreased performance.
+package websocket
diff --git a/vendor/github.com/gorilla/websocket/join.go b/vendor/github.com/gorilla/websocket/join.go
new file mode 100644
index 0000000000..c64f8c8290
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/join.go
@@ -0,0 +1,42 @@
+// Copyright 2019 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"io"
+	"strings"
+)
+
+// JoinMessages concatenates received messages to create a single io.Reader.
+// The string term is appended to each message. The returned reader does not +// support concurrent calls to the Read method. +func JoinMessages(c *Conn, term string) io.Reader { + return &joinReader{c: c, term: term} +} + +type joinReader struct { + c *Conn + term string + r io.Reader +} + +func (r *joinReader) Read(p []byte) (int, error) { + if r.r == nil { + var err error + _, r.r, err = r.c.NextReader() + if err != nil { + return 0, err + } + if r.term != "" { + r.r = io.MultiReader(r.r, strings.NewReader(r.term)) + } + } + n, err := r.r.Read(p) + if err == io.EOF { + err = nil + r.r = nil + } + return n, err +} diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 0000000000..dc2c1f6415 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 0000000000..67d0968be8 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,59 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +//go:build !appengine +// +build !appengine + +package websocket + +import "unsafe" + +// #nosec G103 -- (CWE-242) Has been audited +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + //#nosec G103 -- (CWE-242) Has been audited + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. 
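+	// Replicating the 4-byte mask across one machine word lets the loop below
+	// XOR wordSize bytes per iteration instead of one byte at a time.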
+	var k [wordSize]byte
+	for i := range k {
+		k[i] = key[(pos+i)&3]
+	}
+	//#nosec G103 -- (CWE-242) Has been audited
+	kw := *(*uintptr)(unsafe.Pointer(&k))
+
+	// Mask one word at a time.
+	n := (len(b) / wordSize) * wordSize
+	for i := 0; i < n; i += wordSize {
+		//#nosec G103 -- (CWE-242) Has been audited
+		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+	}
+
+	// Mask one byte at a time for remaining bytes.
+	b = b[n:]
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+
+	return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 0000000000..36250ca7c4
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+//go:build appengine
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+	return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 0000000000..c854225e96
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,102 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bytes"
+	"net"
+	"sync"
+	"time"
+)
+
+// PreparedMessage caches the on-the-wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+	messageType int
+	data        []byte
+	mu          sync.Mutex
+	frames      map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+	isServer         bool
+	compress         bool
+	compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+	once sync.Once
+	data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to a connection using the WritePreparedMessage method. The valid wire
+// representation is calculated lazily, only once for a given set of
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+	pm := &PreparedMessage{
+		messageType: messageType,
+		frames:      make(map[prepareKey]*preparedFrame),
+		data:        data,
+	}
+
+	// Prepare a plain server frame.
+	_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+	if err != nil {
+		return nil, err
+	}
+
+	// To protect against caller modifying the data argument, remember the data
+	// copied to the plain server frame.
+	pm.data = frameData[len(frameData)-len(data):]
+	return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+	pm.mu.Lock()
+	frame, ok := pm.frames[key]
+	if !ok {
+		frame = &preparedFrame{}
+		pm.frames[key] = frame
+	}
+	pm.mu.Unlock()
+
+	var err error
+	frame.once.Do(func() {
+		// Prepare a frame using a 'fake' connection.
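+		// prepareConn (defined below) records everything the Conn writes, so
+		// the exact wire bytes, including the frame header and any
+		// compression, are captured for reuse.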
+		// TODO: Refactor code in conn.go to allow more direct construction of
+		// the frame.
+		mu := make(chan struct{}, 1)
+		mu <- struct{}{}
+		var nc prepareConn
+		c := &Conn{
+			conn:                   &nc,
+			mu:                     mu,
+			isServer:               key.isServer,
+			compressionLevel:       key.compressionLevel,
+			enableWriteCompression: true,
+			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+		}
+		if key.compress {
+			c.newCompressionWriter = compressNoContextTakeover
+		}
+		err = c.WriteMessage(pm.messageType, pm.data)
+		frame.data = nc.buf.Bytes()
+	})
+	return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+	buf bytes.Buffer
+	net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/proxy.go b/vendor/github.com/gorilla/websocket/proxy.go
new file mode 100644
index 0000000000..80f55d1eac
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/proxy.go
@@ -0,0 +1,86 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"encoding/base64"
+	"errors"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"golang.org/x/net/proxy"
+)
+
+type netDialerFunc func(network, addr string) (net.Conn, error)
+
+func (fn netDialerFunc) Dial(network, addr string) (net.Conn, error) {
+	return fn(network, addr)
+}
+
+func init() {
+	proxy.RegisterDialerType("http", func(proxyURL *url.URL, forwardDialer proxy.Dialer) (proxy.Dialer, error) {
+		return &httpProxyDialer{proxyURL: proxyURL, forwardDial: forwardDialer.Dial}, nil
+	})
+}
+
+type httpProxyDialer struct {
+	proxyURL    *url.URL
+	forwardDial func(network, addr string) (net.Conn, error)
+}
+
+func (hpd *httpProxyDialer) Dial(network string, addr string) (net.Conn, error) {
+	hostPort, _ := hostPortNoPort(hpd.proxyURL)
+	conn, err := hpd.forwardDial(network, hostPort)
+	if err != nil {
+		return nil, err
+	}
+
+	connectHeader := make(http.Header)
+	if user := hpd.proxyURL.User; user != nil {
+		proxyUser := user.Username()
+		if proxyPassword, passwordSet := user.Password(); passwordSet {
+			credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+			connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+		}
+	}
+
+	connectReq := &http.Request{
+		Method: http.MethodConnect,
+		URL:    &url.URL{Opaque: addr},
+		Host:   addr,
+		Header: connectHeader,
+	}
+
+	if err := connectReq.Write(conn); err != nil {
+		if err := conn.Close(); err != nil {
+			log.Printf("httpProxyDialer: failed to close connection: %v", err)
+		}
+		return nil, err
+	}
+
+	// Read response. It's OK to use and discard buffered reader here because
+	// the remote server does not speak until spoken to.
+	br := bufio.NewReader(conn)
+	resp, err := http.ReadResponse(br, connectReq)
+	if err != nil {
+		if err := conn.Close(); err != nil {
+			log.Printf("httpProxyDialer: failed to close connection: %v", err)
+		}
+		return nil, err
+	}
+
+	if resp.StatusCode != 200 {
+		if err := conn.Close(); err != nil {
+			log.Printf("httpProxyDialer: failed to close connection: %v", err)
+		}
+		f := strings.SplitN(resp.Status, " ", 2)
+		return nil, errors.New(f[1])
+	}
+	return conn, nil
+}
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 0000000000..1e720e1da4
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,389 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"log"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+	message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+//
+// It is safe to call Upgrader's methods concurrently.
+type Upgrader struct {
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes in bytes. If a buffer
+	// size is zero, then buffers allocated by the HTTP server are used. The
+	// I/O buffer sizes do not limit the size of the messages that can be sent
+	// or received.
+	ReadBufferSize, WriteBufferSize int
+
+	// WriteBufferPool is a pool of buffers for write operations. If the value
+	// is not set, then write buffers are allocated to the connection for the
+	// lifetime of the connection.
+	//
+	// A pool is most useful when the application has a modest volume of writes
+	// across a large number of connections.
+	//
+	// Applications should use a single pool for each unique value of
+	// WriteBufferSize.
+	WriteBufferPool BufferPool
+
+	// Subprotocols specifies the server's supported protocols in order of
+	// preference. If this field is not nil, then the Upgrade method negotiates a
+	// subprotocol by selecting the first match in this list with a protocol
+	// requested by the client. If there's no match, then no protocol is
+	// negotiated (the Sec-Websocket-Protocol header is not included in the
+	// handshake response).
+	Subprotocols []string
+
+	// Error specifies the function for generating HTTP error responses. If Error
+	// is nil, then http.Error is used to generate the HTTP response.
+	Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+	// CheckOrigin returns true if the request Origin header is acceptable. If
+	// CheckOrigin is nil, then a safe default is used: return false if the
+	// Origin request header is present and the origin host is not equal to
+	// request Host header.
+	//
+	// A CheckOrigin function should carefully validate the request origin to
+	// prevent cross-site request forgery.
+	CheckOrigin func(r *http.Request) bool
+
+	// EnableCompression specifies whether the server should attempt to negotiate
+	// per message compression (RFC 7692). Setting this value to true does not
+	// guarantee that compression will be supported. Currently only "no context
Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return equalASCIIFold(u.Host, r.Host) +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie). To specify +// subprotocols supported by the server, set Upgrader.Subprotocols directly. +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + const badHandshake = "websocket: the client is not using the websocket protocol: " + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, badHandshake+"'websocket' token not found in 'Upgrade' header") + } + + if r.Method != http.MethodGet { + return u.returnError(w, r, http.StatusMethodNotAllowed, badHandshake+"request method is not GET") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-WebSocket-Extensions' headers are unsupported") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: request origin not allowed by Upgrader.CheckOrigin") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if !isValidChallengeKey(challengeKey) { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'Sec-WebSocket-Key' header must be Base64 encoded value of 16-byte in length") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + h, ok := w.(http.Hijacker) 
+ if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err := h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + var br *bufio.Reader + if u.ReadBufferSize == 0 && bufioReaderSize(netConn, brw.Reader) > 256 { + // Reuse hijacked buffered reader as connection reader. + br = brw.Reader + } + + buf := bufioWriterBuffer(netConn, brw.Writer) + + var writeBuf []byte + if u.WriteBufferPool == nil && u.WriteBufferSize == 0 && len(buf) >= maxFrameHeaderSize+256 { + // Reuse hijacked write buffer as connection buffer. + writeBuf = buf + } + + c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize, u.WriteBufferPool, br, writeBuf) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + // Use larger of hijacked buffer and connection write buffer for header. + p := buf + if len(c.writeBuf) > len(p) { + p = c.writeBuf + } + p = p[:0] + + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-WebSocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-WebSocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + if err := netConn.SetDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + + if u.HandshakeTimeout > 0 { + if err := netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + } + if _, err = netConn.Write(p); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + if u.HandshakeTimeout > 0 { + if err := netConn.SetWriteDeadline(time.Time{}); err != nil { + if err := netConn.Close(); err != nil { + log.Printf("websocket: failed to close network connection: %v", err) + } + return nil, err + } + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. 
An example implementation
+// of the same origin policy check is:
+//
+//	if req.Header.Get("Origin") != "http://"+req.Host {
+//		http.Error(w, "Origin not allowed", http.StatusForbidden)
+//		return
+//	}
+//
+// If the endpoint supports subprotocols, then the application is responsible
+// for negotiating the protocol used on the connection. Use the Subprotocols()
+// function to get the subprotocols requested by the client. Use the
+// Sec-Websocket-Protocol response header to specify the subprotocol selected
+// by the application.
+//
+// The responseHeader is included in the response to the client's upgrade
+// request. Use the responseHeader to specify cookies (Set-Cookie) and the
+// negotiated subprotocol (Sec-Websocket-Protocol).
+//
+// The connection buffers IO to the underlying network connection. The
+// readBufSize and writeBufSize parameters specify the size of the buffers to
+// use. Messages can be larger than the buffers.
+//
+// If the request is not a valid WebSocket handshake, then Upgrade returns an
+// error of type HandshakeError. Applications should handle this error by
+// replying to the client with an HTTP error response.
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+		// don't return errors to maintain backwards compatibility
+	}
+	u.CheckOrigin = func(r *http.Request) bool {
+		// allow all connections by default
+		return true
+	}
+	return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+	if h == "" {
+		return nil
+	}
+	protocols := strings.Split(h, ",")
+	for i := range protocols {
+		protocols[i] = strings.TrimSpace(protocols[i])
+	}
+	return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
+
+// bufioReaderSize returns the size of a bufio.Reader.
+func bufioReaderSize(originalReader io.Reader, br *bufio.Reader) int {
+	// This code assumes that peek on a reset reader returns
+	// bufio.Reader.buf[:0].
+	// TODO: Use bufio.Reader.Size() after Go 1.10
+	br.Reset(originalReader)
+	if p, err := br.Peek(0); err == nil {
+		return cap(p)
+	}
+	return 0
+}
+
+// writeHook is an io.Writer that records the last slice passed to it via
+// io.Writer.Write.
+type writeHook struct {
+	p []byte
+}
+
+func (wh *writeHook) Write(p []byte) (int, error) {
+	wh.p = p
+	return len(p), nil
+}
+
+// bufioWriterBuffer grabs the buffer from a bufio.Writer.
+func bufioWriterBuffer(originalWriter io.Writer, bw *bufio.Writer) []byte {
+	// This code assumes that bufio.Writer.buf[:1] is passed to the
+	// bufio.Writer's underlying writer.
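+	// Writing a single byte and flushing forces bufio.Writer to hand its
+	// internal buffer to writeHook, which records the slice so its full
+	// capacity can be reused.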
+ var wh writeHook + bw.Reset(&wh) + if err := bw.WriteByte(0); err != nil { + panic(err) + } + if err := bw.Flush(); err != nil { + log.Printf("websocket: bufioWriterBuffer: Flush: %v", err) + } + + bw.Reset(originalWriter) + + return wh.p[:cap(wh.p)] +} diff --git a/vendor/github.com/gorilla/websocket/tls_handshake.go b/vendor/github.com/gorilla/websocket/tls_handshake.go new file mode 100644 index 0000000000..7f38645348 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/tls_handshake.go @@ -0,0 +1,18 @@ +package websocket + +import ( + "context" + "crypto/tls" +) + +func doHandshake(ctx context.Context, tlsConn *tls.Conn, cfg *tls.Config) error { + if err := tlsConn.HandshakeContext(ctx); err != nil { + return err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go new file mode 100644 index 0000000000..9b1a629bff --- /dev/null +++ b/vendor/github.com/gorilla/websocket/util.go @@ -0,0 +1,298 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "crypto/rand" + "crypto/sha1" //#nosec G505 -- (CWE-327) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + "encoding/base64" + "io" + "net/http" + "strings" + "unicode/utf8" +) + +var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11") + +func computeAcceptKey(challengeKey string) string { + h := sha1.New() //#nosec G401 -- (CWE-326) https://datatracker.ietf.org/doc/html/rfc6455#page-54 + h.Write([]byte(challengeKey)) + h.Write(keyGUID) + return base64.StdEncoding.EncodeToString(h.Sum(nil)) +} + +func generateChallengeKey() (string, error) { + p := make([]byte, 16) + if _, err := io.ReadFull(rand.Reader, p); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(p), nil +} + +// Token octets per RFC 2616. +var isTokenOctet = [256]bool{ + '!': true, + '#': true, + '$': true, + '%': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} + +// skipSpace returns a slice of the string s with all leading RFC 2616 linear +// whitespace removed. +func skipSpace(s string) (rest string) { + i := 0 + for ; i < len(s); i++ { + if b := s[i]; b != ' ' && b != '\t' { + break + } + } + return s[i:] +} + +// nextToken returns the leading RFC 2616 token of s and the string following +// the token. 
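+// For example, nextToken("permessage-deflate; server_no_context_takeover")
+// returns "permessage-deflate" and "; server_no_context_takeover".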
+func nextToken(s string) (token, rest string) { + i := 0 + for ; i < len(s); i++ { + if !isTokenOctet[s[i]] { + break + } + } + return s[:i], s[i:] +} + +// nextTokenOrQuoted returns the leading token or quoted string per RFC 2616 +// and the string following the token or quoted string. +func nextTokenOrQuoted(s string) (value string, rest string) { + if !strings.HasPrefix(s, "\"") { + return nextToken(s) + } + s = s[1:] + for i := 0; i < len(s); i++ { + switch s[i] { + case '"': + return s[:i], s[i+1:] + case '\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} + +// equalASCIIFold returns true if s is equal to t with ASCII case folding as +// defined in RFC 4790. +func equalASCIIFold(s, t string) bool { + for s != "" && t != "" { + sr, size := utf8.DecodeRuneInString(s) + s = s[size:] + tr, size := utf8.DecodeRuneInString(t) + t = t[size:] + if sr == tr { + continue + } + if 'A' <= sr && sr <= 'Z' { + sr = sr + 'a' - 'A' + } + if 'A' <= tr && tr <= 'Z' { + tr = tr + 'a' - 'A' + } + if sr != tr { + return false + } + } + return s == t +} + +// tokenListContainsValue returns true if the 1#token header with the given +// name contains a token equal to value with ASCII case folding. +func tokenListContainsValue(header http.Header, name string, value string) bool { +headers: + for _, s := range header[name] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + s = skipSpace(s) + if s != "" && s[0] != ',' { + continue headers + } + if equalASCIIFold(t, value) { + return true + } + if s == "" { + continue headers + } + s = s[1:] + } + } + return false +} + +// parseExtensions parses WebSocket extensions from a header. +func parseExtensions(header http.Header) []map[string]string { + // From RFC 6455: + // + // Sec-WebSocket-Extensions = extension-list + // extension-list = 1#extension + // extension = extension-token *( ";" extension-param ) + // extension-token = registered-token + // registered-token = token + // extension-param = token [ "=" (token | quoted-string) ] + // ;When using the quoted-string syntax variant, the value + // ;after quoted-string unescaping MUST conform to the + // ;'token' ABNF. + + var result []map[string]string +headers: + for _, s := range header["Sec-Websocket-Extensions"] { + for { + var t string + t, s = nextToken(skipSpace(s)) + if t == "" { + continue headers + } + ext := map[string]string{"": t} + for { + s = skipSpace(s) + if !strings.HasPrefix(s, ";") { + break + } + var k string + k, s = nextToken(skipSpace(s[1:])) + if k == "" { + continue headers + } + s = skipSpace(s) + var v string + if strings.HasPrefix(s, "=") { + v, s = nextTokenOrQuoted(skipSpace(s[1:])) + s = skipSpace(s) + } + if s != "" && s[0] != ',' && s[0] != ';' { + continue headers + } + ext[k] = v + } + if s != "" && s[0] != ',' { + continue headers + } + result = append(result, ext) + if s == "" { + continue headers + } + s = s[1:] + } + } + return result +} + +// isValidChallengeKey checks if the argument meets RFC6455 specification. +func isValidChallengeKey(s string) bool { + // From RFC6455: + // + // A |Sec-WebSocket-Key| header field with a base64-encoded (see + // Section 4 of [RFC4648]) value that, when decoded, is 16 bytes in + // length. 
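+	//
+	// generateChallengeKey above produces such a value: 16 random bytes
+	// encoded as a 24-character base64 string.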
+
+	if s == "" {
+		return false
+	}
+	decoded, err := base64.StdEncoding.DecodeString(s)
+	return err == nil && len(decoded) == 16
+}
diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
new file mode 100644
index 0000000000..2298515904
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md
new file mode 100644
index 0000000000..60ae311700
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/README.md
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is actually very naive: it simply breaks lines at whitespace and explicit
+linebreaks. The goal of this library is word wrapping for CLI output, so the
+input is typically pretty well controlled human language. Because of this,
+the naive approach typically works just fine.
+
+In the future, we'd like to make the algorithm more advanced. We would do
+so without breaking the API.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
new file mode 100644
index 0000000000..f7bedda388
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go
@@ -0,0 +1,83 @@
+package wordwrap
+
+import (
+	"bytes"
+	"unicode"
+)
+
+const nbsp = 0xA0
+
+// WrapString wraps the given string within lim width in characters.
+//
+// Wrapping is currently naive and only happens at white-space. A future
+// version of the library will implement smarter wrapping. This means that
+// pathological cases can dramatically reach past the limit, such as a very
+// long word.
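+//
+// For example, mirroring the README:
+//
+//	wordwrap.WrapString("foo bar baz", 3) // "foo\nbar\nbaz"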
+func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + var wordBufLen, spaceBufLen uint + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+spaceBufLen > lim { + current = 0 + } else { + current += spaceBufLen + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + spaceBufLen = 0 + } else { + current += spaceBufLen + wordBufLen + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + spaceBufLen = 0 + wordBuf.WriteTo(buf) + wordBuf.Reset() + wordBufLen = 0 + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) && char != nbsp { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += spaceBufLen + wordBufLen + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + spaceBufLen = 0 + wordBuf.WriteTo(buf) + wordBuf.Reset() + wordBufLen = 0 + } + + spaceBuf.WriteRune(char) + spaceBufLen++ + } else { + wordBuf.WriteRune(char) + wordBufLen++ + + if current+wordBufLen+spaceBufLen > lim && wordBufLen < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + spaceBufLen = 0 + } + } + } + + if wordBuf.Len() == 0 { + if current+spaceBufLen <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/moby/spdystream/CONTRIBUTING.md b/vendor/github.com/moby/spdystream/CONTRIBUTING.md new file mode 100644 index 0000000000..d4eddcc539 --- /dev/null +++ b/vendor/github.com/moby/spdystream/CONTRIBUTING.md @@ -0,0 +1,13 @@ +# Contributing to SpdyStream + +Want to hack on spdystream? Awesome! Here are instructions to get you +started. + +SpdyStream is a part of the [Docker](https://docker.io) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read +[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md). + +Happy hacking! diff --git a/vendor/github.com/moby/spdystream/LICENSE b/vendor/github.com/moby/spdystream/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/moby/spdystream/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/spdystream/MAINTAINERS b/vendor/github.com/moby/spdystream/MAINTAINERS new file mode 100644 index 0000000000..26e5ec828a --- /dev/null +++ b/vendor/github.com/moby/spdystream/MAINTAINERS @@ -0,0 +1,40 @@ +# Spdystream maintainers file +# +# This file describes who runs the moby/spdystream project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "adisky", + "dims", + "dmcgowan", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.adisky] + Name = "Aditi Sharma" + Email = "adi.sky17@gmail.com" + GitHub = "adisky" + + [people.dims] + Name = "Davanum Srinivas" + Email = "davanum@gmail.com" + GitHub = "dims" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcg.dev" + GitHub = "dmcgowan" diff --git a/vendor/github.com/moby/spdystream/NOTICE b/vendor/github.com/moby/spdystream/NOTICE new file mode 100644 index 0000000000..b9b11c9ab7 --- /dev/null +++ b/vendor/github.com/moby/spdystream/NOTICE @@ -0,0 +1,5 @@ +SpdyStream +Copyright 2014-2021 Docker Inc. + +This product includes software developed at +Docker Inc. (https://www.docker.com/). diff --git a/vendor/github.com/moby/spdystream/README.md b/vendor/github.com/moby/spdystream/README.md new file mode 100644 index 0000000000..b84e983439 --- /dev/null +++ b/vendor/github.com/moby/spdystream/README.md @@ -0,0 +1,77 @@ +# SpdyStream + +A multiplexed stream library using spdy + +## Usage + +Client example (connecting to mirroring server without auth) + +```go +package main + +import ( + "fmt" + "github.com/moby/spdystream" + "net" + "net/http" +) + +func main() { + conn, err := net.Dial("tcp", "localhost:8080") + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.NoOpStreamHandler) + stream, err := spdyConn.CreateStream(http.Header{}, nil, false) + if err != nil { + panic(err) + } + + stream.Wait() + + fmt.Fprint(stream, "Writing to stream") + + buf := make([]byte, 25) + stream.Read(buf) + fmt.Println(string(buf)) + + stream.Close() +} +``` + +Server example (mirroring server without auth) + +```go +package main + +import ( + "github.com/moby/spdystream" + "net" +) + +func main() { + listener, err := net.Listen("tcp", "localhost:8080") + if err != nil { + panic(err) + } + for { + conn, err := listener.Accept() + if err != nil { + panic(err) + } + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + panic(err) + } + go spdyConn.Serve(spdystream.MirrorStreamHandler) + } +} +``` + +## Copyright and license + +Copyright 2013-2021 Docker, inc. Released under the [Apache 2.0 license](LICENSE). diff --git a/vendor/github.com/moby/spdystream/connection.go b/vendor/github.com/moby/spdystream/connection.go new file mode 100644 index 0000000000..d906bb05ce --- /dev/null +++ b/vendor/github.com/moby/spdystream/connection.go @@ -0,0 +1,972 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package spdystream + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "sync" + "time" + + "github.com/moby/spdystream/spdy" +) + +var ( + ErrInvalidStreamId = errors.New("Invalid stream id") + ErrTimeout = errors.New("Timeout occurred") + ErrReset = errors.New("Stream reset") + ErrWriteClosedStream = errors.New("Write on closed stream") +) + +const ( + FRAME_WORKERS = 5 + QUEUE_SIZE = 50 +) + +type StreamHandler func(stream *Stream) + +type AuthHandler func(header http.Header, slot uint8, parent uint32) bool + +type idleAwareFramer struct { + f *spdy.Framer + conn *Connection + writeLock sync.Mutex + resetChan chan struct{} + setTimeoutLock sync.Mutex + setTimeoutChan chan time.Duration + timeout time.Duration +} + +func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer { + iaf := &idleAwareFramer{ + f: framer, + resetChan: make(chan struct{}, 2), + // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about + // the same time the connection is being closed + setTimeoutChan: make(chan time.Duration, 1), + } + return iaf +} + +func (i *idleAwareFramer) monitor() { + var ( + timer *time.Timer + expired <-chan time.Time + resetChan = i.resetChan + setTimeoutChan = i.setTimeoutChan + ) +Loop: + for { + select { + case timeout := <-i.setTimeoutChan: + i.timeout = timeout + if timeout == 0 { + if timer != nil { + timer.Stop() + } + } else { + if timer == nil { + timer = time.NewTimer(timeout) + expired = timer.C + } else { + timer.Reset(timeout) + } + } + case <-resetChan: + if timer != nil && i.timeout > 0 { + timer.Reset(i.timeout) + } + case <-expired: + i.conn.streamCond.L.Lock() + streams := i.conn.streams + i.conn.streams = make(map[spdy.StreamId]*Stream) + i.conn.streamCond.Broadcast() + i.conn.streamCond.L.Unlock() + go func() { + for _, stream := range streams { + stream.resetStream() + } + i.conn.Close() + }() + case <-i.conn.closeChan: + if timer != nil { + timer.Stop() + } + + // Start a goroutine to drain resetChan. This is needed because we've seen + // some unit tests with large numbers of goroutines get into a situation + // where resetChan fills up, at least 1 call to Write() is still trying to + // send to resetChan, the connection gets closed, and this case statement + // attempts to grab the write lock that Write() already has, causing a + // deadlock. + // + // See https://github.com/moby/spdystream/issues/49 for more details. + go func() { + for range resetChan { + } + }() + + go func() { + for range setTimeoutChan { + } + }() + + i.writeLock.Lock() + close(resetChan) + i.resetChan = nil + i.writeLock.Unlock() + + i.setTimeoutLock.Lock() + close(i.setTimeoutChan) + i.setTimeoutChan = nil + i.setTimeoutLock.Unlock() + + break Loop + } + } + + // Drain resetChan + for range resetChan { + } +} + +func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error { + i.writeLock.Lock() + defer i.writeLock.Unlock() + if i.resetChan == nil { + return io.EOF + } + err := i.f.WriteFrame(frame) + if err != nil { + return err + } + + i.resetChan <- struct{}{} + + return nil +} + +func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) { + frame, err := i.f.ReadFrame() + if err != nil { + return nil, err + } + + // resetChan should never be closed since it is only closed + // when the connection has closed its closeChan. 
This closure + // only occurs after all Reads have finished + // TODO (dmcgowan): refactor relationship into connection + i.resetChan <- struct{}{} + + return frame, nil +} + +func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) { + i.setTimeoutLock.Lock() + defer i.setTimeoutLock.Unlock() + + if i.setTimeoutChan == nil { + return + } + + i.setTimeoutChan <- timeout +} + +type Connection struct { + conn net.Conn + framer *idleAwareFramer + + closeChan chan bool + goneAway bool + lastStreamChan chan<- *Stream + goAwayTimeout time.Duration + closeTimeout time.Duration + + streamLock *sync.RWMutex + streamCond *sync.Cond + streams map[spdy.StreamId]*Stream + + nextIdLock sync.Mutex + receiveIdLock sync.Mutex + nextStreamId spdy.StreamId + receivedStreamId spdy.StreamId + + pingIdLock sync.Mutex + pingId uint32 + pingChans map[uint32]chan error + + shutdownLock sync.Mutex + shutdownChan chan error + hasShutdown bool + + // for testing https://github.com/moby/spdystream/pull/56 + dataFrameHandler func(*spdy.DataFrame) error +} + +// NewConnection creates a new spdy connection from an existing +// network connection. +func NewConnection(conn net.Conn, server bool) (*Connection, error) { + framer, framerErr := spdy.NewFramer(conn, conn) + if framerErr != nil { + return nil, framerErr + } + idleAwareFramer := newIdleAwareFramer(framer) + var sid spdy.StreamId + var rid spdy.StreamId + var pid uint32 + if server { + sid = 2 + rid = 1 + pid = 2 + } else { + sid = 1 + rid = 2 + pid = 1 + } + + streamLock := new(sync.RWMutex) + streamCond := sync.NewCond(streamLock) + + session := &Connection{ + conn: conn, + framer: idleAwareFramer, + + closeChan: make(chan bool), + goAwayTimeout: time.Duration(0), + closeTimeout: time.Duration(0), + + streamLock: streamLock, + streamCond: streamCond, + streams: make(map[spdy.StreamId]*Stream), + nextStreamId: sid, + receivedStreamId: rid, + + pingId: pid, + pingChans: make(map[uint32]chan error), + + shutdownChan: make(chan error), + } + session.dataFrameHandler = session.handleDataFrame + idleAwareFramer.conn = session + go idleAwareFramer.monitor() + + return session, nil +} + +// Ping sends a ping frame across the connection and +// returns the response time +func (s *Connection) Ping() (time.Duration, error) { + pid := s.pingId + s.pingIdLock.Lock() + if s.pingId > 0x7ffffffe { + s.pingId = s.pingId - 0x7ffffffe + } else { + s.pingId = s.pingId + 2 + } + s.pingIdLock.Unlock() + pingChan := make(chan error) + s.pingChans[pid] = pingChan + defer delete(s.pingChans, pid) + + frame := &spdy.PingFrame{Id: pid} + startTime := time.Now() + writeErr := s.framer.WriteFrame(frame) + if writeErr != nil { + return time.Duration(0), writeErr + } + select { + case <-s.closeChan: + return time.Duration(0), errors.New("connection closed") + case err, ok := <-pingChan: + if ok && err != nil { + return time.Duration(0), err + } + break + } + return time.Since(startTime), nil +} + +// Serve handles frames sent from the server, including reply frames +// which are needed to fully initiate connections. Both clients and servers +// should call Serve in a separate goroutine before creating streams. +func (s *Connection) Serve(newHandler StreamHandler) { + // use a WaitGroup to wait for all frames to be drained after receiving + // go-away. 
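+	// Each frame worker below signals wg.Done once its queue has been
+	// fully drained, so the wg.Wait further down only returns after every
+	// queued frame has been handled.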
+	var wg sync.WaitGroup
+
+	// Partition queues to ensure stream frames are handled
+	// by the same worker, ensuring order is maintained
+	frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
+	for i := 0; i < FRAME_WORKERS; i++ {
+		frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
+
+		// Ensure frame queue is drained when connection is closed
+		go func(frameQueue *PriorityFrameQueue) {
+			<-s.closeChan
+			frameQueue.Drain()
+		}(frameQueues[i])
+
+		wg.Add(1)
+		go func(frameQueue *PriorityFrameQueue) {
+			// let the WaitGroup know this worker is done
+			defer wg.Done()
+
+			s.frameHandler(frameQueue, newHandler)
+		}(frameQueues[i])
+	}
+
+	var (
+		partitionRoundRobin int
+		goAwayFrame         *spdy.GoAwayFrame
+	)
+Loop:
+	for {
+		readFrame, err := s.framer.ReadFrame()
+		if err != nil {
+			if err != io.EOF {
+				debugMessage("frame read error: %s", err)
+			} else {
+				debugMessage("(%p) EOF received", s)
+			}
+			break
+		}
+		var priority uint8
+		var partition int
+		switch frame := readFrame.(type) {
+		case *spdy.SynStreamFrame:
+			if s.checkStreamFrame(frame) {
+				priority = frame.Priority
+				partition = int(frame.StreamId % FRAME_WORKERS)
+				debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
+				s.addStreamFrame(frame)
+			} else {
+				debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
+				continue
+			}
+		case *spdy.SynReplyFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.DataFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.RstStreamFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.HeadersFrame:
+			priority = s.getStreamPriority(frame.StreamId)
+			partition = int(frame.StreamId % FRAME_WORKERS)
+		case *spdy.PingFrame:
+			priority = 0
+			partition = partitionRoundRobin
+			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+		case *spdy.GoAwayFrame:
+			// hold on to the go away frame and exit the loop
+			goAwayFrame = frame
+			break Loop
+		default:
+			priority = 7
+			partition = partitionRoundRobin
+			partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+		}
+		frameQueues[partition].Push(readFrame, priority)
+	}
+	close(s.closeChan)
+
+	// wait for all frame handler workers to indicate they've drained their queues
+	// before handling the go away frame
+	wg.Wait()
+
+	if goAwayFrame != nil {
+		s.handleGoAwayFrame(goAwayFrame)
+	}
+
+	// now it's safe to close remote channels and empty s.streams
+	s.streamCond.L.Lock()
+	// notify streams that they're now closed, which will
+	// unblock any stream Read() calls
+	for _, stream := range s.streams {
+		stream.closeRemoteChannels()
+	}
+	s.streams = make(map[spdy.StreamId]*Stream)
+	s.streamCond.Broadcast()
+	s.streamCond.L.Unlock()
+}
+
+func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
+	for {
+		popFrame := frameQueue.Pop()
+		if popFrame == nil {
+			return
+		}
+
+		var frameErr error
+		switch frame := popFrame.(type) {
+		case *spdy.SynStreamFrame:
+			frameErr = s.handleStreamFrame(frame, newHandler)
+		case *spdy.SynReplyFrame:
+			frameErr = s.handleReplyFrame(frame)
+		case *spdy.DataFrame:
+			frameErr = s.dataFrameHandler(frame)
+		case *spdy.RstStreamFrame:
+			frameErr = s.handleResetFrame(frame)
+		case *spdy.HeadersFrame:
+			frameErr = s.handleHeaderFrame(frame)
+		case *spdy.PingFrame:
+			frameErr = s.handlePingFrame(frame)
+		case *spdy.GoAwayFrame:
+			frameErr = s.handleGoAwayFrame(frame)
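+		// Unknown frame types are reported as errors below but do not
+		// terminate the worker loop.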
default: + frameErr = fmt.Errorf("unhandled frame type: %T", frame) + } + + if frameErr != nil { + debugMessage("frame handling error: %s", frameErr) + } + } +} + +func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 { + stream, streamOk := s.getStream(streamId) + if !streamOk { + return 7 + } + return stream.priority +} + +func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) { + var parent *Stream + if frame.AssociatedToStreamId != spdy.StreamId(0) { + parent, _ = s.getStream(frame.AssociatedToStreamId) + } + + stream := &Stream{ + streamId: frame.StreamId, + parent: parent, + conn: s, + startChan: make(chan error), + headers: frame.Headers, + finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00, + replyCond: sync.NewCond(new(sync.Mutex)), + dataChan: make(chan []byte), + headerChan: make(chan http.Header), + closeChan: make(chan bool), + priority: frame.Priority, + } + if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 { + stream.closeRemoteChannels() + } + + s.addStream(stream) +} + +// checkStreamFrame checks to see if a stream frame is allowed. +// If the stream is invalid, then a reset frame with protocol error +// will be returned. +func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool { + s.receiveIdLock.Lock() + defer s.receiveIdLock.Unlock() + if s.goneAway { + return false + } + validationErr := s.validateStreamId(frame.StreamId) + if validationErr != nil { + go func() { + resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId) + if resetErr != nil { + debugMessage("reset error: %s", resetErr) + } + }() + return false + } + return true +} + +func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error { + stream, ok := s.getStream(frame.StreamId) + if !ok { + return fmt.Errorf("Missing stream: %d", frame.StreamId) + } + + newHandler(stream) + + return nil +} + +func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error { + debugMessage("(%p) Reply frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("Reply frame gone away for %d", frame.StreamId) + // Stream has already gone away + return nil + } + if stream.replied { + // Stream has already received reply + return nil + } + stream.replied = true + + // TODO Check for error + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + close(stream.startChan) + + return nil +} + +func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already been removed + return nil + } + s.removeStream(stream) + stream.closeRemoteChannels() + + if !stream.replied { + stream.replied = true + stream.startChan <- ErrReset + close(stream.startChan) + } + + stream.finishLock.Lock() + stream.finished = true + stream.finishLock.Unlock() + + return nil +} + +func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error { + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + // Stream has already gone away + return nil + } + if !stream.replied { + // No reply received...Protocol error? + return nil + } + + // TODO limit headers while not blocking (use buffered chan or goroutine?) 
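+	// Hand the header block to the stream's reader, unless the stream has
+	// been closed in the meantime; this send blocks until one side is ready.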
+ select { + case <-stream.closeChan: + return nil + case stream.headerChan <- frame.Headers: + } + + if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + + return nil +} + +func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error { + debugMessage("(%p) Data frame received for %d", s, frame.StreamId) + stream, streamOk := s.getStream(frame.StreamId) + if !streamOk { + debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId) + // Stream has already gone away + return nil + } + if !stream.replied { + debugMessage("(%p) Data frame not replied %d", s, frame.StreamId) + // No reply received...Protocol error? + return nil + } + + debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId) + if len(frame.Data) > 0 { + stream.dataLock.RLock() + select { + case <-stream.closeChan: + debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId) + case stream.dataChan <- frame.Data: + debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId) + } + stream.dataLock.RUnlock() + } + if (frame.Flags & spdy.DataFlagFin) != 0x00 { + s.remoteStreamFinish(stream) + } + return nil +} + +func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error { + if s.pingId&0x01 != frame.Id&0x01 { + return s.framer.WriteFrame(frame) + } + pingChan, pingOk := s.pingChans[frame.Id] + if pingOk { + close(pingChan) + } + return nil +} + +func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error { + debugMessage("(%p) Go away received", s) + s.receiveIdLock.Lock() + if s.goneAway { + s.receiveIdLock.Unlock() + return nil + } + s.goneAway = true + s.receiveIdLock.Unlock() + + if s.lastStreamChan != nil { + stream, _ := s.getStream(frame.LastGoodStreamId) + go func() { + s.lastStreamChan <- stream + }() + } + + // Do not block frame handler waiting for closure + go s.shutdown(s.goAwayTimeout) + + return nil +} + +func (s *Connection) remoteStreamFinish(stream *Stream) { + stream.closeRemoteChannels() + + stream.finishLock.Lock() + if stream.finished { + // Stream is fully closed, cleanup + s.removeStream(stream) + } + stream.finishLock.Unlock() +} + +// CreateStream creates a new spdy stream using the parameters for +// creating the stream frame. The stream frame will be sent upon +// calling this function, however this function does not wait for +// the reply frame. If waiting for the reply is desired, use +// the stream Wait or WaitTimeout function on the stream returned +// by this function. +func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) { + // MUST synchronize stream creation (all the way to writing the frame) + // as stream IDs **MUST** increase monotonically. 
+	s.nextIdLock.Lock()
+	defer s.nextIdLock.Unlock()
+
+	streamId := s.getNextStreamId()
+	if streamId == 0 {
+		return nil, fmt.Errorf("Unable to get new stream id")
+	}
+
+	stream := &Stream{
+		streamId:   streamId,
+		parent:     parent,
+		conn:       s,
+		startChan:  make(chan error),
+		headers:    headers,
+		dataChan:   make(chan []byte),
+		headerChan: make(chan http.Header),
+		closeChan:  make(chan bool),
+	}
+
+	debugMessage("(%p) (%p) Create stream", s, stream)
+
+	s.addStream(stream)
+
+	return stream, s.sendStream(stream, fin)
+}
+
+func (s *Connection) shutdown(closeTimeout time.Duration) {
+	// TODO Ensure this isn't called multiple times
+	s.shutdownLock.Lock()
+	if s.hasShutdown {
+		s.shutdownLock.Unlock()
+		return
+	}
+	s.hasShutdown = true
+	s.shutdownLock.Unlock()
+
+	var timeout <-chan time.Time
+	if closeTimeout > time.Duration(0) {
+		timeout = time.After(closeTimeout)
+	}
+	streamsClosed := make(chan bool)
+
+	go func() {
+		s.streamCond.L.Lock()
+		for len(s.streams) > 0 {
+			debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
+			s.streamCond.Wait()
+		}
+		s.streamCond.L.Unlock()
+		close(streamsClosed)
+	}()
+
+	var err error
+	select {
+	case <-streamsClosed:
+		// No active streams, close should be safe
+		err = s.conn.Close()
+	case <-timeout:
+		// Force ungraceful close
+		err = s.conn.Close()
+		// Wait for cleanup to clear active streams
+		<-streamsClosed
+	}
+
+	if err != nil {
+		duration := 10 * time.Minute
+		time.AfterFunc(duration, func() {
+			select {
+			case err, ok := <-s.shutdownChan:
+				if ok {
+					debugMessage("Unhandled close error after %s: %s", duration, err)
+				}
+			default:
+			}
+		})
+		s.shutdownChan <- err
+	}
+	close(s.shutdownChan)
+}
+
+// Close closes the spdy connection by sending a GoAway frame and
+// initiating shutdown.
+func (s *Connection) Close() error {
+	s.receiveIdLock.Lock()
+	if s.goneAway {
+		s.receiveIdLock.Unlock()
+		return nil
+	}
+	s.goneAway = true
+	s.receiveIdLock.Unlock()
+
+	var lastStreamId spdy.StreamId
+	if s.receivedStreamId > 2 {
+		lastStreamId = s.receivedStreamId - 2
+	}
+
+	goAwayFrame := &spdy.GoAwayFrame{
+		LastGoodStreamId: lastStreamId,
+		Status:           spdy.GoAwayOK,
+	}
+
+	err := s.framer.WriteFrame(goAwayFrame)
+	go s.shutdown(s.closeTimeout)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// CloseWait closes the connection and waits for shutdown
+// to finish. Note the underlying network Connection
+// is not closed until the end of shutdown.
+func (s *Connection) CloseWait() error {
+	closeErr := s.Close()
+	if closeErr != nil {
+		return closeErr
+	}
+	shutdownErr, ok := <-s.shutdownChan
+	if ok {
+		return shutdownErr
+	}
+	return nil
+}
+
+// Wait waits for the connection to finish shutdown or for
+// the wait timeout duration to expire. This needs to be
+// called either after Close has been called or the GoAway
+// frame has been received. If the wait timeout is 0, this function
+// will block until shutdown finishes. If wait is never called
+// and a shutdown error occurs, that error will be logged as an
+// unhandled error.
+func (s *Connection) Wait(waitTimeout time.Duration) error {
+	var timeout <-chan time.Time
+	if waitTimeout > time.Duration(0) {
+		timeout = time.After(waitTimeout)
+	}
+
+	select {
+	case err, ok := <-s.shutdownChan:
+		if ok {
+			return err
+		}
+	case <-timeout:
+		return ErrTimeout
+	}
+	return nil
+}
+
+// NotifyClose registers a channel to be notified when the remote
+// peer indicates connection closure. The last stream to be
+// received by the remote will be sent on the channel.
The notify +// timeout will determine the duration between go away received +// and the connection being closed. +func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) { + s.goAwayTimeout = timeout + s.lastStreamChan = c +} + +// SetCloseTimeout sets the amount of time close will wait for +// streams to finish before terminating the underlying network +// connection. Setting the timeout to 0 will cause close to +// wait forever, which is the default. +func (s *Connection) SetCloseTimeout(timeout time.Duration) { + s.closeTimeout = timeout +} + +// SetIdleTimeout sets the amount of time the connection may sit idle before +// it is forcefully terminated. +func (s *Connection) SetIdleTimeout(timeout time.Duration) { + s.framer.setIdleTimeout(timeout) +} + +func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + headerFrame := &spdy.HeadersFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(headerFrame) +} + +func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + } + + replyFrame := &spdy.SynReplyFrame{ + StreamId: stream.streamId, + Headers: headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(replyFrame) +} + +func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error { + resetFrame := &spdy.RstStreamFrame{ + StreamId: streamId, + Status: status, + } + + return s.framer.WriteFrame(resetFrame) +} + +func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error { + return s.sendResetFrame(status, stream.streamId) +} + +func (s *Connection) sendStream(stream *Stream, fin bool) error { + var flags spdy.ControlFlags + if fin { + flags = spdy.ControlFlagFin + stream.finished = true + } + + var parentId spdy.StreamId + if stream.parent != nil { + parentId = stream.parent.streamId + } + + streamFrame := &spdy.SynStreamFrame{ + StreamId: spdy.StreamId(stream.streamId), + AssociatedToStreamId: spdy.StreamId(parentId), + Headers: stream.headers, + CFHeader: spdy.ControlFrameHeader{Flags: flags}, + } + + return s.framer.WriteFrame(streamFrame) +} + +// getNextStreamId returns the next sequential id +// every call should produce a unique value or an error +func (s *Connection) getNextStreamId() spdy.StreamId { + sid := s.nextStreamId + if sid > 0x7fffffff { + return 0 + } + s.nextStreamId = s.nextStreamId + 2 + return sid +} + +// PeekNextStreamId returns the next sequential id and keeps the next id untouched +func (s *Connection) PeekNextStreamId() spdy.StreamId { + sid := s.nextStreamId + return sid +} + +func (s *Connection) validateStreamId(rid spdy.StreamId) error { + if rid > 0x7fffffff || rid < s.receivedStreamId { + return ErrInvalidStreamId + } + s.receivedStreamId = rid + 2 + return nil +} + +func (s *Connection) addStream(stream *Stream) { + s.streamCond.L.Lock() + s.streams[stream.streamId] = stream + debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + s.streamCond.L.Unlock() +} + +func (s *Connection) removeStream(stream *Stream) { + s.streamCond.L.Lock() + delete(s.streams, stream.streamId) + debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId) + s.streamCond.Broadcast() + 
s.streamCond.L.Unlock()
+}
+
+func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
+	s.streamLock.RLock()
+	stream, ok = s.streams[streamId]
+	s.streamLock.RUnlock()
+	return
+}
+
+// FindStream looks up the given stream id and either waits for the
+// stream to be found or returns nil if the stream id is no longer
+// valid.
+func (s *Connection) FindStream(streamId uint32) *Stream {
+	var stream *Stream
+	var ok bool
+	s.streamCond.L.Lock()
+	stream, ok = s.streams[spdy.StreamId(streamId)]
+	debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
+	for !ok && streamId >= uint32(s.receivedStreamId) {
+		s.streamCond.Wait()
+		stream, ok = s.streams[spdy.StreamId(streamId)]
+	}
+	s.streamCond.L.Unlock()
+	return stream
+}
+
+func (s *Connection) CloseChan() <-chan bool {
+	return s.closeChan
+}
diff --git a/vendor/github.com/moby/spdystream/handlers.go b/vendor/github.com/moby/spdystream/handlers.go
new file mode 100644
index 0000000000..d68f61f812
--- /dev/null
+++ b/vendor/github.com/moby/spdystream/handlers.go
@@ -0,0 +1,52 @@
+/*
+   Copyright 2014-2021 Docker Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package spdystream
+
+import (
+	"io"
+	"net/http"
+)
+
+// MirrorStreamHandler mirrors all streams.
+func MirrorStreamHandler(stream *Stream) {
+	replyErr := stream.SendReply(http.Header{}, false)
+	if replyErr != nil {
+		return
+	}
+
+	go func() {
+		io.Copy(stream, stream)
+		stream.Close()
+	}()
+	go func() {
+		for {
+			header, receiveErr := stream.ReceiveHeader()
+			if receiveErr != nil {
+				return
+			}
+			sendErr := stream.SendHeader(header, false)
+			if sendErr != nil {
+				return
+			}
+		}
+	}()
+}
+
+// NoOpStreamHandler sends an empty reply when a stream connects and
+// otherwise does nothing.
+func NoOpStreamHandler(stream *Stream) {
+	stream.SendReply(http.Header{}, false)
+}
diff --git a/vendor/github.com/moby/spdystream/priority.go b/vendor/github.com/moby/spdystream/priority.go
new file mode 100644
index 0000000000..d8eb3516ca
--- /dev/null
+++ b/vendor/github.com/moby/spdystream/priority.go
@@ -0,0 +1,114 @@
+/*
+   Copyright 2014-2021 Docker Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/ + +package spdystream + +import ( + "container/heap" + "sync" + + "github.com/moby/spdystream/spdy" +) + +type prioritizedFrame struct { + frame spdy.Frame + priority uint8 + insertId uint64 +} + +type frameQueue []*prioritizedFrame + +func (fq frameQueue) Len() int { + return len(fq) +} + +func (fq frameQueue) Less(i, j int) bool { + if fq[i].priority == fq[j].priority { + return fq[i].insertId < fq[j].insertId + } + return fq[i].priority < fq[j].priority +} + +func (fq frameQueue) Swap(i, j int) { + fq[i], fq[j] = fq[j], fq[i] +} + +func (fq *frameQueue) Push(x interface{}) { + *fq = append(*fq, x.(*prioritizedFrame)) +} + +func (fq *frameQueue) Pop() interface{} { + old := *fq + n := len(old) + *fq = old[0 : n-1] + return old[n-1] +} + +type PriorityFrameQueue struct { + queue *frameQueue + c *sync.Cond + size int + nextInsertId uint64 + drain bool +} + +func NewPriorityFrameQueue(size int) *PriorityFrameQueue { + queue := make(frameQueue, 0, size) + heap.Init(&queue) + + return &PriorityFrameQueue{ + queue: &queue, + size: size, + c: sync.NewCond(&sync.Mutex{}), + } +} + +func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() >= q.size { + q.c.Wait() + } + pFrame := &prioritizedFrame{ + frame: frame, + priority: priority, + insertId: q.nextInsertId, + } + q.nextInsertId = q.nextInsertId + 1 + heap.Push(q.queue, pFrame) + q.c.Signal() +} + +func (q *PriorityFrameQueue) Pop() spdy.Frame { + q.c.L.Lock() + defer q.c.L.Unlock() + for q.queue.Len() == 0 { + if q.drain { + return nil + } + q.c.Wait() + } + frame := heap.Pop(q.queue).(*prioritizedFrame).frame + q.c.Signal() + return frame +} + +func (q *PriorityFrameQueue) Drain() { + q.c.L.Lock() + defer q.c.L.Unlock() + q.drain = true + q.c.Broadcast() +} diff --git a/vendor/github.com/moby/spdystream/spdy/dictionary.go b/vendor/github.com/moby/spdystream/spdy/dictionary.go new file mode 100644 index 0000000000..392232f174 --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/dictionary.go @@ -0,0 +1,203 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +// headerDictionary is the dictionary sent to the zlib compressor/decompressor. 
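+// The bytes below encode length-prefixed header names and values
+// (e.g. 0x00 0x00 0x00 0x07 "options") followed by common status-line,
+// date, and content-type strings, matching the standard SPDY/3
+// compression dictionary.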
+var headerDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, + 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, + 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, + 0x13, 0x70, 0x72, 
0x6f, 0x78, 0x79, 0x2d, 0x61, + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, + 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, + 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, + 0x62, 0x6c, 0x65, 
0x4a, 0x61, 0x6e, 0x20, 0x46, + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, +} diff --git a/vendor/github.com/moby/spdystream/spdy/read.go b/vendor/github.com/moby/spdystream/spdy/read.go new file mode 100644 index 0000000000..75ea045b8e --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/read.go @@ -0,0 +1,364 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package spdy + +import ( + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynStreamFrame(h, frame) +} + +func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readSynReplyFrame(h, frame) +} + +func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + var numSettings uint32 + if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil { + return err + } + frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings) + for i := uint32(0); i < numSettings; i++ { + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil { + return err + } + frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24) + frame.FlagIdValues[i].Id &= 0xffffff + if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil { + return err + } + } + return nil +} + +func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil { + return err + } + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, StreamId(frame.Id)} + } + return nil +} + +func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.LastGoodStreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil { + return err + } + return nil +} + +func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error { + return f.readHeadersFrame(h, frame) +} + +func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error { + frame.CFHeader = h + if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if frame.CFHeader.Flags != 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if frame.CFHeader.length != 8 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil { + return err + } + return nil +} + +func newControlFrame(frameType ControlFrameType) (controlFrame, error) { + ctor, ok := cframeCtor[frameType] + if !ok { + return nil, &Error{Err: InvalidControlFrame} + } + return ctor(), nil +} + +var cframeCtor = map[ControlFrameType]func() controlFrame{ + TypeSynStream: func() controlFrame { return new(SynStreamFrame) }, + TypeSynReply: func() controlFrame { return new(SynReplyFrame) }, + TypeRstStream: func() controlFrame { return new(RstStreamFrame) }, + TypeSettings: func() controlFrame { return new(SettingsFrame) }, + TypePing: func() controlFrame { return new(PingFrame) }, + TypeGoAway: 
func() controlFrame { return new(GoAwayFrame) }, + TypeHeaders: func() controlFrame { return new(HeadersFrame) }, + TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) }, +} + +func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error { + if f.headerDecompressor != nil { + f.headerReader.N = payloadSize + return nil + } + f.headerReader = io.LimitedReader{R: f.r, N: payloadSize} + decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary)) + if err != nil { + return err + } + f.headerDecompressor = decompressor + return nil +} + +// ReadFrame reads SPDY encoded data and returns a decompressed Frame. +func (f *Framer) ReadFrame() (Frame, error) { + var firstWord uint32 + if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil { + return nil, err + } + if firstWord&0x80000000 != 0 { + frameType := ControlFrameType(firstWord & 0xffff) + version := uint16(firstWord >> 16 & 0x7fff) + return f.parseControlFrame(version, frameType) + } + return f.parseDataFrame(StreamId(firstWord & 0x7fffffff)) +} + +func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + flags := ControlFlags((length & 0xff000000) >> 24) + length &= 0xffffff + header := ControlFrameHeader{version, frameType, flags, length} + cframe, err := newControlFrame(frameType) + if err != nil { + return nil, err + } + if err = cframe.read(header, f); err != nil { + return nil, err + } + return cframe, nil +} + +func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) { + var numHeaders uint32 + if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil { + return nil, err + } + var e error + h := make(http.Header, int(numHeaders)) + for i := 0; i < int(numHeaders); i++ { + var length uint32 + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + nameBytes := make([]byte, length) + if _, err := io.ReadFull(r, nameBytes); err != nil { + return nil, err + } + name := string(nameBytes) + if name != strings.ToLower(name) { + e = &Error{UnlowercasedHeaderName, streamId} + name = strings.ToLower(name) + } + if h[name] != nil { + e = &Error{DuplicateHeaders, streamId} + } + if err := binary.Read(r, binary.BigEndian, &length); err != nil { + return nil, err + } + value := make([]byte, length) + if _, err := io.ReadFull(r, value); err != nil { + return nil, err + } + valueList := strings.Split(string(value), headerValueSeparator) + for _, v := range valueList { + h.Add(name, v) + } + } + if e != nil { + return h, e + } + return h, nil +} + +func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil { + return err + } + frame.Priority >>= 5 + if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 10)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && 
f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidReqHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + for h := range frame.Headers { + if invalidRespHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error { + frame.CFHeader = h + var err error + if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil { + return err + } + reader := f.r + if !f.headerCompressionDisabled { + err := f.uncorkHeaderDecompressor(int64(h.length - 4)) + if err != nil { + return err + } + reader = f.headerDecompressor + } + frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId) + if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) { + err = &Error{WrongCompressedPayloadSize, 0} + } + if err != nil { + return err + } + var invalidHeaders map[string]bool + if frame.StreamId%2 == 0 { + invalidHeaders = invalidReqHeaders + } else { + invalidHeaders = invalidRespHeaders + } + for h := range frame.Headers { + if invalidHeaders[h] { + return &Error{InvalidHeaderPresent, frame.StreamId} + } + } + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + return nil +} + +func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) { + var length uint32 + if err := binary.Read(f.r, binary.BigEndian, &length); err != nil { + return nil, err + } + var frame DataFrame + frame.StreamId = streamId + frame.Flags = DataFlags(length >> 24) + length &= 0xffffff + frame.Data = make([]byte, length) + if _, err := io.ReadFull(f.r, frame.Data); err != nil { + return nil, err + } + if frame.StreamId == 0 { + return nil, &Error{ZeroStreamId, 0} + } + return &frame, nil +} diff --git a/vendor/github.com/moby/spdystream/spdy/types.go b/vendor/github.com/moby/spdystream/spdy/types.go new file mode 100644 index 0000000000..a254a43ab9 --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/types.go @@ -0,0 +1,291 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spdy implements the SPDY protocol (currently SPDY/3), described in
+// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
+package spdy
+
+import (
+	"bytes"
+	"compress/zlib"
+	"io"
+	"net/http"
+)
+
+// Version is the protocol version number that this package implements.
+const Version = 3
+
+// ControlFrameType stores the type field in a control frame header.
+type ControlFrameType uint16
+
+const (
+	TypeSynStream    ControlFrameType = 0x0001
+	TypeSynReply     ControlFrameType = 0x0002
+	TypeRstStream    ControlFrameType = 0x0003
+	TypeSettings     ControlFrameType = 0x0004
+	TypePing         ControlFrameType = 0x0006
+	TypeGoAway       ControlFrameType = 0x0007
+	TypeHeaders      ControlFrameType = 0x0008
+	TypeWindowUpdate ControlFrameType = 0x0009
+)
+
+// ControlFlags are the flags that can be set on a control frame.
+type ControlFlags uint8
+
+const (
+	ControlFlagFin                   ControlFlags = 0x01
+	ControlFlagUnidirectional        ControlFlags = 0x02
+	ControlFlagSettingsClearSettings ControlFlags = 0x01
+)
+
+// DataFlags are the flags that can be set on a data frame.
+type DataFlags uint8
+
+const (
+	DataFlagFin DataFlags = 0x01
+)
+
+// MaxDataLength is the maximum number of bytes that can be stored in one frame.
+const MaxDataLength = 1<<24 - 1
+
+// headerValueSeparator separates multiple header values.
+const headerValueSeparator = "\x00"
+
+// Frame is a single SPDY frame in its unpacked in-memory representation. Use
+// Framer to read and write it.
+type Frame interface {
+	write(f *Framer) error
+}
+
+// ControlFrameHeader contains all the fields in a control frame header,
+// in its unpacked in-memory representation.
+type ControlFrameHeader struct {
+	// Note, high bit is the "Control" bit.
+	version   uint16 // spdy version number
+	frameType ControlFrameType
+	Flags     ControlFlags
+	length    uint32 // length of data field
+}
+
+type controlFrame interface {
+	Frame
+	read(h ControlFrameHeader, f *Framer) error
+}
+
+// StreamId represents a 31-bit value identifying the stream.
+type StreamId uint32
+
+// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
+// frame.
+type SynStreamFrame struct {
+	CFHeader             ControlFrameHeader
+	StreamId             StreamId
+	AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
+	Priority             uint8    // priority of this frame (3-bit)
+	Slot                 uint8    // index in the server's credential vector of the client certificate
+	Headers              http.Header
+}
+
+// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
+type SynReplyFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Headers  http.Header
+}
+
+// RstStreamStatus represents the status that led to a RST_STREAM.
+type RstStreamStatus uint32
+
+const (
+	ProtocolError RstStreamStatus = iota + 1
+	InvalidStream
+	RefusedStream
+	UnsupportedVersion
+	Cancel
+	InternalError
+	FlowControlError
+	StreamInUse
+	StreamAlreadyClosed
+	InvalidCredentials
+	FrameTooLarge
+)
+
+// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
+// frame.
+type RstStreamFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Status   RstStreamStatus
+}
+
+// SettingsFlag represents a flag in a SETTINGS frame.
+type SettingsFlag uint8
+
+const (
+	FlagSettingsPersistValue SettingsFlag = 0x1
+	FlagSettingsPersisted    SettingsFlag = 0x2
+)
+
+// SettingsId represents the id of an id/value pair in a SETTINGS frame.
+type SettingsId uint32
+
+const (
+	SettingsUploadBandwidth SettingsId = iota + 1
+	SettingsDownloadBandwidth
+	SettingsRoundTripTime
+	SettingsMaxConcurrentStreams
+	SettingsCurrentCwnd
+	SettingsDownloadRetransRate
+	SettingsInitialWindowSize
+	SettingsClientCretificateVectorSize
+)
+
+// SettingsFlagIdValue is the unpacked, in-memory representation of the
+// combined flag/id/value for a setting in a SETTINGS frame.
+type SettingsFlagIdValue struct {
+	Flag  SettingsFlag
+	Id    SettingsId
+	Value uint32
+}
+
+// SettingsFrame is the unpacked, in-memory representation of a SPDY
+// SETTINGS frame.
+type SettingsFrame struct {
+	CFHeader     ControlFrameHeader
+	FlagIdValues []SettingsFlagIdValue
+}
+
+// PingFrame is the unpacked, in-memory representation of a PING frame.
+type PingFrame struct {
+	CFHeader ControlFrameHeader
+	Id       uint32 // unique id for this ping, from server is even, from client is odd.
+}
+
+// GoAwayStatus represents the status in a GoAwayFrame.
+type GoAwayStatus uint32
+
+const (
+	GoAwayOK GoAwayStatus = iota
+	GoAwayProtocolError
+	GoAwayInternalError
+)
+
+// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
+type GoAwayFrame struct {
+	CFHeader         ControlFrameHeader
+	LastGoodStreamId StreamId // last stream id which was accepted by sender
+	Status           GoAwayStatus
+}
+
+// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
+type HeadersFrame struct {
+	CFHeader ControlFrameHeader
+	StreamId StreamId
+	Headers  http.Header
+}
+
+// WindowUpdateFrame is the unpacked, in-memory representation of a
+// WINDOW_UPDATE frame.
+type WindowUpdateFrame struct {
+	CFHeader        ControlFrameHeader
+	StreamId        StreamId
+	DeltaWindowSize uint32 // additional number of bytes to existing window size
+}
+
+// TODO: Implement credential frame and related methods.
+
+// DataFrame is the unpacked, in-memory representation of a DATA frame.
+type DataFrame struct {
+	// Note, high bit is the "Control" bit. Should be 0 for data frames.
+	StreamId StreamId
+	Flags    DataFlags
+	Data     []byte // payload data of this frame
+}
+
+// ErrorCode is a SPDY-specific error code.
+type ErrorCode string
+
+const (
+	UnlowercasedHeaderName     ErrorCode = "header was not lowercased"
+	DuplicateHeaders           ErrorCode = "multiple headers with same name"
+	WrongCompressedPayloadSize ErrorCode = "compressed payload size was incorrect"
+	UnknownFrameType           ErrorCode = "unknown frame type"
+	InvalidControlFrame        ErrorCode = "invalid control frame"
+	InvalidDataFrame           ErrorCode = "invalid data frame"
+	InvalidHeaderPresent       ErrorCode = "frame contained invalid header"
+	ZeroStreamId               ErrorCode = "stream id zero is disallowed"
+)
+
+// Error contains both the type of error and additional values. StreamId is 0
+// if Error is not associated with a stream.
+type Error struct {
+	Err      ErrorCode
+	StreamId StreamId
+}
+
+func (e *Error) Error() string {
+	return string(e.Err)
+}
+
+var invalidReqHeaders = map[string]bool{
+	"Connection":        true,
+	"Host":              true,
+	"Keep-Alive":        true,
+	"Proxy-Connection":  true,
+	"Transfer-Encoding": true,
+}
+
+var invalidRespHeaders = map[string]bool{
+	"Connection":        true,
+	"Keep-Alive":        true,
+	"Proxy-Connection":  true,
+	"Transfer-Encoding": true,
+}
+
+// Framer handles serializing/deserializing SPDY frames, including compressing/
+// decompressing payloads.
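+//
+// Editor's note, a minimal round trip as a hedged sketch (it assumes a single
+// bytes.Buffer serving as both the reader and the writer):
+//
+//	var buf bytes.Buffer
+//	f, err := NewFramer(&buf, &buf)
+//	if err != nil {
+//		panic(err)
+//	}
+//	_ = f.WriteFrame(&DataFrame{StreamId: 1, Data: []byte("hi")})
+//	frame, _ := f.ReadFrame() // returns a *DataFrame carrying "hi"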
+type Framer struct { + headerCompressionDisabled bool + w io.Writer + headerBuf *bytes.Buffer + headerCompressor *zlib.Writer + r io.Reader + headerReader io.LimitedReader + headerDecompressor io.ReadCloser +} + +// NewFramer allocates a new Framer for a given SPDY connection, represented by +// a io.Writer and io.Reader. Note that Framer will read and write individual fields +// from/to the Reader and Writer, so the caller should pass in an appropriately +// buffered implementation to optimize performance. +func NewFramer(w io.Writer, r io.Reader) (*Framer, error) { + compressBuf := new(bytes.Buffer) + compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary)) + if err != nil { + return nil, err + } + framer := &Framer{ + w: w, + headerBuf: compressBuf, + headerCompressor: compressor, + r: r, + } + return framer, nil +} diff --git a/vendor/github.com/moby/spdystream/spdy/write.go b/vendor/github.com/moby/spdystream/spdy/write.go new file mode 100644 index 0000000000..ab6d91f3b8 --- /dev/null +++ b/vendor/github.com/moby/spdystream/spdy/write.go @@ -0,0 +1,334 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package spdy + +import ( + "encoding/binary" + "io" + "net/http" + "strings" +) + +func (frame *SynStreamFrame) write(f *Framer) error { + return f.writeSynStreamFrame(frame) +} + +func (frame *SynReplyFrame) write(f *Framer) error { + return f.writeSynReplyFrame(frame) +} + +func (frame *RstStreamFrame) write(f *Framer) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeRstStream + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if frame.Status == 0 { + return &Error{InvalidControlFrame, frame.StreamId} + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return +} + +func (frame *SettingsFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSettings + frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4) + + // Serialize frame to Writer. 
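+	// Editor's note (derived from the read/write code in this file): after the
+	// control frame header comes a big-endian uint32 entry count, then one
+	// word per entry packing the 8-bit flag above the 24-bit id, followed by
+	// the uint32 value.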
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil { + return + } + for _, flagIdValue := range frame.FlagIdValues { + flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id) + if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil { + return + } + } + return +} + +func (frame *PingFrame) write(f *Framer) (err error) { + if frame.Id == 0 { + return &Error{ZeroStreamId, 0} + } + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypePing + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 4 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil { + return + } + return +} + +func (frame *GoAwayFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeGoAway + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil { + return + } + return nil +} + +func (frame *HeadersFrame) write(f *Framer) error { + return f.writeHeadersFrame(frame) +} + +func (frame *WindowUpdateFrame) write(f *Framer) (err error) { + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeWindowUpdate + frame.CFHeader.Flags = 0 + frame.CFHeader.length = 8 + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil { + return + } + return nil +} + +func (frame *DataFrame) write(f *Framer) error { + return f.writeDataFrame(frame) +} + +// WriteFrame writes a frame. +func (f *Framer) WriteFrame(frame Frame) error { + return frame.write(f) +} + +func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error { + if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil { + return err + } + if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil { + return err + } + flagsAndLength := uint32(h.Flags)<<24 | h.length + if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil { + return err + } + return nil +} + +func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) { + n = 0 + if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + return + } + n += 2 + for name, values := range h { + if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + return + } + n += 2 + name = strings.ToLower(name) + if _, err = io.WriteString(w, name); err != nil { + return + } + n += len(name) + v := strings.Join(values, headerValueSeparator) + if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + return + } + n += 2 + if _, err = io.WriteString(w, v); err != nil { + return + } + n += len(v) + } + return +} + +func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. 
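+	// Editor's note: writeHeaderValueBlock, called just below, emits a
+	// length-prefixed block (uint32 pair count, then per pair a uint32 name
+	// length, the lowercased name, a uint32 value length, and the values
+	// joined by the NUL headerValueSeparator). Unless compression is disabled,
+	// the block is zlib-compressed against headerDictionary; the returned byte
+	// count is not used here, since the frame length is taken from f.headerBuf.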
+ var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynStream + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil { + return err + } + if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil { + return err + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return err + } + f.headerBuf.Reset() + return nil +} + +func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeSynReply + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + // Marshal the headers. + var writer io.Writer = f.headerBuf + if !f.headerCompressionDisabled { + writer = f.headerCompressor + } + if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil { + return + } + if !f.headerCompressionDisabled { + f.headerCompressor.Flush() + } + + // Set ControlFrameHeader. + frame.CFHeader.version = Version + frame.CFHeader.frameType = TypeHeaders + frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4) + + // Serialize frame to Writer. + if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil { + return + } + if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil { + return + } + if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil { + return + } + f.headerBuf.Reset() + return +} + +func (f *Framer) writeDataFrame(frame *DataFrame) (err error) { + if frame.StreamId == 0 { + return &Error{ZeroStreamId, 0} + } + if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength { + return &Error{InvalidDataFrame, frame.StreamId} + } + + // Serialize frame to Writer. 
+	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+		return
+	}
+	flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
+	if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
+		return
+	}
+	if _, err = f.w.Write(frame.Data); err != nil {
+		return
+	}
+	return nil
+}
diff --git a/vendor/github.com/moby/spdystream/stream.go b/vendor/github.com/moby/spdystream/stream.go
new file mode 100644
index 0000000000..404e3c02df
--- /dev/null
+++ b/vendor/github.com/moby/spdystream/stream.go
@@ -0,0 +1,343 @@
+/*
+   Copyright 2014-2021 Docker Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package spdystream
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/moby/spdystream/spdy"
+)
+
+var (
+	ErrUnreadPartialData = errors.New("unread partial data")
+)
+
+type Stream struct {
+	streamId  spdy.StreamId
+	parent    *Stream
+	conn      *Connection
+	startChan chan error
+
+	dataLock sync.RWMutex
+	dataChan chan []byte
+	unread   []byte
+
+	priority   uint8
+	headers    http.Header
+	headerChan chan http.Header
+	finishLock sync.Mutex
+	finished   bool
+	replyCond  *sync.Cond
+	replied    bool
+	closeLock  sync.Mutex
+	closeChan  chan bool
+}
+
+// WriteData writes data to the stream, sending one data frame per call.
+func (s *Stream) WriteData(data []byte, fin bool) error {
+	s.waitWriteReply()
+	var flags spdy.DataFlags
+
+	if fin {
+		flags = spdy.DataFlagFin
+		s.finishLock.Lock()
+		if s.finished {
+			s.finishLock.Unlock()
+			return ErrWriteClosedStream
+		}
+		s.finished = true
+		s.finishLock.Unlock()
+	}
+
+	dataFrame := &spdy.DataFrame{
+		StreamId: s.streamId,
+		Flags:    flags,
+		Data:     data,
+	}
+
+	debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
+	return s.conn.framer.WriteFrame(dataFrame)
+}
+
+// Write writes bytes to a stream, calling WriteData for each call.
+func (s *Stream) Write(data []byte) (n int, err error) {
+	err = s.WriteData(data, false)
+	if err == nil {
+		n = len(data)
+	}
+	return
+}
+
+// Read reads bytes from a stream; a single read will never get more
+// than what is sent on a single data frame, but multiple calls to
+// read may get data from the same data frame.
+func (s *Stream) Read(p []byte) (n int, err error) {
+	if s.unread == nil {
+		select {
+		case <-s.closeChan:
+			return 0, io.EOF
+		case read, ok := <-s.dataChan:
+			if !ok {
+				return 0, io.EOF
+			}
+			s.unread = read
+		}
+	}
+	n = copy(p, s.unread)
+	if n < len(s.unread) {
+		s.unread = s.unread[n:]
+	} else {
+		s.unread = nil
+	}
+	return
+}
+
+// ReadData reads an entire data frame and returns the byte array
+// from the data frame. If there is unread data from the result
+// of a Read call, this function will return an ErrUnreadPartialData.
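+//
+// Editor's sketch (s is an assumed, already-established *Stream): the two
+// read styles cannot be interleaved while a frame is partially consumed:
+//
+//	small := make([]byte, 2)
+//	_, _ = s.Read(small)   // may leave the rest of a data frame buffered
+//	_, err := s.ReadData() // returns ErrUnreadPartialData in that case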
+func (s *Stream) ReadData() ([]byte, error) { + debugMessage("(%p) Reading data from %d", s, s.streamId) + if s.unread != nil { + return nil, ErrUnreadPartialData + } + select { + case <-s.closeChan: + return nil, io.EOF + case read, ok := <-s.dataChan: + if !ok { + return nil, io.EOF + } + return read, nil + } +} + +func (s *Stream) waitWriteReply() { + if s.replyCond != nil { + s.replyCond.L.Lock() + for !s.replied { + s.replyCond.Wait() + } + s.replyCond.L.Unlock() + } +} + +// Wait waits for the stream to receive a reply. +func (s *Stream) Wait() error { + return s.WaitTimeout(time.Duration(0)) +} + +// WaitTimeout waits for the stream to receive a reply or for timeout. +// When the timeout is reached, ErrTimeout will be returned. +func (s *Stream) WaitTimeout(timeout time.Duration) error { + var timeoutChan <-chan time.Time + if timeout > time.Duration(0) { + timeoutChan = time.After(timeout) + } + + select { + case err := <-s.startChan: + if err != nil { + return err + } + break + case <-timeoutChan: + return ErrTimeout + } + return nil +} + +// Close closes the stream by sending an empty data frame with the +// finish flag set, indicating this side is finished with the stream. +func (s *Stream) Close() error { + select { + case <-s.closeChan: + // Stream is now fully closed + s.conn.removeStream(s) + default: + break + } + return s.WriteData([]byte{}, true) +} + +// Reset sends a reset frame, putting the stream into the fully closed state. +func (s *Stream) Reset() error { + s.conn.removeStream(s) + return s.resetStream() +} + +func (s *Stream) resetStream() error { + // Always call closeRemoteChannels, even if s.finished is already true. + // This makes it so that stream.Close() followed by stream.Reset() allows + // stream.Read() to unblock. + s.closeRemoteChannels() + + s.finishLock.Lock() + if s.finished { + s.finishLock.Unlock() + return nil + } + s.finished = true + s.finishLock.Unlock() + + resetFrame := &spdy.RstStreamFrame{ + StreamId: s.streamId, + Status: spdy.Cancel, + } + return s.conn.framer.WriteFrame(resetFrame) +} + +// CreateSubStream creates a stream using the current as the parent +func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) { + return s.conn.CreateStream(headers, s, fin) +} + +// SetPriority sets the stream priority, does not affect the +// remote priority of this stream after Open has been called. +// Valid values are 0 through 7, 0 being the highest priority +// and 7 the lowest. +func (s *Stream) SetPriority(priority uint8) { + s.priority = priority +} + +// SendHeader sends a header frame across the stream +func (s *Stream) SendHeader(headers http.Header, fin bool) error { + return s.conn.sendHeaders(headers, s, fin) +} + +// SendReply sends a reply on a stream, only valid to be called once +// when handling a new stream +func (s *Stream) SendReply(headers http.Header, fin bool) error { + if s.replyCond == nil { + return errors.New("cannot reply on initiated stream") + } + s.replyCond.L.Lock() + defer s.replyCond.L.Unlock() + if s.replied { + return nil + } + + err := s.conn.sendReply(headers, s, fin) + if err != nil { + return err + } + + s.replied = true + s.replyCond.Broadcast() + return nil +} + +// Refuse sends a reset frame with the status refuse, only +// valid to be called once when handling a new stream. This +// may be used to indicate that a stream is not allowed +// when http status codes are not being used. 
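+//
+// Editor's sketch of a server-side stream handler that either replies or
+// refuses (the ":path" header is an assumed SPDY-style pseudo-header):
+//
+//	func handle(s *Stream) {
+//		if s.Headers().Get(":path") == "/forbidden" {
+//			_ = s.Refuse()
+//			return
+//		}
+//		_ = s.SendReply(http.Header{}, false)
+//	}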
+func (s *Stream) Refuse() error { + if s.replied { + return nil + } + s.replied = true + return s.conn.sendReset(spdy.RefusedStream, s) +} + +// Cancel sends a reset frame with the status canceled. This +// can be used at any time by the creator of the Stream to +// indicate the stream is no longer needed. +func (s *Stream) Cancel() error { + return s.conn.sendReset(spdy.Cancel, s) +} + +// ReceiveHeader receives a header sent on the other side +// of the stream. This function will block until a header +// is received or stream is closed. +func (s *Stream) ReceiveHeader() (http.Header, error) { + select { + case <-s.closeChan: + break + case header, ok := <-s.headerChan: + if !ok { + return nil, fmt.Errorf("header chan closed") + } + return header, nil + } + return nil, fmt.Errorf("stream closed") +} + +// Parent returns the parent stream +func (s *Stream) Parent() *Stream { + return s.parent +} + +// Headers returns the headers used to create the stream +func (s *Stream) Headers() http.Header { + return s.headers +} + +// String returns the string version of stream using the +// streamId to uniquely identify the stream +func (s *Stream) String() string { + return fmt.Sprintf("stream:%d", s.streamId) +} + +// Identifier returns a 32 bit identifier for the stream +func (s *Stream) Identifier() uint32 { + return uint32(s.streamId) +} + +// IsFinished returns whether the stream has finished +// sending data +func (s *Stream) IsFinished() bool { + return s.finished +} + +// Implement net.Conn interface + +func (s *Stream) LocalAddr() net.Addr { + return s.conn.conn.LocalAddr() +} + +func (s *Stream) RemoteAddr() net.Addr { + return s.conn.conn.RemoteAddr() +} + +// TODO set per stream values instead of connection-wide + +func (s *Stream) SetDeadline(t time.Time) error { + return s.conn.conn.SetDeadline(t) +} + +func (s *Stream) SetReadDeadline(t time.Time) error { + return s.conn.conn.SetReadDeadline(t) +} + +func (s *Stream) SetWriteDeadline(t time.Time) error { + return s.conn.conn.SetWriteDeadline(t) +} + +func (s *Stream) closeRemoteChannels() { + s.closeLock.Lock() + defer s.closeLock.Unlock() + select { + case <-s.closeChan: + default: + close(s.closeChan) + } +} diff --git a/vendor/github.com/moby/spdystream/utils.go b/vendor/github.com/moby/spdystream/utils.go new file mode 100644 index 0000000000..e9f7fffd60 --- /dev/null +++ b/vendor/github.com/moby/spdystream/utils.go @@ -0,0 +1,32 @@ +/* + Copyright 2014-2021 Docker Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package spdystream + +import ( + "log" + "os" +) + +var ( + DEBUG = os.Getenv("DEBUG") +) + +func debugMessage(fmt string, args ...interface{}) { + if DEBUG != "" { + log.Printf(fmt, args...) + } +} diff --git a/vendor/github.com/mxk/go-flowrate/LICENSE b/vendor/github.com/mxk/go-flowrate/LICENSE new file mode 100644 index 0000000000..e9f9f628ba --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/LICENSE @@ -0,0 +1,29 @@ +Copyright (c) 2014 The Go-FlowRate Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the + distribution. + + * Neither the name of the go-flowrate project nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go b/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go new file mode 100644 index 0000000000..1b727721e1 --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/flowrate.go @@ -0,0 +1,267 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +// Package flowrate provides the tools for monitoring and limiting the flow rate +// of an arbitrary data stream. +package flowrate + +import ( + "math" + "sync" + "time" +) + +// Monitor monitors and limits the transfer rate of a data stream. +type Monitor struct { + mu sync.Mutex // Mutex guarding access to all internal fields + active bool // Flag indicating an active transfer + start time.Duration // Transfer start time (clock() value) + bytes int64 // Total number of bytes transferred + samples int64 // Total number of samples taken + + rSample float64 // Most recent transfer rate sample (bytes per second) + rEMA float64 // Exponential moving average of rSample + rPeak float64 // Peak transfer rate (max of all rSamples) + rWindow float64 // rEMA window (seconds) + + sBytes int64 // Number of bytes transferred since sLast + sLast time.Duration // Most recent sample time (stop time when inactive) + sRate time.Duration // Sampling rate + + tBytes int64 // Number of bytes expected in the current transfer + tLast time.Duration // Time of the most recent transfer of at least 1 byte +} + +// New creates a new flow control monitor. Instantaneous transfer rate is +// measured and updated for each sampleRate interval. windowSize determines the +// weight of each sample in the exponential moving average (EMA) calculation. +// The exact formulas are: +// +// sampleTime = currentTime - prevSampleTime +// sampleRate = byteCount / sampleTime +// weight = 1 - exp(-sampleTime/windowSize) +// newRate = weight*sampleRate + (1-weight)*oldRate +// +// The default values for sampleRate and windowSize (if <= 0) are 100ms and 1s, +// respectively. 
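+//
+// Editor's note, a worked instance of the formulas above: with windowSize = 1s
+// and a 100ms sample measured at 1.0 MB/s while the previous rate was
+// 0.5 MB/s:
+//
+//	weight  = 1 - exp(-0.1/1.0) ≈ 0.0952
+//	newRate ≈ 0.0952*1.0 + 0.9048*0.5 ≈ 0.548 MB/s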
+func New(sampleRate, windowSize time.Duration) *Monitor { + if sampleRate = clockRound(sampleRate); sampleRate <= 0 { + sampleRate = 5 * clockRate + } + if windowSize <= 0 { + windowSize = 1 * time.Second + } + now := clock() + return &Monitor{ + active: true, + start: now, + rWindow: windowSize.Seconds(), + sLast: now, + sRate: sampleRate, + tLast: now, + } +} + +// Update records the transfer of n bytes and returns n. It should be called +// after each Read/Write operation, even if n is 0. +func (m *Monitor) Update(n int) int { + m.mu.Lock() + m.update(n) + m.mu.Unlock() + return n +} + +// IO is a convenience method intended to wrap io.Reader and io.Writer method +// execution. It calls m.Update(n) and then returns (n, err) unmodified. +func (m *Monitor) IO(n int, err error) (int, error) { + return m.Update(n), err +} + +// Done marks the transfer as finished and prevents any further updates or +// limiting. Instantaneous and current transfer rates drop to 0. Update, IO, and +// Limit methods become NOOPs. It returns the total number of bytes transferred. +func (m *Monitor) Done() int64 { + m.mu.Lock() + if now := m.update(0); m.sBytes > 0 { + m.reset(now) + } + m.active = false + m.tLast = 0 + n := m.bytes + m.mu.Unlock() + return n +} + +// timeRemLimit is the maximum Status.TimeRem value. +const timeRemLimit = 999*time.Hour + 59*time.Minute + 59*time.Second + +// Status represents the current Monitor status. All transfer rates are in bytes +// per second rounded to the nearest byte. +type Status struct { + Active bool // Flag indicating an active transfer + Start time.Time // Transfer start time + Duration time.Duration // Time period covered by the statistics + Idle time.Duration // Time since the last transfer of at least 1 byte + Bytes int64 // Total number of bytes transferred + Samples int64 // Total number of samples taken + InstRate int64 // Instantaneous transfer rate + CurRate int64 // Current transfer rate (EMA of InstRate) + AvgRate int64 // Average transfer rate (Bytes / Duration) + PeakRate int64 // Maximum instantaneous transfer rate + BytesRem int64 // Number of bytes remaining in the transfer + TimeRem time.Duration // Estimated time to completion + Progress Percent // Overall transfer progress +} + +// Status returns current transfer status information. The returned value +// becomes static after a call to Done. +func (m *Monitor) Status() Status { + m.mu.Lock() + now := m.update(0) + s := Status{ + Active: m.active, + Start: clockToTime(m.start), + Duration: m.sLast - m.start, + Idle: now - m.tLast, + Bytes: m.bytes, + Samples: m.samples, + PeakRate: round(m.rPeak), + BytesRem: m.tBytes - m.bytes, + Progress: percentOf(float64(m.bytes), float64(m.tBytes)), + } + if s.BytesRem < 0 { + s.BytesRem = 0 + } + if s.Duration > 0 { + rAvg := float64(s.Bytes) / s.Duration.Seconds() + s.AvgRate = round(rAvg) + if s.Active { + s.InstRate = round(m.rSample) + s.CurRate = round(m.rEMA) + if s.BytesRem > 0 { + if tRate := 0.8*m.rEMA + 0.2*rAvg; tRate > 0 { + ns := float64(s.BytesRem) / tRate * 1e9 + if ns > float64(timeRemLimit) { + ns = float64(timeRemLimit) + } + s.TimeRem = clockRound(time.Duration(ns)) + } + } + } + } + m.mu.Unlock() + return s +} + +// Limit restricts the instantaneous (per-sample) data flow to rate bytes per +// second. It returns the maximum number of bytes (0 <= n <= want) that may be +// transferred immediately without exceeding the limit. If block == true, the +// call blocks until n > 0. 
want is returned unmodified if want < 1, rate < 1, +// or the transfer is inactive (after a call to Done). +// +// At least one byte is always allowed to be transferred in any given sampling +// period. Thus, if the sampling rate is 100ms, the lowest achievable flow rate +// is 10 bytes per second. +// +// For usage examples, see the implementation of Reader and Writer in io.go. +func (m *Monitor) Limit(want int, rate int64, block bool) (n int) { + if want < 1 || rate < 1 { + return want + } + m.mu.Lock() + + // Determine the maximum number of bytes that can be sent in one sample + limit := round(float64(rate) * m.sRate.Seconds()) + if limit <= 0 { + limit = 1 + } + + // If block == true, wait until m.sBytes < limit + if now := m.update(0); block { + for m.sBytes >= limit && m.active { + now = m.waitNextSample(now) + } + } + + // Make limit <= want (unlimited if the transfer is no longer active) + if limit -= m.sBytes; limit > int64(want) || !m.active { + limit = int64(want) + } + m.mu.Unlock() + + if limit < 0 { + limit = 0 + } + return int(limit) +} + +// SetTransferSize specifies the total size of the data transfer, which allows +// the Monitor to calculate the overall progress and time to completion. +func (m *Monitor) SetTransferSize(bytes int64) { + if bytes < 0 { + bytes = 0 + } + m.mu.Lock() + m.tBytes = bytes + m.mu.Unlock() +} + +// update accumulates the transferred byte count for the current sample until +// clock() - m.sLast >= m.sRate. The monitor status is updated once the current +// sample is done. +func (m *Monitor) update(n int) (now time.Duration) { + if !m.active { + return + } + if now = clock(); n > 0 { + m.tLast = now + } + m.sBytes += int64(n) + if sTime := now - m.sLast; sTime >= m.sRate { + t := sTime.Seconds() + if m.rSample = float64(m.sBytes) / t; m.rSample > m.rPeak { + m.rPeak = m.rSample + } + + // Exponential moving average using a method similar to *nix load + // average calculation. Longer sampling periods carry greater weight. + if m.samples > 0 { + w := math.Exp(-t / m.rWindow) + m.rEMA = m.rSample + w*(m.rEMA-m.rSample) + } else { + m.rEMA = m.rSample + } + m.reset(now) + } + return +} + +// reset clears the current sample state in preparation for the next sample. +func (m *Monitor) reset(sampleTime time.Duration) { + m.bytes += m.sBytes + m.samples++ + m.sBytes = 0 + m.sLast = sampleTime +} + +// waitNextSample sleeps for the remainder of the current sample. The lock is +// released and reacquired during the actual sleep period, so it's possible for +// the transfer to be inactive when this method returns. +func (m *Monitor) waitNextSample(now time.Duration) time.Duration { + const minWait = 5 * time.Millisecond + current := m.sLast + + // sleep until the last sample time changes (ideally, just one iteration) + for m.sLast == current && m.active { + d := current + m.sRate - now + m.mu.Unlock() + if d < minWait { + d = minWait + } + time.Sleep(d) + m.mu.Lock() + now = m.update(0) + } + return now +} diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/io.go b/vendor/github.com/mxk/go-flowrate/flowrate/io.go new file mode 100644 index 0000000000..fbe0909725 --- /dev/null +++ b/vendor/github.com/mxk/go-flowrate/flowrate/io.go @@ -0,0 +1,133 @@ +// +// Written by Maxim Khitrov (November 2012) +// + +package flowrate + +import ( + "errors" + "io" +) + +// ErrLimit is returned by the Writer when a non-blocking write is short due to +// the transfer rate limit. 
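+//
+// Editor's sketch of the non-blocking case (dst and buf are assumed):
+//
+//	w := NewWriter(dst, 64*1024) // limit writes to 64 KiB/s
+//	w.SetBlocking(false)
+//	if _, err := w.Write(buf); err == ErrLimit {
+//		// this sampling period's byte budget is spent; retry later
+//	}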
+var ErrLimit = errors.New("flowrate: flow rate limit exceeded") + +// Limiter is implemented by the Reader and Writer to provide a consistent +// interface for monitoring and controlling data transfer. +type Limiter interface { + Done() int64 + Status() Status + SetTransferSize(bytes int64) + SetLimit(new int64) (old int64) + SetBlocking(new bool) (old bool) +} + +// Reader implements io.ReadCloser with a restriction on the rate of data +// transfer. +type Reader struct { + io.Reader // Data source + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be read due to the limit +} + +// NewReader restricts all Read operations on r to limit bytes per second. +func NewReader(r io.Reader, limit int64) *Reader { + return &Reader{r, New(0, 0), limit, true} +} + +// Read reads up to len(p) bytes into p without exceeding the current transfer +// rate limit. It returns (0, nil) immediately if r is non-blocking and no new +// bytes can be read at this time. +func (r *Reader) Read(p []byte) (n int, err error) { + p = p[:r.Limit(len(p), r.limit, r.block)] + if len(p) > 0 { + n, err = r.IO(r.Reader.Read(p)) + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (r *Reader) SetLimit(new int64) (old int64) { + old, r.limit = r.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Read call on a non-blocking reader returns immediately if no additional bytes +// may be read at this time due to the rate limit. +func (r *Reader) SetBlocking(new bool) (old bool) { + old, r.block = r.block, new + return +} + +// Close closes the underlying reader if it implements the io.Closer interface. +func (r *Reader) Close() error { + defer r.Done() + if c, ok := r.Reader.(io.Closer); ok { + return c.Close() + } + return nil +} + +// Writer implements io.WriteCloser with a restriction on the rate of data +// transfer. +type Writer struct { + io.Writer // Data destination + *Monitor // Flow control monitor + + limit int64 // Rate limit in bytes per second (unlimited when <= 0) + block bool // What to do when no new bytes can be written due to the limit +} + +// NewWriter restricts all Write operations on w to limit bytes per second. The +// transfer rate and the default blocking behavior (true) can be changed +// directly on the returned *Writer. +func NewWriter(w io.Writer, limit int64) *Writer { + return &Writer{w, New(0, 0), limit, true} +} + +// Write writes len(p) bytes from p to the underlying data stream without +// exceeding the current transfer rate limit. It returns (n, ErrLimit) if w is +// non-blocking and no additional bytes can be written at this time. +func (w *Writer) Write(p []byte) (n int, err error) { + var c int + for len(p) > 0 && err == nil { + s := p[:w.Limit(len(p), w.limit, w.block)] + if len(s) > 0 { + c, err = w.IO(w.Writer.Write(s)) + } else { + return n, ErrLimit + } + p = p[c:] + n += c + } + return +} + +// SetLimit changes the transfer rate limit to new bytes per second and returns +// the previous setting. +func (w *Writer) SetLimit(new int64) (old int64) { + old, w.limit = w.limit, new + return +} + +// SetBlocking changes the blocking behavior and returns the previous setting. A +// Write call on a non-blocking writer returns as soon as no additional bytes +// may be written at this time due to the rate limit. 
+func (w *Writer) SetBlocking(new bool) (old bool) {
+	old, w.block = w.block, new
+	return
+}
+
+// Close closes the underlying writer if it implements the io.Closer interface.
+func (w *Writer) Close() error {
+	defer w.Done()
+	if c, ok := w.Writer.(io.Closer); ok {
+		return c.Close()
+	}
+	return nil
+}
diff --git a/vendor/github.com/mxk/go-flowrate/flowrate/util.go b/vendor/github.com/mxk/go-flowrate/flowrate/util.go
new file mode 100644
index 0000000000..4caac583fc
--- /dev/null
+++ b/vendor/github.com/mxk/go-flowrate/flowrate/util.go
@@ -0,0 +1,67 @@
+//
+// Written by Maxim Khitrov (November 2012)
+//
+
+package flowrate
+
+import (
+	"math"
+	"strconv"
+	"time"
+)
+
+// clockRate is the resolution and precision of clock().
+const clockRate = 20 * time.Millisecond
+
+// czero is the process start time rounded down to the nearest clockRate
+// increment.
+var czero = time.Duration(time.Now().UnixNano()) / clockRate * clockRate
+
+// clock returns a low resolution timestamp relative to the process start time.
+func clock() time.Duration {
+	return time.Duration(time.Now().UnixNano())/clockRate*clockRate - czero
+}
+
+// clockToTime converts a clock() timestamp to an absolute time.Time value.
+func clockToTime(c time.Duration) time.Time {
+	return time.Unix(0, int64(czero+c))
+}
+
+// clockRound returns d rounded to the nearest clockRate increment.
+func clockRound(d time.Duration) time.Duration {
+	return (d + clockRate>>1) / clockRate * clockRate
+}
+
+// round returns x rounded to the nearest int64 (non-negative values only).
+func round(x float64) int64 {
+	if _, frac := math.Modf(x); frac >= 0.5 {
+		return int64(math.Ceil(x))
+	}
+	return int64(math.Floor(x))
+}
+
+// Percent represents a percentage in increments of 1/1000th of a percent.
+type Percent uint32
+
+// percentOf calculates what percent of the total is x.
+func percentOf(x, total float64) Percent {
+	if x < 0 || total <= 0 {
+		return 0
+	} else if p := round(x / total * 1e5); p <= math.MaxUint32 {
+		return Percent(p)
+	}
+	return Percent(math.MaxUint32)
+}
+
+func (p Percent) Float() float64 {
+	return float64(p) * 1e-3
+}
+
+func (p Percent) String() string {
+	var buf [12]byte
+	b := strconv.AppendUint(buf[:0], uint64(p)/1000, 10)
+	n := len(b)
+	b = strconv.AppendUint(b, 1000+uint64(p)%1000, 10)
+	b[n] = '.'
+	return string(append(b, '%'))
+}
diff --git a/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/constant.go b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/constant.go
new file mode 100644
index 0000000000..1509826225
--- /dev/null
+++ b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/constant.go
@@ -0,0 +1,187 @@
+package utils
+
+const (
+
+	// Default application name
+	DefaultServeAppName = "default"
+	// The following are used as label keys.
+
+	// RayOriginatedFromCRNameLabelKey and RayOriginatedFromCRDLabelKey are the labels used to associate the root KubeRay Custom Resource.
+	// [Example 1] If we create a RayJob named `myjob`, then (1) the RayCluster and (2) the submitter K8s Job will have a
+	// `ray.io/originated-from-cr-name=myjob` and a `ray.io/originated-from-crd=RayJob` label.
+	//
+	// [Example 2] If we create a RayService named `mysvc`, then (1) the RayCluster and (2) the Kubernetes services managed by the RayService
+	// will have a `ray.io/originated-from-cr-name=mysvc` and a `ray.io/originated-from-crd=RayService` label.
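+	//
+	// For example (editor's note), every resource originated from that
+	// RayService can then be selected with a label query such as:
+	//   kubectl get rayclusters,services -l ray.io/originated-from-cr-name=mysvc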
+ RayOriginatedFromCRNameLabelKey = "ray.io/originated-from-cr-name" + RayOriginatedFromCRDLabelKey = "ray.io/originated-from-crd" + RayClusterLabelKey = "ray.io/cluster" + RayNodeTypeLabelKey = "ray.io/node-type" + RayNodeGroupLabelKey = "ray.io/group" + RayNodeLabelKey = "ray.io/is-ray-node" + RayIDLabelKey = "ray.io/identifier" + RayClusterServingServiceLabelKey = "ray.io/serve" + RayClusterHeadlessServiceLabelKey = "ray.io/headless-worker-svc" + HashWithoutReplicasAndWorkersToDeleteKey = "ray.io/hash-without-replicas-and-workers-to-delete" + NumWorkerGroupsKey = "ray.io/num-worker-groups" + + // In KubeRay, the Ray container must be the first application container in a head or worker Pod. + RayContainerIndex = 0 + + // Batch scheduling labels + // TODO(tgaddair): consider making these part of the CRD + RaySchedulerName = "ray.io/scheduler-name" + RayPriorityClassName = "ray.io/priority-class-name" + + // Ray GCS FT related annotations + RayFTEnabledAnnotationKey = "ray.io/ft-enabled" + RayExternalStorageNSAnnotationKey = "ray.io/external-storage-namespace" + + // If this annotation is set to "true", the KubeRay operator will not modify the container's command. + // However, the generated `ray start` command will still be stored in the container's environment variable + // `KUBERAY_GEN_RAY_START_CMD`. + RayOverwriteContainerCmdAnnotationKey = "ray.io/overwrite-container-cmd" + + // Finalizers for GCS fault tolerance + GCSFaultToleranceRedisCleanupFinalizer = "ray.io/gcs-ft-redis-cleanup-finalizer" + + // EnableServeServiceKey is exclusively utilized to indicate if a RayCluster is directly used for serving. + // See https://github.com/ray-project/kuberay/pull/1672 for more details. + EnableServeServiceKey = "ray.io/enable-serve-service" + EnableServeServiceTrue = "true" + + EnableRayClusterServingServiceTrue = "true" + EnableRayClusterServingServiceFalse = "false" + + KubernetesApplicationNameLabelKey = "app.kubernetes.io/name" + KubernetesCreatedByLabelKey = "app.kubernetes.io/created-by" + + // Use as separator for pod name, for example, raycluster-small-size-worker-0 + DashSymbol = "-" + + // Use as default port + DefaultClientPort = 10001 + // For Ray >= 1.11.0, "DefaultRedisPort" actually refers to the GCS server port. + // However, the role of this port is unchanged in Ray APIs like ray.init and ray start. + // This is the port used by Ray workers and drivers inside the Ray cluster to connect to the Ray head. + DefaultRedisPort = 6379 + DefaultDashboardPort = 8265 + DefaultMetricsPort = 8080 + DefaultDashboardAgentListenPort = 52365 + DefaultServingPort = 8000 + + ClientPortName = "client" + RedisPortName = "redis" + DashboardPortName = "dashboard" + MetricsPortName = "metrics" + ServingPortName = "serve" + + // The default AppProtocol for Kubernetes service + DefaultServiceAppProtocol = "tcp" + + // The default application name + ApplicationName = "kuberay" + + // The default name for kuberay operator + ComponentName = "kuberay-operator" + + // The default suffix for Headless Service for multi-host worker groups. + // The full name will be of the form "${RayCluster_Name}-headless-worker-svc". 
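+	// For example (editor's note), a RayCluster named "raycluster-sample" gets
+	// a headless worker Service named "raycluster-sample-headless-worker-svc".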
+	HeadlessServiceSuffix = "headless-worker-svc"
+
+	// Use as container env variable
+	RAY_CLUSTER_NAME                        = "RAY_CLUSTER_NAME"
+	RAY_IP                                  = "RAY_IP"
+	FQ_RAY_IP                               = "FQ_RAY_IP"
+	RAY_PORT                                = "RAY_PORT"
+	RAY_ADDRESS                             = "RAY_ADDRESS"
+	REDIS_PASSWORD                          = "REDIS_PASSWORD"
+	RAY_DASHBOARD_ENABLE_K8S_DISK_USAGE     = "RAY_DASHBOARD_ENABLE_K8S_DISK_USAGE"
+	RAY_EXTERNAL_STORAGE_NS                 = "RAY_external_storage_namespace"
+	RAY_GCS_RPC_SERVER_RECONNECT_TIMEOUT_S  = "RAY_gcs_rpc_server_reconnect_timeout_s"
+	RAY_TIMEOUT_MS_TASK_WAIT_FOR_DEATH_INFO = "RAY_timeout_ms_task_wait_for_death_info"
+	RAY_GCS_SERVER_REQUEST_TIMEOUT_SECONDS  = "RAY_gcs_server_request_timeout_seconds"
+	RAY_SERVE_KV_TIMEOUT_S                  = "RAY_SERVE_KV_TIMEOUT_S"
+	RAY_USAGE_STATS_KUBERAY_IN_USE          = "RAY_USAGE_STATS_KUBERAY_IN_USE"
+	RAY_USAGE_STATS_EXTRA_TAGS              = "RAY_USAGE_STATS_EXTRA_TAGS"
+	RAYCLUSTER_DEFAULT_REQUEUE_SECONDS_ENV  = "RAYCLUSTER_DEFAULT_REQUEUE_SECONDS_ENV"
+	RAYCLUSTER_DEFAULT_REQUEUE_SECONDS      = 300
+	KUBERAY_GEN_RAY_START_CMD               = "KUBERAY_GEN_RAY_START_CMD"
+
+	// Environment variables for RayJob submitter Kubernetes Job.
+	// Example: ray job submit --address=http://$RAY_DASHBOARD_ADDRESS --submission-id=$RAY_JOB_SUBMISSION_ID ...
+	RAY_DASHBOARD_ADDRESS = "RAY_DASHBOARD_ADDRESS"
+	RAY_JOB_SUBMISSION_ID = "RAY_JOB_SUBMISSION_ID"
+
+	// Environment variables for Ray Autoscaler V2.
+	// The value of RAY_CLOUD_INSTANCE_ID is the Pod name for Autoscaler V2 alpha. This may change in the future.
+	RAY_CLOUD_INSTANCE_ID = "RAY_CLOUD_INSTANCE_ID"
+	// The value of RAY_NODE_TYPE_NAME is the name of the node group (i.e., the value of the "ray.io/group" label).
+	RAY_NODE_TYPE_NAME = "RAY_NODE_TYPE_NAME"
+
+	// This KubeRay operator environment variable is used to determine if random Pod
+	// deletion should be enabled. Note that this only takes effect when autoscaling
+	// is enabled for the RayCluster. This is a feature flag for v0.6.0, and will be
+	// removed if the default behavior is stable enough.
+	ENABLE_RANDOM_POD_DELETE = "ENABLE_RANDOM_POD_DELETE"
+
+	// This KubeRay operator environment variable is used to determine if the Redis
+	// cleanup Job should be enabled. This is a feature flag for v1.0.0.
+	ENABLE_GCS_FT_REDIS_CLEANUP = "ENABLE_GCS_FT_REDIS_CLEANUP"
+
+	// This environment variable for the KubeRay operator is used to determine whether to enable
+	// the injection of readiness and liveness probes into Ray head and worker containers.
+	// Enabling this feature contributes to the robustness of Ray clusters. It is currently a feature
+	// flag for v1.1.0 and will be removed if the behavior proves to be stable enough.
+	ENABLE_PROBES_INJECTION = "ENABLE_PROBES_INJECTION"
+
+	// Ray core default configurations
+	DefaultWorkerRayGcsReconnectTimeoutS = "600"
+
+	LOCAL_HOST = "127.0.0.1"
+	// Ray FT default readiness probe values
+	DefaultReadinessProbeInitialDelaySeconds = 10
+	DefaultReadinessProbeTimeoutSeconds      = 1
+	DefaultReadinessProbePeriodSeconds       = 5
+	DefaultReadinessProbeSuccessThreshold    = 1
+	DefaultReadinessProbeFailureThreshold    = 10
+	ServeReadinessProbeFailureThreshold      = 1
+
+	// Ray FT default liveness probe values
+	DefaultLivenessProbeInitialDelaySeconds = 30
+	DefaultLivenessProbeTimeoutSeconds      = 1
+	DefaultLivenessProbePeriodSeconds       = 5
+	DefaultLivenessProbeSuccessThreshold    = 1
+	DefaultLivenessProbeFailureThreshold    = 120
+
+	// Ray health check related configurations
+	// Note: Since the Raylet process and the dashboard agent process are fate-sharing,
+	// only one of them needs to be checked.
So, RayAgentRayletHealthPath accesses the dashboard agent's API endpoint + // to check the health of the Raylet process. + // TODO (kevin85421): Should we take the dashboard process into account? + RayAgentRayletHealthPath = "api/local_raylet_healthz" + RayDashboardGCSHealthPath = "api/gcs_healthz" + RayServeProxyHealthPath = "-/healthz" + BaseWgetHealthCommand = "wget -T 2 -q -O- http://localhost:%d/%s | grep success" + + // Finalizers for RayJob + RayJobStopJobFinalizer = "ray.io/rayjob-finalizer" + + // RayNodeHeadGroupLabelValue is the value for the RayNodeGroupLabelKey label on a head node + RayNodeHeadGroupLabelValue = "headgroup" + + // Telemetry + KUBERAY_VERSION = "v1.1.1" +) + +type ServiceType string + +const ( + HeadService ServiceType = "headService" + ServingService ServiceType = "serveService" +) + +// RayOriginatedFromCRDLabelValue generates a value for the label RayOriginatedFromCRDLabelKey +// This is also the only function to construct label filter of resources originated from a given CRDType. +func RayOriginatedFromCRDLabelValue(crdType CRDType) string { + return string(crdType) +} diff --git a/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/dashboard_httpclient.go b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/dashboard_httpclient.go new file mode 100644 index 0000000000..e864e4fa6b --- /dev/null +++ b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/dashboard_httpclient.go @@ -0,0 +1,451 @@ +package utils + +import ( + "bytes" + "context" + "fmt" + "io" + "net/http" + "time" + + "k8s.io/apimachinery/pkg/util/yaml" + + fmtErrors "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + "k8s.io/apimachinery/pkg/api/errors" + + "k8s.io/apimachinery/pkg/util/json" + + rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" +) + +var ( + // Multi-application URL paths + ServeDetailsPath = "/api/serve/applications/" + DeployPathV2 = "/api/serve/applications/" + // Job URL paths + JobPath = "/api/jobs/" +) + +type RayDashboardClientInterface interface { + InitClient(url string) + UpdateDeployments(ctx context.Context, configJson []byte) error + // V2/multi-app Rest API + GetServeDetails(ctx context.Context) (*ServeDetails, error) + GetMultiApplicationStatus(context.Context) (map[string]*ServeApplicationStatus, error) + GetJobInfo(ctx context.Context, jobId string) (*RayJobInfo, error) + ListJobs(ctx context.Context) (*[]RayJobInfo, error) + SubmitJob(ctx context.Context, rayJob *rayv1.RayJob) (string, error) + SubmitJobReq(ctx context.Context, request *RayJobRequest, name *string) (string, error) + GetJobLog(ctx context.Context, jobName string) (*string, error) + StopJob(ctx context.Context, jobName string) error + DeleteJob(ctx context.Context, jobName string) error +} + +type BaseDashboardClient struct { + client http.Client + dashboardURL string +} + +func GetRayDashboardClient() RayDashboardClientInterface { + return &RayDashboardClient{} +} + +type RayDashboardClient struct { + BaseDashboardClient +} + +// FetchHeadServiceURL fetches the URL that consists of the FQDN for the RayCluster's head service +// and the port with the given port name (defaultPortName). 
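+// The returned value is host:port with no scheme, e.g. "raycluster-sample-head-svc.default.svc.cluster.local:8265"
+// (an illustrative name); callers prepend a scheme themselves, as InitClient below does with "http://".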
+func FetchHeadServiceURL(ctx context.Context, cli client.Client, rayCluster *rayv1.RayCluster, defaultPortName string) (string, error) {
+	log := ctrl.LoggerFrom(ctx)
+	headSvc := &corev1.Service{}
+	headSvcName, err := GenerateHeadServiceName(RayClusterCRD, rayCluster.Spec, rayCluster.Name)
+	if err != nil {
+		log.Error(err, "Failed to generate head service name", "RayCluster name", rayCluster.Name, "RayCluster spec", rayCluster.Spec)
+		return "", err
+	}
+
+	if err = cli.Get(ctx, client.ObjectKey{Name: headSvcName, Namespace: rayCluster.Namespace}, headSvc); err != nil {
+		if errors.IsNotFound(err) {
+			log.Error(err, "Head service is not found", "head service name", headSvcName, "namespace", rayCluster.Namespace)
+		}
+		return "", err
+	}
+
+	log.Info("FetchHeadServiceURL", "head service name", headSvc.Name, "namespace", headSvc.Namespace)
+	servicePorts := headSvc.Spec.Ports
+	port := int32(-1)
+
+	for _, servicePort := range servicePorts {
+		if servicePort.Name == defaultPortName {
+			port = servicePort.Port
+			break
+		}
+	}
+
+	if port == int32(-1) {
+		return "", fmtErrors.Errorf("%s port is not found", defaultPortName)
+	}
+
+	domainName := GetClusterDomainName()
+	headServiceURL := fmt.Sprintf("%s.%s.svc.%s:%v",
+		headSvc.Name,
+		headSvc.Namespace,
+		domainName,
+		port)
+	log.Info("FetchHeadServiceURL", "head service URL", headServiceURL, "port", defaultPortName)
+	return headServiceURL, nil
+}
+
+func (r *RayDashboardClient) InitClient(url string) {
+	r.client = http.Client{
+		Timeout: 2 * time.Second,
+	}
+	r.dashboardURL = "http://" + url
+}
+
+// UpdateDeployments updates the deployments in the Ray cluster.
+func (r *RayDashboardClient) UpdateDeployments(ctx context.Context, configJson []byte) error {
+	var req *http.Request
+	var err error
+	if req, err = http.NewRequestWithContext(ctx, http.MethodPut, r.dashboardURL+DeployPathV2, bytes.NewBuffer(configJson)); err != nil {
+		return err
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+
+	resp, err := r.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		return fmt.Errorf("UpdateDeployments fail: %s %s", resp.Status, string(body))
+	}
+
+	return nil
+}
+
+func (r *RayDashboardClient) GetMultiApplicationStatus(ctx context.Context) (map[string]*ServeApplicationStatus, error) {
+	serveDetails, err := r.GetServeDetails(ctx)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to get serve details: %v", err)
+	}
+
+	return r.ConvertServeDetailsToApplicationStatuses(serveDetails)
+}
+
+// GetServeDetails gets details on all live applications on the Ray cluster.
+func (r *RayDashboardClient) GetServeDetails(ctx context.Context) (*ServeDetails, error) {
+	req, err := http.NewRequestWithContext(ctx, "GET", r.dashboardURL+ServeDetailsPath, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		return nil, fmt.Errorf("GetServeDetails fail: %s %s", resp.Status, string(body))
+	}
+
+	var serveDetails ServeDetails
+	if err = json.Unmarshal(body, &serveDetails); err != nil {
+		return nil, fmt.Errorf("GetServeDetails failed. Failed to unmarshal bytes: %s", string(body))
+	}
+
+	return &serveDetails, nil
+}
+
+func (r *RayDashboardClient) ConvertServeDetailsToApplicationStatuses(serveDetails *ServeDetails) (map[string]*ServeApplicationStatus, error) {
+	detailsJson, err := json.Marshal(serveDetails.Applications)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal serve details: %v.", serveDetails.Applications)
+	}
+
+	applicationStatuses := map[string]*ServeApplicationStatus{}
+	if err = json.Unmarshal(detailsJson, &applicationStatuses); err != nil {
+		return nil, fmt.Errorf("Failed to unmarshal serve details bytes into map of application statuses: %v. Bytes: %s", err, string(detailsJson))
+	}
+
+	return applicationStatuses, nil
+}
+
+type RuntimeEnvType map[string]interface{}
+
+// RayJobInfo is the response of "ray job status" api.
+// Reference to https://docs.ray.io/en/latest/cluster/running-applications/job-submission/rest.html#ray-job-rest-api-spec
+// Reference to https://github.com/ray-project/ray/blob/cfbf98c315cfb2710c56039a3c96477d196de049/dashboard/modules/job/pydantic_models.py#L38-L107
+type RayJobInfo struct {
+	JobStatus    rayv1.JobStatus   `json:"status,omitempty"`
+	Entrypoint   string            `json:"entrypoint,omitempty"`
+	JobId        string            `json:"job_id,omitempty"`
+	SubmissionId string            `json:"submission_id,omitempty"`
+	Message      string            `json:"message,omitempty"`
+	ErrorType    *string           `json:"error_type,omitempty"`
+	StartTime    uint64            `json:"start_time,omitempty"`
+	EndTime      uint64            `json:"end_time,omitempty"`
+	Metadata     map[string]string `json:"metadata,omitempty"`
+	RuntimeEnv   RuntimeEnvType    `json:"runtime_env,omitempty"`
+}
+
+// RayJobRequest is the request body to submit.
+// Reference to https://docs.ray.io/en/latest/cluster/running-applications/job-submission/rest.html#ray-job-rest-api-spec
+// Reference to https://github.com/ray-project/ray/blob/cfbf98c315cfb2710c56039a3c96477d196de049/dashboard/modules/job/common.py#L325-L353
+type RayJobRequest struct {
+	Entrypoint   string             `json:"entrypoint"`
+	SubmissionId string             `json:"submission_id,omitempty"`
+	RuntimeEnv   RuntimeEnvType     `json:"runtime_env,omitempty"`
+	Metadata     map[string]string  `json:"metadata,omitempty"`
+	NumCpus      float32            `json:"entrypoint_num_cpus,omitempty"`
+	NumGpus      float32            `json:"entrypoint_num_gpus,omitempty"`
+	Resources    map[string]float32 `json:"entrypoint_resources,omitempty"`
+}
+
+type RayJobResponse struct {
+	JobId string `json:"job_id"`
+}
+
+type RayJobStopResponse struct {
+	Stopped bool `json:"stopped"`
+}
+
+type RayJobLogsResponse struct {
+	Logs string `json:"logs,omitempty"`
+}
+
+// Note that RayJobInfo and error cannot both be nil at the same time.
+// If the Ray job with the given JobId cannot be found, make sure to return a BadRequest error.
+func (r *RayDashboardClient) GetJobInfo(ctx context.Context, jobId string) (*RayJobInfo, error) {
+	req, err := http.NewRequestWithContext(ctx, "GET", r.dashboardURL+JobPath+jobId, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	resp, err := r.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusNotFound {
+		return nil, errors.NewBadRequest("Job " + jobId + " does not exist on the cluster")
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	var jobInfo RayJobInfo
+	if err = json.Unmarshal(body, &jobInfo); err != nil {
+		// Maybe the body is not valid JSON; raise an error with the body.
+ return nil, fmt.Errorf("GetJobInfo fail: %s", string(body)) + } + + return &jobInfo, nil +} + +func (r *RayDashboardClient) ListJobs(ctx context.Context) (*[]RayJobInfo, error) { + req, err := http.NewRequestWithContext(ctx, "GET", r.dashboardURL+JobPath, nil) + if err != nil { + return nil, err + } + + resp, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + return nil, nil + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var jobInfo []RayJobInfo + if err = json.Unmarshal(body, &jobInfo); err != nil { + // Maybe body is not valid json, raise an error with the body. + return nil, fmt.Errorf("GetJobInfo fail: %s", string(body)) + } + + return &jobInfo, nil +} + +func (r *RayDashboardClient) SubmitJob(ctx context.Context, rayJob *rayv1.RayJob) (jobId string, err error) { + request, err := ConvertRayJobToReq(rayJob) + if err != nil { + return "", err + } + return r.SubmitJobReq(ctx, request, &rayJob.Name) +} + +func (r *RayDashboardClient) SubmitJobReq(ctx context.Context, request *RayJobRequest, name *string) (jobId string, err error) { + log := ctrl.LoggerFrom(ctx) + rayJobJson, err := json.Marshal(request) + if err != nil { + return + } + if name != nil { + log.Info("Submit a ray job", "rayJob", name, "jobInfo", string(rayJobJson)) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.dashboardURL+JobPath, bytes.NewBuffer(rayJobJson)) + if err != nil { + return + } + + req.Header.Set("Content-Type", "application/json") + resp, err := r.client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + + var jobResp RayJobResponse + if err = json.Unmarshal(body, &jobResp); err != nil { + // Maybe body is not valid json, raise an error with the body. + return "", fmt.Errorf("SubmitJob fail: %s", string(body)) + } + + return jobResp.JobId, nil +} + +// Get Job Log +func (r *RayDashboardClient) GetJobLog(ctx context.Context, jobName string) (*string, error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Get ray job log", "rayJob", jobName) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, r.dashboardURL+JobPath+jobName+"/logs", nil) + if err != nil { + return nil, err + } + resp, err := r.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound { + // This does the right thing, but breaks E2E test + // return nil, errors.NewBadRequest("Job " + jobId + " does not exist on the cluster") + return nil, nil + } + + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + var jobLog RayJobLogsResponse + if err = json.Unmarshal(body, &jobLog); err != nil { + // Maybe body is not valid json, raise an error with the body. 
+ return nil, fmt.Errorf("GetJobLog fail: %s", string(body)) + } + + return &jobLog.Logs, nil +} + +func (r *RayDashboardClient) StopJob(ctx context.Context, jobName string) (err error) { + log := ctrl.LoggerFrom(ctx) + log.Info("Stop a ray job", "rayJob", jobName) + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, r.dashboardURL+JobPath+jobName+"/stop", nil) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + resp, err := r.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + body, _ := io.ReadAll(resp.Body) + + var jobStopResp RayJobStopResponse + if err = json.Unmarshal(body, &jobStopResp); err != nil { + return err + } + + if !jobStopResp.Stopped { + jobInfo, err := r.GetJobInfo(ctx, jobName) + if err != nil { + return err + } + // StopJob only returns an error when JobStatus is not in terminal states (STOPPED / SUCCEEDED / FAILED) + if !rayv1.IsJobTerminal(jobInfo.JobStatus) { + return fmt.Errorf("Failed to stopped job: %v", jobInfo) + } + } + return nil +} + +func (r *RayDashboardClient) DeleteJob(ctx context.Context, jobName string) error { + log := ctrl.LoggerFrom(ctx) + log.Info("Delete a ray job", "rayJob", jobName) + + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, r.dashboardURL+JobPath+jobName, nil) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + resp, err := r.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + return nil +} + +func ConvertRayJobToReq(rayJob *rayv1.RayJob) (*RayJobRequest, error) { + req := &RayJobRequest{ + Entrypoint: rayJob.Spec.Entrypoint, + SubmissionId: rayJob.Status.JobId, + Metadata: rayJob.Spec.Metadata, + } + if len(rayJob.Spec.RuntimeEnvYAML) != 0 { + runtimeEnv, err := UnmarshalRuntimeEnvYAML(rayJob.Spec.RuntimeEnvYAML) + if err != nil { + return nil, err + } + req.RuntimeEnv = runtimeEnv + } + req.NumCpus = rayJob.Spec.EntrypointNumCpus + req.NumGpus = rayJob.Spec.EntrypointNumGpus + if rayJob.Spec.EntrypointResources != "" { + if err := json.Unmarshal([]byte(rayJob.Spec.EntrypointResources), &req.Resources); err != nil { + return nil, err + } + } + return req, nil +} + +func UnmarshalRuntimeEnvYAML(runtimeEnvYAML string) (RuntimeEnvType, error) { + var runtimeEnv RuntimeEnvType + err := yaml.Unmarshal([]byte(runtimeEnvYAML), &runtimeEnv) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal RuntimeEnvYAML: %v: %v", runtimeEnvYAML, err) + } + return runtimeEnv, nil +} diff --git a/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/fake_httpproxy_httpclient.go b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/fake_httpproxy_httpclient.go new file mode 100644 index 0000000000..4df3cabff0 --- /dev/null +++ b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/fake_httpproxy_httpclient.go @@ -0,0 +1,28 @@ +package utils + +import ( + "fmt" + "net/http" + "time" +) + +type FakeRayHttpProxyClient struct { + client http.Client + httpProxyURL string +} + +func (r *FakeRayHttpProxyClient) InitClient() { + r.client = http.Client{ + Timeout: 20 * time.Millisecond, + } +} + +func (r *FakeRayHttpProxyClient) SetHostIp(hostIp string, port int) { + r.httpProxyURL = fmt.Sprintf("http://%s:%d", hostIp, port) +} + +func (r *FakeRayHttpProxyClient) CheckHealth() error { + // TODO: test check return error cases. + // Always return successful. 
+ return nil +} diff --git a/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/fake_serve_httpclient.go b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/fake_serve_httpclient.go new file mode 100644 index 0000000000..7c84bf7e7a --- /dev/null +++ b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/fake_serve_httpclient.go @@ -0,0 +1,81 @@ +package utils + +import ( + "context" + "fmt" + "net/http" + "sync/atomic" + + rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" +) + +type FakeRayDashboardClient struct { + BaseDashboardClient + multiAppStatuses map[string]*ServeApplicationStatus + serveDetails ServeDetails + + GetJobInfoMock atomic.Pointer[func(context.Context, string) (*RayJobInfo, error)] +} + +var _ RayDashboardClientInterface = (*FakeRayDashboardClient)(nil) + +func (r *FakeRayDashboardClient) InitClient(url string) { + r.client = http.Client{} + r.dashboardURL = "http://" + url +} + +func (r *FakeRayDashboardClient) UpdateDeployments(_ context.Context, configJson []byte) error { + fmt.Print("UpdateDeployments fake succeeds.") + return nil +} + +func (r *FakeRayDashboardClient) GetMultiApplicationStatus(_ context.Context) (map[string]*ServeApplicationStatus, error) { + return r.multiAppStatuses, nil +} + +func (r *FakeRayDashboardClient) GetServeDetails(_ context.Context) (*ServeDetails, error) { + return &r.serveDetails, nil +} + +func (r *FakeRayDashboardClient) SetMultiApplicationStatuses(statuses map[string]*ServeApplicationStatus) { + r.multiAppStatuses = statuses +} + +func (r *FakeRayDashboardClient) GetJobInfo(ctx context.Context, jobId string) (*RayJobInfo, error) { + if mock := r.GetJobInfoMock.Load(); mock != nil { + return (*mock)(ctx, jobId) + } + return &RayJobInfo{JobStatus: rayv1.JobStatusRunning}, nil +} + +func (r *FakeRayDashboardClient) ListJobs(ctx context.Context) (*[]RayJobInfo, error) { + if mock := r.GetJobInfoMock.Load(); mock != nil { + info, err := (*mock)(ctx, "job_id") + if err != nil { + return nil, err + } + return &[]RayJobInfo{*info}, nil + } + return nil, nil +} + +func (r *FakeRayDashboardClient) SubmitJob(_ context.Context, rayJob *rayv1.RayJob) (jobId string, err error) { + return "", nil +} + +func (r *FakeRayDashboardClient) SubmitJobReq(_ context.Context, request *RayJobRequest, name *string) (string, error) { + return "", nil +} + +func (r *FakeRayDashboardClient) GetJobLog(_ context.Context, jobName string) (*string, error) { + lg := "log" + return &lg, nil +} + +func (r *FakeRayDashboardClient) StopJob(_ context.Context, jobName string) (err error) { + return nil +} + +func (r *FakeRayDashboardClient) DeleteJob(_ context.Context, jobName string) error { + return nil +} diff --git a/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/httpproxy_httpclient.go b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/httpproxy_httpclient.go new file mode 100644 index 0000000000..9856c5d04f --- /dev/null +++ b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/httpproxy_httpclient.go @@ -0,0 +1,53 @@ +package utils + +import ( + "fmt" + "io" + "net/http" + "time" +) + +type RayHttpProxyClientInterface interface { + InitClient() + CheckHealth() error + SetHostIp(hostIp string, port int) +} + +func GetRayHttpProxyClient() RayHttpProxyClientInterface { + return &RayHttpProxyClient{} +} + +type RayHttpProxyClient struct { + client http.Client + httpProxyURL string +} + +func (r *RayHttpProxyClient) 
InitClient() {
+	r.client = http.Client{
+		Timeout: 20 * time.Millisecond,
+	}
+}
+
+func (r *RayHttpProxyClient) SetHostIp(hostIp string, port int) {
+	r.httpProxyURL = fmt.Sprintf("http://%s:%d/", hostIp, port)
+}
+
+func (r *RayHttpProxyClient) CheckHealth() error {
+	req, err := http.NewRequest("GET", r.httpProxyURL+RayServeProxyHealthPath, nil)
+	if err != nil {
+		return err
+	}
+
+	resp, err := r.client.Do(req)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	body, _ := io.ReadAll(resp.Body)
+	if resp.StatusCode < 200 || resp.StatusCode > 299 {
+		return fmt.Errorf("RayHttpProxyClient CheckHealth fail: %s %s", resp.Status, string(body))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/serve_api_models.go b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/serve_api_models.go
new file mode 100644
index 0000000000..68671af910
--- /dev/null
+++ b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/serve_api_models.go
@@ -0,0 +1,42 @@
+package utils
+
+// Please see the Ray Serve docs
+// https://docs.ray.io/en/latest/serve/api/doc/ray.serve.schema.ServeDeploySchema.html for the
+// multi-application schema.
+
+// ServeDeploymentStatus and ServeApplicationStatus describe the format of the statuses that will
+// be returned by the GetMultiApplicationStatus method of the dashboard client.
+// Describes the status of a deployment.
+type ServeDeploymentStatus struct {
+	Name    string `json:"name,omitempty"`
+	Status  string `json:"status,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+// Describes the status of an application.
+type ServeApplicationStatus struct {
+	Name        string                           `json:"name,omitempty"`
+	Status      string                           `json:"status"`
+	Message     string                           `json:"message,omitempty"`
+	Deployments map[string]ServeDeploymentStatus `json:"deployments"`
+}
+
+// V2 Serve API response format. These extend the ServeDeploymentStatus and ServeApplicationStatus structs,
+// but contain more information, such as the route prefix, because the V2/multi-app GET API fetches general metadata,
+// not just statuses.
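+// A trimmed response from the Serve details endpoint might look like this (illustrative values only):
+//   {"applications": {"app1": {"status": "RUNNING", "route_prefix": "/app1", "deployments": {...}}}, "deploy_mode": "MULTI_APP"}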
+type ServeDeploymentDetails struct { + ServeDeploymentStatus + RoutePrefix string `json:"route_prefix,omitempty"` +} + +type ServeApplicationDetails struct { + ServeApplicationStatus + RoutePrefix string `json:"route_prefix,omitempty"` + DocsPath string `json:"docs_path,omitempty"` + Deployments map[string]ServeDeploymentDetails `json:"deployments"` +} + +type ServeDetails struct { + Applications map[string]ServeApplicationDetails `json:"applications"` + DeployMode string `json:"deploy_mode,omitempty"` +} diff --git a/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/util.go b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/util.go new file mode 100644 index 0000000000..b0b2b44c6d --- /dev/null +++ b/vendor/github.com/ray-project/kuberay/ray-operator/controllers/ray/utils/util.go @@ -0,0 +1,563 @@ +package utils + +import ( + "context" + "crypto/sha1" + "encoding/base32" + "fmt" + "math" + "os" + "reflect" + "strconv" + "strings" + "time" + "unicode" + + batchv1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/util/json" + + "k8s.io/apimachinery/pkg/util/rand" + + rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" +) + +const ( + RayClusterSuffix = "-raycluster-" + ServeName = "serve" + ClusterDomainEnvKey = "CLUSTER_DOMAIN" + DefaultDomainName = "cluster.local" +) + +// TODO (kevin85421): Define CRDType here rather than constant.go to avoid circular dependency. +type CRDType string + +const ( + RayClusterCRD CRDType = "RayCluster" + RayJobCRD CRDType = "RayJob" + RayServiceCRD CRDType = "RayService" +) + +var crdMap = map[string]CRDType{ + "RayCluster": RayClusterCRD, + "RayJob": RayJobCRD, + "RayService": RayServiceCRD, +} + +func GetCRDType(key string) CRDType { + if crdType, exists := crdMap[key]; exists { + return crdType + } + return RayClusterCRD +} + +// GetClusterDomainName returns cluster's domain name +func GetClusterDomainName() string { + if domain := os.Getenv(ClusterDomainEnvKey); len(domain) > 0 { + return domain + } + + // Return default domain name. + return DefaultDomainName +} + +// IsCreated returns true if pod has been created and is maintained by the API server +func IsCreated(pod *corev1.Pod) bool { + return pod.Status.Phase != "" +} + +// IsRunningAndReady returns true if pod is in the PodRunning Phase, if it has a condition of PodReady. +func IsRunningAndReady(pod *corev1.Pod) bool { + if pod.Status.Phase != corev1.PodRunning { + return false + } + for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodReady && cond.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func CheckRouteName(ctx context.Context, s string, n string) string { + log := ctrl.LoggerFrom(ctx) + + // 6 chars are consumed at the end with "-head-" + 5 generated. 
+	// The namespace name will be appended to form {name}-{namespace} for the first host
+	// segment within the route:
+	// 63 - (6 + 5) - (length of namespace name + 1)
+	// => 52 - (length of namespace name + 1)
+	// => 51 - (length of namespace name)
+	maxLength := 51 - len(n)
+
+	if len(s) > maxLength {
+		// shorten the name
+		log.Info(fmt.Sprintf("route name is too long: len = %v, we will shorten it to = %v\n", len(s), maxLength))
+		s = s[:maxLength]
+	}
+
+	// Pass through CheckName for remaining string validations
+	return CheckName(s)
+}
+
+// CheckName makes sure the name does not start with a numeric value and the total length is < 63 chars.
+func CheckName(s string) string {
+	maxLength := 50 // 63 - (max(8, 6) + 5): 6 to 8 chars are consumed at the end with "-head-" or "-worker-", plus 5 generated chars.
+
+	if len(s) > maxLength {
+		// shorten the name
+		offset := int(math.Abs(float64(maxLength) - float64(len(s))))
+		fmt.Printf("pod name is too long: len = %v, we will shorten it by offset = %v", len(s), offset)
+		s = s[offset:]
+	}
+
+	// cannot start with a numeric value
+	if unicode.IsDigit(rune(s[0])) {
+		s = "r" + s[1:]
+	}
+
+	// cannot start with a punctuation
+	if unicode.IsPunct(rune(s[0])) {
+		fmt.Println(s)
+		s = "r" + s[1:]
+	}
+
+	return s
+}
+
+// CheckLabel makes sure the label value does not start with a punctuation and the total length is < 63 chars.
+func CheckLabel(s string) string {
+	maxLength := 63
+
+	if len(s) > maxLength {
+		// shorten the name
+		offset := int(math.Abs(float64(maxLength) - float64(len(s))))
+		fmt.Printf("label value is too long: len = %v, we will shorten it by offset = %v\n", len(s), offset)
+		s = s[offset:]
+	}
+
+	// cannot start with a punctuation
+	if unicode.IsPunct(rune(s[0])) {
+		fmt.Println(s)
+		s = "r" + s[1:]
+	}
+
+	return s
+}
+
+// FormatInt32 returns the decimal string representation of n.
+func FormatInt32(n int32) string {
+	return strconv.FormatInt(int64(n), 10)
+}
+
+// GetNamespace returns the namespace, defaulting to "default" when unset.
+func GetNamespace(metaData metav1.ObjectMeta) string {
+	if metaData.Namespace == "" {
+		return "default"
+	}
+	return metaData.Namespace
+}
+
+// GenerateHeadServiceName generates a Ray head service name. Note that there are two types of head services:
+//
+// (1) For RayCluster: If `HeadService.Name` in the cluster spec is not empty, it will be used as the head service name.
+// Otherwise, the name is generated based on the RayCluster CR's name.
+// (2) For RayService: It's important to note that the RayService CR not only possesses a head service owned by its RayCluster CR
+// but also maintains a separate head service for itself to facilitate zero-downtime upgrades. The name of the head service owned
+// by the RayService CR is generated based on the RayService CR's name.
+//
+// @param crdType: The type of the CRD that owns the head service.
+// @param clusterSpec: `RayClusterSpec`
+// @param ownerName: The name of the CR that owns the head service.
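+//
+// For example (hypothetical names): GenerateHeadServiceName(RayClusterCRD, spec, "raycluster-sample")
+// would return "raycluster-sample-head-svc", unless spec.HeadGroupSpec.HeadService specifies its own name.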
+func GenerateHeadServiceName(crdType CRDType, clusterSpec rayv1.RayClusterSpec, ownerName string) (string, error) { + switch crdType { + case RayServiceCRD: + return CheckName(fmt.Sprintf("%s-%s-%s", ownerName, rayv1.HeadNode, "svc")), nil + case RayClusterCRD: + headSvcName := CheckName(fmt.Sprintf("%s-%s-%s", ownerName, rayv1.HeadNode, "svc")) + if clusterSpec.HeadGroupSpec.HeadService != nil && clusterSpec.HeadGroupSpec.HeadService.Name != "" { + headSvcName = clusterSpec.HeadGroupSpec.HeadService.Name + } + return headSvcName, nil + default: + return "", fmt.Errorf("unknown CRD type: %s", crdType) + } +} + +// GenerateFQDNServiceName generates a Fully Qualified Domain Name. +func GenerateFQDNServiceName(ctx context.Context, cluster rayv1.RayCluster, namespace string) string { + log := ctrl.LoggerFrom(ctx) + headSvcName, err := GenerateHeadServiceName(RayClusterCRD, cluster.Spec, cluster.Name) + if err != nil { + log.Error(err, "Failed to generate head service name") + return "" + } + return fmt.Sprintf("%s.%s.svc.%s", headSvcName, namespace, GetClusterDomainName()) +} + +// ExtractRayIPFromFQDN extracts the head service name (i.e., RAY_IP, deprecated) from a fully qualified +// domain name (FQDN). This function is provided for backward compatibility purposes only. +func ExtractRayIPFromFQDN(fqdnRayIP string) string { + return strings.Split(fqdnRayIP, ".")[0] +} + +// GenerateServeServiceName generates name for serve service. +func GenerateServeServiceName(serviceName string) string { + return CheckName(fmt.Sprintf("%s-%s-%s", serviceName, ServeName, "svc")) +} + +// GenerateServeServiceLabel generates label value for serve service selector. +func GenerateServeServiceLabel(serviceName string) string { + return fmt.Sprintf("%s-%s", serviceName, ServeName) +} + +// GenerateIngressName generates an ingress name from cluster name +func GenerateIngressName(clusterName string) string { + return fmt.Sprintf("%s-%s-%s", clusterName, rayv1.HeadNode, "ingress") +} + +// GenerateRouteName generates an ingress name from cluster name +func GenerateRouteName(clusterName string) string { + return fmt.Sprintf("%s-%s-%s", clusterName, rayv1.HeadNode, "route") +} + +// GenerateRayClusterName generates a ray cluster name from ray service name +func GenerateRayClusterName(serviceName string) string { + return fmt.Sprintf("%s%s%s", serviceName, RayClusterSuffix, rand.String(5)) +} + +// GenerateRayJobId generates a ray job id for submission +func GenerateRayJobId(rayjob string) string { + return fmt.Sprintf("%s-%s", rayjob, rand.String(5)) +} + +// GenerateIdentifier generates identifier of same group pods +func GenerateIdentifier(clusterName string, nodeType rayv1.RayNodeType) string { + return fmt.Sprintf("%s-%s", clusterName, nodeType) +} + +func GetWorkerGroupDesiredReplicas(ctx context.Context, workerGroupSpec rayv1.WorkerGroupSpec) int32 { + log := ctrl.LoggerFrom(ctx) + // Always adhere to min/max replicas constraints. + var workerReplicas int32 + if *workerGroupSpec.MinReplicas > *workerGroupSpec.MaxReplicas { + log.Info(fmt.Sprintf("minReplicas (%v) is greater than maxReplicas (%v), using maxReplicas as desired replicas. "+ + "Please fix this to avoid any unexpected behaviors.", *workerGroupSpec.MinReplicas, *workerGroupSpec.MaxReplicas)) + workerReplicas = *workerGroupSpec.MaxReplicas + } else if workerGroupSpec.Replicas == nil || *workerGroupSpec.Replicas < *workerGroupSpec.MinReplicas { + // Replicas is impossible to be nil as it has a default value assigned in the CRD. 
+ // Add this check to make testing easier. + workerReplicas = *workerGroupSpec.MinReplicas + } else if *workerGroupSpec.Replicas > *workerGroupSpec.MaxReplicas { + workerReplicas = *workerGroupSpec.MaxReplicas + } else { + workerReplicas = *workerGroupSpec.Replicas + } + return workerReplicas +} + +// CalculateDesiredReplicas calculate desired worker replicas at the cluster level +func CalculateDesiredReplicas(ctx context.Context, cluster *rayv1.RayCluster) int32 { + count := int32(0) + for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { + count += GetWorkerGroupDesiredReplicas(ctx, nodeGroup) + } + + return count +} + +// CalculateMinReplicas calculates min worker replicas at the cluster level +func CalculateMinReplicas(cluster *rayv1.RayCluster) int32 { + count := int32(0) + for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { + count += *nodeGroup.MinReplicas + } + + return count +} + +// CalculateMaxReplicas calculates max worker replicas at the cluster level +func CalculateMaxReplicas(cluster *rayv1.RayCluster) int32 { + count := int32(0) + for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { + count += *nodeGroup.MaxReplicas + } + + return count +} + +// CalculateAvailableReplicas calculates available worker replicas at the cluster level +// A worker is available if its Pod is running +func CalculateAvailableReplicas(pods corev1.PodList) int32 { + count := int32(0) + for _, pod := range pods.Items { + if val, ok := pod.Labels["ray.io/node-type"]; !ok || val != string(rayv1.WorkerNode) { + continue + } + if pod.Status.Phase == corev1.PodRunning { + count++ + } + } + + return count +} + +func CalculateDesiredResources(cluster *rayv1.RayCluster) corev1.ResourceList { + desiredResourcesList := []corev1.ResourceList{{}} + headPodResource := calculatePodResource(cluster.Spec.HeadGroupSpec.Template.Spec) + desiredResourcesList = append(desiredResourcesList, headPodResource) + for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { + podResource := calculatePodResource(nodeGroup.Template.Spec) + for i := int32(0); i < *nodeGroup.Replicas; i++ { + desiredResourcesList = append(desiredResourcesList, podResource) + } + } + return sumResourceList(desiredResourcesList) +} + +func CalculateMinResources(cluster *rayv1.RayCluster) corev1.ResourceList { + minResourcesList := []corev1.ResourceList{{}} + headPodResource := calculatePodResource(cluster.Spec.HeadGroupSpec.Template.Spec) + minResourcesList = append(minResourcesList, headPodResource) + for _, nodeGroup := range cluster.Spec.WorkerGroupSpecs { + podResource := calculatePodResource(nodeGroup.Template.Spec) + for i := int32(0); i < *nodeGroup.MinReplicas; i++ { + minResourcesList = append(minResourcesList, podResource) + } + } + return sumResourceList(minResourcesList) +} + +// calculatePodResource returns the total resources of a pod. +// Request values take precedence over limit values. 
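+// For example, a container with requests {cpu: 1} and limits {cpu: 2, memory: 4Gi} contributes
+// cpu: 1 (the request takes precedence) and memory: 4Gi (the limit fills in names with no request).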
+func calculatePodResource(podSpec corev1.PodSpec) corev1.ResourceList { + podResource := corev1.ResourceList{} + for _, container := range podSpec.Containers { + containerResource := container.Resources.Requests + if containerResource == nil { + containerResource = corev1.ResourceList{} + } + for name, quantity := range container.Resources.Limits { + if _, ok := containerResource[name]; !ok { + containerResource[name] = quantity + } + } + for name, quantity := range containerResource { + if totalQuantity, ok := podResource[name]; ok { + totalQuantity.Add(quantity) + podResource[name] = totalQuantity + } else { + podResource[name] = quantity + } + } + } + return podResource +} + +func sumResourceList(list []corev1.ResourceList) corev1.ResourceList { + totalResource := corev1.ResourceList{} + for _, l := range list { + for name, quantity := range l { + if value, ok := totalResource[name]; !ok { + totalResource[name] = quantity.DeepCopy() + } else { + value.Add(quantity) + totalResource[name] = value + } + } + } + return totalResource +} + +func Contains(elems []string, searchTerm string) bool { + for _, s := range elems { + if searchTerm == s { + return true + } + } + return false +} + +// GetHeadGroupServiceAccountName returns the head group service account if it exists. +// Otherwise, it returns the name of the cluster itself. +func GetHeadGroupServiceAccountName(cluster *rayv1.RayCluster) string { + headGroupServiceAccountName := cluster.Spec.HeadGroupSpec.Template.Spec.ServiceAccountName + if headGroupServiceAccountName != "" { + return headGroupServiceAccountName + } + return cluster.Name +} + +// CheckAllPodsRunning returns true if all the RayCluster's Pods are running, false otherwise +func CheckAllPodsRunning(ctx context.Context, runningPods corev1.PodList) bool { + log := ctrl.LoggerFrom(ctx) + // check if there are no pods. 
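+	// An empty Pod list is treated as "not all running", so callers keep waiting for Pods to appear.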
+	if len(runningPods.Items) == 0 {
+		return false
+	}
+	for _, pod := range runningPods.Items {
+		if pod.Status.Phase != corev1.PodRunning {
+			log.Info(fmt.Sprintf("CheckAllPodsRunning: Pod is not running; Pod Name: %s; Pod Status.Phase: %v", pod.Name, pod.Status.Phase))
+			return false
+		}
+		for _, cond := range pod.Status.Conditions {
+			if cond.Type == corev1.PodReady && cond.Status != corev1.ConditionTrue {
+				log.Info(fmt.Sprintf("CheckAllPodsRunning: Pod is not ready; Pod Name: %s; Pod Status.Conditions[PodReady]: %v", pod.Name, cond))
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func PodNotMatchingTemplate(pod corev1.Pod, template corev1.PodTemplateSpec) bool {
+	if pod.Status.Phase == corev1.PodRunning && pod.ObjectMeta.DeletionTimestamp == nil {
+		if len(template.Spec.Containers) != len(pod.Spec.Containers) {
+			return true
+		}
+		cmap := map[string]*corev1.Container{}
+		for i := range pod.Spec.Containers {
+			// Index into the slice rather than taking the address of the range variable,
+			// which is reused across iterations.
+			cmap[pod.Spec.Containers[i].Name] = &pod.Spec.Containers[i]
+		}
+		for _, container1 := range template.Spec.Containers {
+			if container2, ok := cmap[container1.Name]; ok {
+				if container1.Image != container2.Image {
+					// image names do not match
+					return true
+				}
+				if len(container1.Resources.Requests) != len(container2.Resources.Requests) ||
+					len(container1.Resources.Limits) != len(container2.Resources.Limits) {
+					// resource entries do not match
+					return true
+				}
+
+				resources1 := []corev1.ResourceList{
+					container1.Resources.Requests,
+					container1.Resources.Limits,
+				}
+				resources2 := []corev1.ResourceList{
+					container2.Resources.Requests,
+					container2.Resources.Limits,
+				}
+				for i := range resources1 {
+					// we need to make sure all fields match
+					for name, quantity1 := range resources1[i] {
+						if quantity2, ok := resources2[i][name]; ok {
+							if quantity1.Cmp(quantity2) != 0 {
+								// request amount does not match
+								return true
+							}
+						} else {
+							// no such request
+							return true
+						}
+					}
+				}
+
+				// now we consider them equal
+				delete(cmap, container1.Name)
+			} else {
+				// container names do not match
+				return true
+			}
+		}
+		if len(cmap) != 0 {
+			// one or more containers do not match
+			return true
+		}
+	}
+	return false
+}
+
+// CompareJsonStruct compares two objects via their JSON serialization. This is more reliable than
+// reflect.DeepEqual for json/yaml-backed structs, which it can fail to compare correctly in some cases.
+func CompareJsonStruct(objA interface{}, objB interface{}) bool {
+	a, err := json.Marshal(objA)
+	if err != nil {
+		return false
+	}
+	b, err := json.Marshal(objB)
+	if err != nil {
+		return false
+	}
+	var v1, v2 interface{}
+	err = json.Unmarshal(a, &v1)
+	if err != nil {
+		return false
+	}
+	err = json.Unmarshal(b, &v2)
+	if err != nil {
+		return false
+	}
+	return reflect.DeepEqual(v1, v2)
+}
+
+func ConvertUnixTimeToMetav1Time(unixTime uint64) *metav1.Time {
+	// The Ray jobInfo returns the start_time, which is a unix timestamp in milliseconds.
+	// https://docs.ray.io/en/latest/cluster/jobs-package-ref.html#jobinfo
+	t := time.Unix(int64(unixTime)/1000, int64(unixTime)%1000*1000000)
+	kt := metav1.NewTime(t)
+	return &kt
+}
+
+// GenerateJsonHash JSON-serializes obj and returns its hash string.
+func GenerateJsonHash(obj interface{}) (string, error) {
+	serialObj, err := json.Marshal(obj)
+	if err != nil {
+		return "", err
+	}
+
+	hashBytes := sha1.Sum(serialObj)
+
+	// Convert to an ASCII string
+	hashStr := string(base32.HexEncoding.EncodeToString(hashBytes[:]))
+
+	return hashStr, nil
+}
+
+// FindContainerPort searches for a specific port $portName in the container.
+// If the port is found in the container, the corresponding port is returned. +// If the port is not found, the $defaultPort is returned instead. +func FindContainerPort(container *corev1.Container, portName string, defaultPort int) int { + for _, port := range container.Ports { + if port.Name == portName { + return int(port.ContainerPort) + } + } + return defaultPort +} + +// IsJobFinished checks whether the given Job has finished execution. +// It does not discriminate between successful and failed terminations. +// src: https://github.com/kubernetes/kubernetes/blob/a8a1abc25cad87333840cd7d54be2efaf31a3177/pkg/controller/job/utils.go#L26 +func IsJobFinished(j *batchv1.Job) (batchv1.JobConditionType, bool) { + for _, c := range j.Status.Conditions { + if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == corev1.ConditionTrue { + return c.Type, true + } + } + return "", false +} + +func EnvVarExists(envName string, envVars []corev1.EnvVar) bool { + for _, env := range envVars { + if env.Name == envName { + return true + } + } + return false +} + +// EnvVarByName returns an entry in []corev1.EnvVar that matches a name. +// Also returns a bool for whether the env var exists. +func EnvVarByName(envName string, envVars []corev1.EnvVar) (corev1.EnvVar, bool) { + for _, env := range envVars { + if env.Name == envName { + return env, true + } + } + return corev1.EnvVar{}, false +} diff --git a/vendor/github.com/russross/blackfriday/v2/.gitignore b/vendor/github.com/russross/blackfriday/v2/.gitignore new file mode 100644 index 0000000000..75623dcccb --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/v2/.travis.yml b/vendor/github.com/russross/blackfriday/v2/.travis.yml new file mode 100644 index 0000000000..b0b525a5a8 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.10.x" + - "1.11.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v ./... diff --git a/vendor/github.com/russross/blackfriday/v2/LICENSE.txt b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt new file mode 100644 index 0000000000..2885af3602 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE +> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +> POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/russross/blackfriday/v2/README.md b/vendor/github.com/russross/blackfriday/v2/README.md new file mode 100644 index 0000000000..d9c08a22fc --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/README.md @@ -0,0 +1,335 @@ +Blackfriday +[![Build Status][BuildV2SVG]][BuildV2URL] +[![PkgGoDev][PkgGoDevV2SVG]][PkgGoDevV2URL] +=========== + +Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It +is paranoid about its input (so you can safely feed it user-supplied +data), it is fast, it supports common extensions (tables, smart +punctuation substitutions, etc.), and it is safe for all utf-8 +(unicode) input. + +HTML output is currently supported, along with Smartypants +extensions. + +It started as a translation from C of [Sundown][3]. + + +Installation +------------ + +Blackfriday is compatible with modern Go releases in module mode. +With Go installed: + + go get github.com/russross/blackfriday/v2 + +will resolve and add the package to the current development module, +then build and install it. Alternatively, you can achieve the same +if you import it in a package: + + import "github.com/russross/blackfriday/v2" + +and `go get` without parameters. + +Legacy GOPATH mode is unsupported. + + +Versions +-------- + +Currently maintained and recommended version of Blackfriday is `v2`. It's being +developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the +documentation is available at +https://pkg.go.dev/github.com/russross/blackfriday/v2. + +It is `go get`-able in module mode at `github.com/russross/blackfriday/v2`. + +Version 2 offers a number of improvements over v1: + +* Cleaned up API +* A separate call to [`Parse`][4], which produces an abstract syntax tree for + the document +* Latest bug fixes +* Flexibility to easily add your own rendering extensions + +Potential drawbacks: + +* Our benchmarks show v2 to be slightly slower than v1. Currently in the + ballpark of around 15%. +* API breakage. If you can't afford modifying your code to adhere to the new API + and don't care too much about the new features, v2 is probably not for you. +* Several bug fixes are trailing behind and still need to be forward-ported to + v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for + tracking. + +If you are still interested in the legacy `v1`, you can import it from +`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found +here: https://pkg.go.dev/github.com/russross/blackfriday. + + +Usage +----- + +For the most sensible markdown processing, it is as simple as getting your input +into a byte slice and calling: + +```go +output := blackfriday.Run(input) +``` + +Your input will be parsed and the output rendered with a set of most popular +extensions enabled. 
If you want the most basic feature set, corresponding with +the bare Markdown specification, use: + +```go +output := blackfriday.Run(input, blackfriday.WithNoExtensions()) +``` + +### Sanitize untrusted content + +Blackfriday itself does nothing to protect against malicious content. If you are +dealing with user-supplied markdown, we recommend running Blackfriday's output +through HTML sanitizer such as [Bluemonday][5]. + +Here's an example of simple usage of Blackfriday together with Bluemonday: + +```go +import ( + "github.com/microcosm-cc/bluemonday" + "github.com/russross/blackfriday/v2" +) + +// ... +unsafe := blackfriday.Run(input) +html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) +``` + +### Custom options + +If you want to customize the set of options, use `blackfriday.WithExtensions`, +`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`. + +### `blackfriday-tool` + +You can also check out `blackfriday-tool` for a more complete example +of how to use it. Download and install it using: + + go get github.com/russross/blackfriday-tool + +This is a simple command-line tool that allows you to process a +markdown file using a standalone program. You can also browse the +source directly on github if you are just looking for some example +code: + +* + +Note that if you have not already done so, installing +`blackfriday-tool` will be sufficient to download and install +blackfriday in addition to the tool itself. The tool binary will be +installed in `$GOPATH/bin`. This is a statically-linked binary that +can be copied to wherever you need it without worrying about +dependencies and library versions. + +### Sanitized anchor names + +Blackfriday includes an algorithm for creating sanitized anchor names +corresponding to a given input text. This algorithm is used to create +anchors for headings when `AutoHeadingIDs` extension is enabled. The +algorithm has a specification, so that other packages can create +compatible anchor names and links to those anchors. + +The specification is located at https://pkg.go.dev/github.com/russross/blackfriday/v2#hdr-Sanitized_Anchor_Names. + +[`SanitizedAnchorName`](https://pkg.go.dev/github.com/russross/blackfriday/v2#SanitizedAnchorName) exposes this functionality, and can be used to +create compatible links to the anchor names generated by blackfriday. +This algorithm is also implemented in a small standalone package at +[`github.com/shurcooL/sanitized_anchor_name`](https://pkg.go.dev/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients +that want a small package and don't need full functionality of blackfriday. + + +Features +-------- + +All features of Sundown are supported, including: + +* **Compatibility**. The Markdown v1.0.3 test suite passes with + the `--tidy` option. Without `--tidy`, the differences are + mostly in whitespace and entity escaping, where blackfriday is + more consistent and cleaner. + +* **Common extensions**, including table support, fenced code + blocks, autolinks, strikethroughs, non-strict emphasis, etc. + +* **Safety**. Blackfriday is paranoid when parsing, making it safe + to feed untrusted user input without fear of bad things + happening. The test suite stress tests this and there are no + known inputs that make it crash. If you find one, please let me + know and send me the input that does it. + + NOTE: "safety" in this context means *runtime safety only*. 
In order to
+  protect yourself against JavaScript injection in untrusted content, see
+  [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+  most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+  goroutines without ill effect. There is no dependence on global
+  shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+  library packages in Go. The source code is pretty
+  self-contained, so it is easy to add to any project, including
+  Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+  W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+  commonly used inside words when discussing code, so having
+  markdown interpret it as an emphasis command is usually the
+  wrong thing. Blackfriday lets you treat all emphasis markers as
+  normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+  using a simple syntax:
+
+  ```
+  Name    | Age
+  --------|------
+  Bob     | 27
+  Alice   | 23
+  ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+  indentation to mark code blocks, you can explicitly mark them
+  and supply a language (to make syntax highlighting simple). Just
+  mark it like this:
+
+  ```go
+  func getTrue() bool {
+      return true
+  }
+  ```
+
+  You can use 3 or more backticks to mark the beginning of the
+  block, and the same number to mark the end of the block.
+
+  To preserve classes of fenced code blocks while using the bluemonday
+  HTML sanitizer, use the following policy:
+
+  ```go
+  p := bluemonday.UGCPolicy()
+  p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+  html := p.SanitizeBytes(unsafe)
+  ```
+
+* **Definition lists**. A simple definition list is made of a single-line
+  term followed by a colon and the definition for that term.
+
+      Cat
+      : Fluffy animal everyone likes
+
+      Internet
+      : Vector of transmission for pictures of cats
+
+  Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+  a footnote definition that will be placed in a list of footnotes at the
+  end of the document. A footnote looks like this:
+
+      This is a footnote.[^1]
+
+      [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+  explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+  should be crossed out.
+
+* **Hard line breaks**. With this extension enabled, newlines in the input
+  translate into line breaks in the output. This extension is off by default.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+  supported, turning normal double- and single-quote marks into
+  curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+  is translated into `&ndash;`, and `---` is translated into
+  `&mdash;`. This differs from most smartypants processors, which
+  turn a single hyphen into an ndash and a double hyphen into an
+  mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+  is translated into suitable HTML (instead of just a few special
+  cases like most smartypants processors). For example, `4/5`
+  becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+  <sup>4</sup>&frasl;<sub>5</sub>.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://pkg.go.dev/github.com/shurcooL/github_flavored_markdown):
+  provides a GitHub Flavored Markdown renderer with fenced code block
+  highlighting, clickable heading anchor links.
+
+  It's not customizable, and its goal is to produce HTML output
+  equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+  except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+  but for markdown.
+
+* [LaTeX output](https://gitlab.com/ambrevar/blackfriday-latex):
+  renders output as LaTeX.
+
+* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
+  integration with the [Chroma](https://github.com/alecthomas/chroma) code
+  highlighting library. bfchroma is only compatible with v2 of Blackfriday and
+  provides a drop-in renderer ready to use with Blackfriday, as well as
+  options and means for further customization.
+
+* [Blackfriday-Confluence](https://github.com/kentaro-m/blackfriday-confluence): provides a [Confluence Wiki Markup](https://confluence.atlassian.com/doc/confluence-wiki-markup-251003035.html) renderer.
+
+* [Blackfriday-Slack](https://github.com/karriereat/blackfriday-slack): converts markdown to slack message style
+
+
+TODO
+----
+
+* More unit testing
+* Improve Unicode support. It does not understand all Unicode
+  rules (about what constitutes a letter, a punctuation symbol,
+  etc.), so it may fail to detect word boundaries correctly in
+  some instances. It is safe on all UTF-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+   [1]: https://daringfireball.net/projects/markdown/ "Markdown"
+   [2]: https://golang.org/ "Go Language"
+   [3]: https://github.com/vmg/sundown "Sundown"
+   [4]: https://pkg.go.dev/github.com/russross/blackfriday/v2#Parse "Parse func"
+   [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday"
+
+   [BuildV2SVG]: https://travis-ci.org/russross/blackfriday.svg?branch=v2
+   [BuildV2URL]: https://travis-ci.org/russross/blackfriday
+   [PkgGoDevV2SVG]: https://pkg.go.dev/badge/github.com/russross/blackfriday/v2
+   [PkgGoDevV2URL]: https://pkg.go.dev/github.com/russross/blackfriday/v2
diff --git a/vendor/github.com/russross/blackfriday/v2/block.go b/vendor/github.com/russross/blackfriday/v2/block.go
new file mode 100644
index 0000000000..dcd61e6e35
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/block.go
@@ -0,0 +1,1612 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+// + +package blackfriday + +import ( + "bytes" + "html" + "regexp" + "strings" + "unicode" +) + +const ( + charEntity = "&(?:#x[a-f0-9]{1,8}|#[0-9]{1,8}|[a-z][a-z0-9]{1,31});" + escapable = "[!\"#$%&'()*+,./:;<=>?@[\\\\\\]^_`{|}~-]" +) + +var ( + reBackslashOrAmp = regexp.MustCompile("[\\&]") + reEntityOrEscapedChar = regexp.MustCompile("(?i)\\\\" + escapable + "|" + charEntity) +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *Markdown) block(data []byte) { + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed heading: + // + // # Heading 1 + // ## Heading 2 + // ... + // ###### Heading 6 + if p.isPrefixHeading(data) { + data = data[p.prefixHeading(data):] + continue + } + + // block of preformatted HTML: + // + //
<div> + //     ... + // </div>
+ if data[0] == '<' { + if i := p.html(data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.extensions&Titleblock != 0 { + if data[0] == '%' { + if i := p.titleBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(data):] + continue + } + + // fenced code block: + // + // ``` go + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.addBlock(HorizontalRule, nil) + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.extensions&Tables != 0 { + if i := p.table(data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(data, ListTypeOrdered):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(data, ListTypeDefinition):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headings, too + data = data[p.paragraph(data):] + } + + p.nesting-- +} + +func (p *Markdown) addBlock(typ NodeType, content []byte) *Node { + p.closeUnmatchedBlocks() + container := p.addChild(typ, 0) + container.content = content + return container +} + +func (p *Markdown) isPrefixHeading(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.extensions&SpaceHeadings != 0 { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + if level == len(data) || data[level] != ' ' { + return false + } + } + return true +} + +func (p *Markdown) prefixHeading(data []byte) int { + level := 0 + for level < 6 && level < len(data) && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.extensions&HeadingIDs != 0 { + j, k := 0, 0 + // find start/end of heading id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract heading id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + 
} + if end > i { + if id == "" && p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + block := p.addBlock(Heading, data[i:end]) + block.HeadingID = id + block.Level = level + } + return skip +} + +func (p *Markdown) isUnderlinedHeading(data []byte) int { + // test of level 1 heading + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 1 + } + return 0 + } + + // test of level 2 heading + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if i < len(data) && data[i] == '\n' { + return 2 + } + return 0 + } + + return 0 +} + +func (p *Markdown) titleBlock(data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + consumed := len(data) + data = bytes.TrimPrefix(data, []byte("% ")) + data = bytes.Replace(data, []byte("\n% "), []byte("\n"), -1) + block := p.addBlock(Heading, data) + block.Level = 1 + block.IsTitleblock = true + + return consumed +} + +func (p *Markdown) html(data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(data, doRender); size > 0 { + return size + } + + // check for an
<hr> tag
tag + if size := p.htmlHr(data, doRender); size > 0 { + return size + } + + // no special case recognized + return 0 + } + + // look for an unindented matching closing tag + // followed by a blank line + found := false + /* + closetag := []byte("\n") + j = len(curtag) + 1 + for !found { + // scan for a closing tag at the beginning of a line + if skip := bytes.Index(data[j:], closetag); skip >= 0 { + j += skip + len(closetag) + } else { + break + } + + // see if it is the only thing on the line + if skip := p.isEmpty(data[j:]); skip > 0 { + // see if it is followed by a blank line/eof + j += skip + if j >= len(data) { + found = true + i = j + } else { + if skip := p.isEmpty(data[j:]); skip > 0 { + j += skip + found = true + i = j + } + } + } + } + */ + + // if not found, try a second pass looking for indented match + // but not if tag is "ins" or "del" (following original Markdown.pl) + if !found && curtag != "ins" && curtag != "del" { + i = 1 + for i < len(data) { + i++ + for i < len(data) && !(data[i-1] == '<' && data[i] == '/') { + i++ + } + + if i+2+len(curtag) >= len(data) { + break + } + + j = p.htmlFindEnd(curtag, data[i-1:]) + + if j > 0 { + i += j - 1 + found = true + break + } + } + } + + if !found { + return 0 + } + + // the end of the block has been found + if doRender { + // trim newlines + end := i + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + + return i +} + +func finalizeHTMLBlock(block *Node) { + block.Literal = block.content + block.content = nil +} + +// HTML comment, lax form +func (p *Markdown) htmlComment(data []byte, doRender bool) int { + i := p.inlineHTMLComment(data) + // needs to end with a blank line + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim trailing newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + block := p.addBlock(HTMLBlock, data[:end]) + finalizeHTMLBlock(block) + } + return size + } + return 0 +} + +// HR, which is the only self-closing block tag considered +func (p *Markdown) htmlHr(data []byte, doRender bool) int { + if len(data) < 4 { + return 0 + } + if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') { + return 0 + } + if data[3] != ' ' && data[3] != '/' && data[3] != '>' { + // not an
tag after all; at least not a valid one + return 0 + } + i := 3 + for i < len(data) && data[i] != '>' && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '>' { + i++ + if j := p.isEmpty(data[i:]); j > 0 { + size := i + j + if doRender { + // trim newlines + end := size + for end > 0 && data[end-1] == '\n' { + end-- + } + finalizeHTMLBlock(p.addBlock(HTMLBlock, data[:end])) + } + return size + } + } + return 0 +} + +func (p *Markdown) htmlFindTag(data []byte) (string, bool) { + i := 0 + for i < len(data) && isalnum(data[i]) { + i++ + } + key := string(data[:i]) + if _, ok := blockTags[key]; ok { + return key, true + } + return "", false +} + +func (p *Markdown) htmlFindEnd(tag string, data []byte) int { + // assume data[0] == '<' && data[1] == '/' already tested + if tag == "hr" { + return 2 + } + // check if tag is a match + closetag := []byte("") + if !bytes.HasPrefix(data, closetag) { + return 0 + } + i := len(closetag) + + // check that the rest of the line is blank + skip := 0 + if skip = p.isEmpty(data[i:]); skip == 0 { + return 0 + } + i += skip + skip = 0 + + if i >= len(data) { + return i + } + + if p.extensions&LaxHTMLBlocks != 0 { + return i + } + if skip = p.isEmpty(data[i:]); skip == 0 { + // following line must be blank + return 0 + } + + return i + skip +} + +func (*Markdown) isEmpty(data []byte) int { + // it is okay to call isEmpty on an empty buffer + if len(data) == 0 { + return 0 + } + + var i int + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] != ' ' && data[i] != '\t' { + return 0 + } + } + if i < len(data) && data[i] == '\n' { + i++ + } + return i +} + +func (*Markdown) isHRule(data []byte) bool { + i := 0 + + // skip up to three spaces + for i < 3 && data[i] == ' ' { + i++ + } + + // look at the hrule char + if data[i] != '*' && data[i] != '-' && data[i] != '_' { + return false + } + c := data[i] + + // the whole line must be the char or whitespace + n := 0 + for i < len(data) && data[i] != '\n' { + switch { + case data[i] == c: + n++ + case data[i] != ' ': + return false + } + i++ + } + + return n >= 3 +} + +// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data, +// and returns the end index if so, or 0 otherwise. It also returns the marker found. +// If info is not nil, it gets set to the syntax specified in the fence line. +func isFenceLine(data []byte, info *string, oldmarker string) (end int, marker string) { + i, size := 0, 0 + + // skip up to three spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + + // check for the marker characters: ~ or ` + if i >= len(data) { + return 0, "" + } + if data[i] != '~' && data[i] != '`' { + return 0, "" + } + + c := data[i] + + // the whole line must be the same char or whitespace + for i < len(data) && data[i] == c { + size++ + i++ + } + + // the marker char must occur at least 3 times + if size < 3 { + return 0, "" + } + marker = string(data[i-size : i]) + + // if this is the end marker, it must match the beginning marker + if oldmarker != "" && marker != oldmarker { + return 0, "" + } + + // TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here + // into one, always get the info string, and discard it if the caller doesn't care. 
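+	// When info is non-nil, extract the info string (such as a language name,
+	// possibly wrapped in a {...} block) that may follow the opening fence,
+	// trimming surrounding whitespace before handing it back to the caller.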
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + i++ + i = skipChar(data, i, ' ') + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + if i == len(data) { + return i, marker + } + if i > len(data) || data[i] != '\n' { + return 0, "" + } + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *Markdown) fencedCodeBlock(data []byte, doRender bool) int { + var info string + beg, marker := isFenceLine(data, &info, "") + if beg == 0 || beg >= len(data) { + return 0 + } + fenceLength := beg - 1 + + var work bytes.Buffer + work.Write([]byte(info)) + work.WriteByte('\n') + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + fenceEnd, _ := isFenceLine(data[beg:], nil, marker) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = true + block.FenceLength = fenceLength + finalizeCodeBlock(block) + } + + return beg +} + +func unescapeChar(str []byte) []byte { + if str[0] == '\\' { + return []byte{str[1]} + } + return []byte(html.UnescapeString(string(str))) +} + +func unescapeString(str []byte) []byte { + if reBackslashOrAmp.Match(str) { + return reEntityOrEscapedChar.ReplaceAllFunc(str, unescapeChar) + } + return str +} + +func finalizeCodeBlock(block *Node) { + if block.IsFenced { + newlinePos := bytes.IndexByte(block.content, '\n') + firstLine := block.content[:newlinePos] + rest := block.content[newlinePos+1:] + block.Info = unescapeString(bytes.Trim(firstLine, "\n")) + block.Literal = rest + } else { + block.Literal = block.content + } + block.content = nil +} + +func (p *Markdown) table(data []byte) int { + table := p.addBlock(Table, nil) + i, columns := p.tableHeader(data) + if i == 0 { + p.tip = table.Parent + table.Unlink() + return 0 + } + + p.addBlock(TableBody, nil) + + for i < len(data) { + pipes, rowStart := 0, i + for ; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + if i < len(data) && data[i] == '\n' { + i++ + } + p.tableRow(data[rowStart:i], columns, false) + } + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *Markdown) tableHeader(data []byte) (size int, columns []CellAlignFlags) { + i := 0 + colCount := 1 + for i = 0; i < len(data) && data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + j := i + if j < len(data) && data[j] == '\n' { + j++ + } + header := data[:j] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]CellAlignFlags, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for i < len(data) && data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TableAlignmentLeft + dashes++ + } + for i < len(data) && data[i] == '-' { + i++ + dashes++ + } + if i < len(data) && data[i] == ':' { + i++ + columns[col] |= TableAlignmentRight + dashes++ + } + for i < len(data) && data[i] == ' ' { + i++ + } + if i == len(data) { + return + } + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for i < len(data) && data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && i < len(data) && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.addBlock(TableHead, nil) + p.tableRow(header, columns, true) + size = i + if size < len(data) && data[size] == '\n' { + size++ + } + return +} + +func (p *Markdown) tableRow(data []byte, columns []CellAlignFlags, header bool) { + p.addBlock(TableRow, nil) + i, col := 0, 0 + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for i < len(data) && data[i] == ' ' { + i++ + } + + cellStart := i + + for i < len(data) && (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && cellEnd-1 < len(data) && data[cellEnd-1] == ' ' { + cellEnd-- + } + + cell := p.addBlock(TableCell, data[cellStart:cellEnd]) + cell.IsHeader = header + cell.Align = columns[col] + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + cell := p.addBlock(TableCell, nil) + cell.IsHeader = header + cell.Align = columns[col] + } + + // silently ignore rows with too many cells +} + +// returns blockquote prefix length +func (p *Markdown) quotePrefix(data []byte) int { + i := 0 + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + if i < len(data) && data[i] == '>' { + if i+1 < len(data) && data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *Markdown) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *Markdown) quote(data []byte) int { + block := p.addBlock(BlockQuote, nil) + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for + // fenced code and if one's found, incorporate it altogether, + // irregardless of any contents inside it + for end < len(data) && data[end] != '\n' { + if p.extensions&FencedCode != 0 { + if i := p.fencedCodeBlock(data[end:], false); i > 0 { + // -1 to compensate for the extra end++ after the loop: + end += i - 1 + break + } + } + end++ + } + if end < len(data) && data[end] == '\n' { + end++ + } + if pre := p.quotePrefix(data[beg:]); pre > 0 { + // skip the prefix + beg += pre + } else if p.terminateBlockquote(data, beg, end) { + break + } + // this line is part of the blockquote + raw.Write(data[beg:end]) + beg = end + } + p.block(raw.Bytes()) + p.finalize(block) + return end +} + +// returns prefix length for block code +func (p *Markdown) codePrefix(data []byte) int { + if len(data) >= 1 && data[0] == '\t' { + return 1 + } + if len(data) >= 4 && data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' { + return 4 + } + return 0 +} + +func (p *Markdown) code(data []byte) int { + var work bytes.Buffer + + i := 0 + for i < len(data) { + beg := i + for i < len(data) && data[i] != '\n' { + i++ + } + if i < len(data) && data[i] == '\n' { + i++ + } + + blankline := p.isEmpty(data[beg:i]) > 0 + if pre := p.codePrefix(data[beg:i]); pre > 0 { + beg += pre + } else if !blankline { + // non-empty, non-prefixed line breaks the pre + i = beg + break + } + + // verbatim copy to the working buffer + if blankline { + work.WriteByte('\n') + } else { + work.Write(data[beg:i]) + } + } + + // trim all the \n off the end of work + workbytes := work.Bytes() + eol := len(workbytes) + for eol > 0 && workbytes[eol-1] == '\n' { + eol-- + } + if eol != len(workbytes) { + work.Truncate(eol) + } + + work.WriteByte('\n') + + block := p.addBlock(CodeBlock, work.Bytes()) // TODO: get rid of temp buffer + block.IsFenced = false + finalizeCodeBlock(block) + + return i +} + +// returns unordered list item prefix +func (p *Markdown) uliPrefix(data []byte) int { + i := 0 + // start with up to 3 spaces + for i < len(data) && i < 3 && data[i] == ' ' { + i++ + } + if i >= len(data)-1 { + return 0 + } + // need one of {'*', '+', '-'} followed by a space or a tab + if (data[i] != '*' && data[i] != '+' && data[i] != '-') || + (data[i+1] != ' ' && data[i+1] != '\t') { + return 0 + } + return i + 2 +} + +// returns ordered list item prefix +func (p *Markdown) oliPrefix(data []byte) int { + i := 0 + + // start with up to 3 spaces + for i < 3 && i < len(data) && data[i] == ' ' { + i++ + } + + // count the digits + start := i + for i < len(data) && data[i] >= '0' && data[i] <= '9' { + i++ + } + if start == i || i >= len(data)-1 { + return 0 + } + + // we need >= 1 digits followed by a dot and a space or a tab + if data[i] != '.' 
|| !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + return i + 2 +} + +// returns definition list item prefix +func (p *Markdown) dliPrefix(data []byte) int { + if len(data) < 2 { + return 0 + } + i := 0 + // need a ':' followed by a space or a tab + if data[i] != ':' || !(data[i+1] == ' ' || data[i+1] == '\t') { + return 0 + } + for i < len(data) && data[i] == ' ' { + i++ + } + return i + 2 +} + +// parse ordered or unordered list block +func (p *Markdown) list(data []byte, flags ListType) int { + i := 0 + flags |= ListItemBeginningOfList + block := p.addBlock(List, nil) + block.ListFlags = flags + block.Tight = true + + for i < len(data) { + skip := p.listItem(data[i:], &flags) + if flags&ListItemContainsBlock != 0 { + block.ListData.Tight = false + } + i += skip + if skip == 0 || flags&ListItemEndOfList != 0 { + break + } + flags &= ^ListItemBeginningOfList + } + + above := block.Parent + finalizeList(block) + p.tip = above + return i +} + +// Returns true if the list item is not the same type as its parent list +func (p *Markdown) listTypeChanged(data []byte, flags *ListType) bool { + if p.dliPrefix(data) > 0 && *flags&ListTypeDefinition == 0 { + return true + } else if p.oliPrefix(data) > 0 && *flags&ListTypeOrdered == 0 { + return true + } else if p.uliPrefix(data) > 0 && (*flags&ListTypeOrdered != 0 || *flags&ListTypeDefinition != 0) { + return true + } + return false +} + +// Returns true if block ends with a blank line, descending if needed +// into lists and sublists. +func endsWithBlankLine(block *Node) bool { + // TODO: figure this out. Always false now. + for block != nil { + //if block.lastLineBlank { + //return true + //} + t := block.Type + if t == List || t == Item { + block = block.LastChild + } else { + break + } + } + return false +} + +func finalizeList(block *Node) { + block.open = false + item := block.FirstChild + for item != nil { + // check for non-final list item ending with blank line: + if endsWithBlankLine(item) && item.Next != nil { + block.ListData.Tight = false + break + } + // recurse into children of list item, to see if there are spaces + // between any of them: + subItem := item.FirstChild + for subItem != nil { + if endsWithBlankLine(subItem) && (item.Next != nil || subItem.Next != nil) { + block.ListData.Tight = false + break + } + subItem = subItem.Next + } + item = item.Next + } +} + +// Parse a single list item. +// Assumes initial prefix is already removed if this is a sublist. 
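+// It returns the number of bytes of data consumed, or 0 if no list item was found.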
+func (p *Markdown) listItem(data []byte, flags *ListType) int { + // keep track of the indentation of the first line + itemIndent := 0 + if data[0] == '\t' { + itemIndent += 4 + } else { + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + } + + var bulletChar byte = '*' + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } else { + bulletChar = data[i-2] + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^ListTypeTerm + } + } + if i == 0 { + // if in definition list, set term flag and continue + if *flags&ListTypeDefinition != 0 { + *flags |= ListTypeTerm + } else { + return 0 + } + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + line = i + continue + } + + // calculate the indentation + indent := 0 + indentIndex := 0 + if data[line] == '\t' { + indentIndex++ + indent += 4 + } else { + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + indentIndex++ + } + } + + chunk := data[line+indentIndex : i] + + if p.extensions&FencedCode != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indentIndex : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + // to be a nested list, it must be indented more + // if not, it is either a different kind of list + // or the next item in the same list + if indent <= itemIndent { + if p.listTypeChanged(chunk, flags) { + *flags |= ListItemEndOfList + } else if containsBlankLine { + *flags |= ListItemContainsBlock + } + + break gatherlines + } + + if containsBlankLine { + *flags |= ListItemContainsBlock + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix heading? + case p.isPrefixHeading(chunk): + // if the heading is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= ListItemEndOfList + break gatherlines + } + *flags |= ListItemContainsBlock + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&ListTypeDefinition != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
+ next := i + for next < len(data) && data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= ListItemEndOfList + } + } else { + *flags |= ListItemEndOfList + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + raw.WriteByte('\n') + *flags |= ListItemContainsBlock + } + + // if this line was preceded by one or more blanks, + // re-introduce the blank into the buffer + if containsBlankLine { + containsBlankLine = false + raw.WriteByte('\n') + } + + // add the line into the working buffer without prefix + raw.Write(data[line+indentIndex : i]) + + line = i + } + + rawBytes := raw.Bytes() + + block := p.addBlock(Item, nil) + block.ListFlags = *flags + block.Tight = false + block.BulletChar = bulletChar + block.Delimiter = '.' // Only '.' is possible in Markdown, but ')' will also be possible in CommonMark + + // render the contents of the list item + if *flags&ListItemContainsBlock != 0 && *flags&ListTypeTerm == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(rawBytes[:sublist]) + p.block(rawBytes[sublist:]) + } else { + p.block(rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + child := p.addChild(Paragraph, 0) + child.content = rawBytes[:sublist] + p.block(rawBytes[sublist:]) + } else { + child := p.addChild(Paragraph, 0) + child.content = rawBytes + } + } + return line +} + +// render a single paragraph that has already been parsed out +func (p *Markdown) renderParagraph(data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + end := len(data) + // trim trailing newline + if data[len(data)-1] == '\n' { + end-- + } + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + p.addBlock(Paragraph, data[beg:end]) +} + +func (p *Markdown) paragraph(data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + tabSize := TabSizeDefault + if p.extensions&TabSizeEight != 0 { + tabSize = TabSizeDouble + } + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a reference or a footnote? If so, end a paragraph + // preceding it and report that we have consumed up to the end of that + // reference: + if refEnd := isReference(p, current, tabSize); refEnd > 0 { + p.renderParagraph(data[:i]) + return i + refEnd + } + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.extensions&DefinitionLists != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(data[prev:], ListTypeDefinition) + } + } + + p.renderParagraph(data[:i]) + return i + n + } + + // an underline under some text marks a heading, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeading(current); level > 0 { + // render the paragraph + p.renderParagraph(data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + id := "" + if p.extensions&AutoHeadingIDs != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + block := p.addBlock(Heading, data[prev:eol]) + block.Level = level + block.HeadingID = id + + // find the end of the underline + for i < len(data) && data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.extensions&LaxHTMLBlocks != 0 { + if data[i] == '<' && p.html(current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a prefixed heading or a horizontal rule after this, paragraph is over + if p.isPrefixHeading(current) || p.isHRule(current) { + p.renderParagraph(data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.extensions&FencedCode != 0 { + if p.fencedCodeBlock(current, false) > 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.extensions&DefinitionLists != 0 { + if p.dliPrefix(current) != 0 { + ret := p.list(data[prev:], ListTypeDefinition) + return ret + } + } + + // if there's a list after this, paragraph is over + if p.extensions&NoEmptyLineBeforeBlock != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + nl := bytes.IndexByte(data[i:], '\n') + if nl >= 0 { + i += nl + 1 + } else { + i += len(data[i:]) + } + } + + p.renderParagraph(data[:i]) + return i +} + +func skipChar(data []byte, start int, char byte) int { + i := start + for i < len(data) && data[i] == char { + i++ + } + return i +} + +func skipUntilChar(text []byte, start int, char byte) int { + i := start + for i < len(text) && text[i] != char { + i++ + } + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/v2/doc.go b/vendor/github.com/russross/blackfriday/v2/doc.go new file mode 100644 index 0000000000..57ff152a05 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/doc.go @@ -0,0 +1,46 @@ +// Package blackfriday is a markdown processor. 
+//
+// It translates plain text with simple formatting rules into an AST, which can
+// then be further processed to HTML (provided by Blackfriday itself) or other
+// formats (provided by the community).
+//
+// The simplest way to invoke Blackfriday is to call the Run function. It will
+// take a text input and produce a text output in HTML (or other format).
+//
+// A slightly more sophisticated way to use Blackfriday is to create a Markdown
+// processor and to call Parse, which returns a syntax tree for the input
+// document. You can leverage Blackfriday's parsing for content extraction from
+// markdown documents. You can assign a custom renderer and set various options
+// on the Markdown processor.
+//
+// If you're interested in calling Blackfriday from the command line, see
+// https://github.com/russross/blackfriday-tool.
+//
+// Sanitized Anchor Names
+//
+// Blackfriday includes an algorithm for creating sanitized anchor names
+// corresponding to a given input text. This algorithm is used to create
+// anchors for headings when the AutoHeadingIDs extension is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character,
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need the full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep the Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
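For reference, a minimal sketch (not part of the vendored files) of the two entry points the package comment above documents, using only the exported `Run` and `SanitizedAnchorName`:

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// One-shot rendering: markdown in, HTML out, with default settings.
	// Options such as WithExtensions can be passed to change behavior.
	fmt.Printf("%s", blackfriday.Run([]byte("# Hello, *world*!")))

	// The anchor-name algorithm from the package comment: letters and
	// numbers are lowercased and kept; every other run of characters
	// between them collapses to a single '-'.
	fmt.Println(blackfriday.SanitizedAnchorName("Hello, *world*!")) // hello-world
}
```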
diff --git a/vendor/github.com/russross/blackfriday/v2/entities.go b/vendor/github.com/russross/blackfriday/v2/entities.go new file mode 100644 index 0000000000..a2c3edb691 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/entities.go @@ -0,0 +1,2236 @@ +package blackfriday + +// Extracted from https://html.spec.whatwg.org/multipage/entities.json +var entities = map[string]bool{ + "Æ": true, + "Æ": true, + "&": true, + "&": true, + "Á": true, + "Á": true, + "Ă": true, + "Â": true, + "Â": true, + "А": true, + "𝔄": true, + "À": true, + "À": true, + "Α": true, + "Ā": true, + "⩓": true, + "Ą": true, + "𝔸": true, + "⁡": true, + "Å": true, + "Å": true, + "𝒜": true, + "≔": true, + "Ã": true, + "Ã": true, + "Ä": true, + "Ä": true, + "∖": true, + "⫧": true, + "⌆": true, + "Б": true, + "∵": true, + "ℬ": true, + "Β": true, + "𝔅": true, + "𝔹": true, + "˘": true, + "ℬ": true, + "≎": true, + "Ч": true, + "©": true, + "©": true, + "Ć": true, + "⋒": true, + "ⅅ": true, + "ℭ": true, + "Č": true, + "Ç": true, + "Ç": true, + "Ĉ": true, + "∰": true, + "Ċ": true, + "¸": true, + "·": true, + "ℭ": true, + "Χ": true, + "⊙": true, + "⊖": true, + "⊕": true, + "⊗": true, + "∲": true, + "”": true, + "’": true, + "∷": true, + "⩴": true, + "≡": true, + "∯": true, + "∮": true, + "ℂ": true, + "∐": true, + "∳": true, + "⨯": true, + "𝒞": true, + "⋓": true, + "≍": true, + "ⅅ": true, + "⤑": true, + "Ђ": true, + "Ѕ": true, + "Џ": true, + "‡": true, + "↡": true, + "⫤": true, + "Ď": true, + "Д": true, + "∇": true, + "Δ": true, + "𝔇": true, + "´": true, + "˙": true, + "˝": true, + "`": true, + "˜": true, + "⋄": true, + "ⅆ": true, + "𝔻": true, + "¨": true, + "⃜": true, + "≐": true, + "∯": true, + "¨": true, + "⇓": true, + "⇐": true, + "⇔": true, + "⫤": true, + "⟸": true, + "⟺": true, + "⟹": true, + "⇒": true, + "⊨": true, + "⇑": true, + "⇕": true, + "∥": true, + "↓": true, + "⤓": true, + "⇵": true, + "̑": true, + "⥐": true, + "⥞": true, + "↽": true, + "⥖": true, + "⥟": true, + "⇁": true, + "⥗": true, + "⊤": true, + "↧": true, + "⇓": true, + "𝒟": true, + "Đ": true, + "Ŋ": true, + "Ð": true, + "Ð": true, + "É": true, + "É": true, + "Ě": true, + "Ê": true, + "Ê": true, + "Э": true, + "Ė": true, + "𝔈": true, + "È": true, + "È": true, + "∈": true, + "Ē": true, + "◻": true, + "▫": true, + "Ę": true, + "𝔼": true, + "Ε": true, + "⩵": true, + "≂": true, + "⇌": true, + "ℰ": true, + "⩳": true, + "Η": true, + "Ë": true, + "Ë": true, + "∃": true, + "ⅇ": true, + "Ф": true, + "𝔉": true, + "◼": true, + "▪": true, + "𝔽": true, + "∀": true, + "ℱ": true, + "ℱ": true, + "Ѓ": true, + ">": true, + ">": true, + "Γ": true, + "Ϝ": true, + "Ğ": true, + "Ģ": true, + "Ĝ": true, + "Г": true, + "Ġ": true, + "𝔊": true, + "⋙": true, + "𝔾": true, + "≥": true, + "⋛": true, + "≧": true, + "⪢": true, + "≷": true, + "⩾": true, + "≳": true, + "𝒢": true, + "≫": true, + "Ъ": true, + "ˇ": true, + "^": true, + "Ĥ": true, + "ℌ": true, + "ℋ": true, + "ℍ": true, + "─": true, + "ℋ": true, + "Ħ": true, + "≎": true, + "≏": true, + "Е": true, + "IJ": true, + "Ё": true, + "Í": true, + "Í": true, + "Î": true, + "Î": true, + "И": true, + "İ": true, + "ℑ": true, + "Ì": true, + "Ì": true, + "ℑ": true, + "Ī": true, + "ⅈ": true, + "⇒": true, + "∬": true, + "∫": true, + "⋂": true, + "⁣": true, + "⁢": true, + "Į": true, + "𝕀": true, + "Ι": true, + "ℐ": true, + "Ĩ": true, + "І": true, + "Ï": true, + "Ï": true, + "Ĵ": true, + "Й": true, + "𝔍": true, + "𝕁": true, + "𝒥": true, + "Ј": true, + "Є": true, + "Х": true, + "Ќ": true, + "Κ": true, + "Ķ": true, + "К": true, + "𝔎": true, 
+ "𝕂": true, + "𝒦": true, + "Љ": true, + "<": true, + "<": true, + "Ĺ": true, + "Λ": true, + "⟪": true, + "ℒ": true, + "↞": true, + "Ľ": true, + "Ļ": true, + "Л": true, + "⟨": true, + "←": true, + "⇤": true, + "⇆": true, + "⌈": true, + "⟦": true, + "⥡": true, + "⇃": true, + "⥙": true, + "⌊": true, + "↔": true, + "⥎": true, + "⊣": true, + "↤": true, + "⥚": true, + "⊲": true, + "⧏": true, + "⊴": true, + "⥑": true, + "⥠": true, + "↿": true, + "⥘": true, + "↼": true, + "⥒": true, + "⇐": true, + "⇔": true, + "⋚": true, + "≦": true, + "≶": true, + "⪡": true, + "⩽": true, + "≲": true, + "𝔏": true, + "⋘": true, + "⇚": true, + "Ŀ": true, + "⟵": true, + "⟷": true, + "⟶": true, + "⟸": true, + "⟺": true, + "⟹": true, + "𝕃": true, + "↙": true, + "↘": true, + "ℒ": true, + "↰": true, + "Ł": true, + "≪": true, + "⤅": true, + "М": true, + " ": true, + "ℳ": true, + "𝔐": true, + "∓": true, + "𝕄": true, + "ℳ": true, + "Μ": true, + "Њ": true, + "Ń": true, + "Ň": true, + "Ņ": true, + "Н": true, + "​": true, + "​": true, + "​": true, + "​": true, + "≫": true, + "≪": true, + " ": true, + "𝔑": true, + "⁠": true, + " ": true, + "ℕ": true, + "⫬": true, + "≢": true, + "≭": true, + "∦": true, + "∉": true, + "≠": true, + "≂̸": true, + "∄": true, + "≯": true, + "≱": true, + "≧̸": true, + "≫̸": true, + "≹": true, + "⩾̸": true, + "≵": true, + "≎̸": true, + "≏̸": true, + "⋪": true, + "⧏̸": true, + "⋬": true, + "≮": true, + "≰": true, + "≸": true, + "≪̸": true, + "⩽̸": true, + "≴": true, + "⪢̸": true, + "⪡̸": true, + "⊀": true, + "⪯̸": true, + "⋠": true, + "∌": true, + "⋫": true, + "⧐̸": true, + "⋭": true, + "⊏̸": true, + "⋢": true, + "⊐̸": true, + "⋣": true, + "⊂⃒": true, + "⊈": true, + "⊁": true, + "⪰̸": true, + "⋡": true, + "≿̸": true, + "⊃⃒": true, + "⊉": true, + "≁": true, + "≄": true, + "≇": true, + "≉": true, + "∤": true, + "𝒩": true, + "Ñ": true, + "Ñ": true, + "Ν": true, + "Œ": true, + "Ó": true, + "Ó": true, + "Ô": true, + "Ô": true, + "О": true, + "Ő": true, + "𝔒": true, + "Ò": true, + "Ò": true, + "Ō": true, + "Ω": true, + "Ο": true, + "𝕆": true, + "“": true, + "‘": true, + "⩔": true, + "𝒪": true, + "Ø": true, + "Ø": true, + "Õ": true, + "Õ": true, + "⨷": true, + "Ö": true, + "Ö": true, + "‾": true, + "⏞": true, + "⎴": true, + "⏜": true, + "∂": true, + "П": true, + "𝔓": true, + "Φ": true, + "Π": true, + "±": true, + "ℌ": true, + "ℙ": true, + "⪻": true, + "≺": true, + "⪯": true, + "≼": true, + "≾": true, + "″": true, + "∏": true, + "∷": true, + "∝": true, + "𝒫": true, + "Ψ": true, + """: true, + """: true, + "𝔔": true, + "ℚ": true, + "𝒬": true, + "⤐": true, + "®": true, + "®": true, + "Ŕ": true, + "⟫": true, + "↠": true, + "⤖": true, + "Ř": true, + "Ŗ": true, + "Р": true, + "ℜ": true, + "∋": true, + "⇋": true, + "⥯": true, + "ℜ": true, + "Ρ": true, + "⟩": true, + "→": true, + "⇥": true, + "⇄": true, + "⌉": true, + "⟧": true, + "⥝": true, + "⇂": true, + "⥕": true, + "⌋": true, + "⊢": true, + "↦": true, + "⥛": true, + "⊳": true, + "⧐": true, + "⊵": true, + "⥏": true, + "⥜": true, + "↾": true, + "⥔": true, + "⇀": true, + "⥓": true, + "⇒": true, + "ℝ": true, + "⥰": true, + "⇛": true, + "ℛ": true, + "↱": true, + "⧴": true, + "Щ": true, + "Ш": true, + "Ь": true, + "Ś": true, + "⪼": true, + "Š": true, + "Ş": true, + "Ŝ": true, + "С": true, + "𝔖": true, + "↓": true, + "←": true, + "→": true, + "↑": true, + "Σ": true, + "∘": true, + "𝕊": true, + "√": true, + "□": true, + "⊓": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊔": true, + "𝒮": true, + "⋆": true, + "⋐": true, + "⋐": true, + "⊆": true, + "≻": true, 
+ "⪰": true, + "≽": true, + "≿": true, + "∋": true, + "∑": true, + "⋑": true, + "⊃": true, + "⊇": true, + "⋑": true, + "Þ": true, + "Þ": true, + "™": true, + "Ћ": true, + "Ц": true, + " ": true, + "Τ": true, + "Ť": true, + "Ţ": true, + "Т": true, + "𝔗": true, + "∴": true, + "Θ": true, + "  ": true, + " ": true, + "∼": true, + "≃": true, + "≅": true, + "≈": true, + "𝕋": true, + "⃛": true, + "𝒯": true, + "Ŧ": true, + "Ú": true, + "Ú": true, + "↟": true, + "⥉": true, + "Ў": true, + "Ŭ": true, + "Û": true, + "Û": true, + "У": true, + "Ű": true, + "𝔘": true, + "Ù": true, + "Ù": true, + "Ū": true, + "_": true, + "⏟": true, + "⎵": true, + "⏝": true, + "⋃": true, + "⊎": true, + "Ų": true, + "𝕌": true, + "↑": true, + "⤒": true, + "⇅": true, + "↕": true, + "⥮": true, + "⊥": true, + "↥": true, + "⇑": true, + "⇕": true, + "↖": true, + "↗": true, + "ϒ": true, + "Υ": true, + "Ů": true, + "𝒰": true, + "Ũ": true, + "Ü": true, + "Ü": true, + "⊫": true, + "⫫": true, + "В": true, + "⊩": true, + "⫦": true, + "⋁": true, + "‖": true, + "‖": true, + "∣": true, + "|": true, + "❘": true, + "≀": true, + " ": true, + "𝔙": true, + "𝕍": true, + "𝒱": true, + "⊪": true, + "Ŵ": true, + "⋀": true, + "𝔚": true, + "𝕎": true, + "𝒲": true, + "𝔛": true, + "Ξ": true, + "𝕏": true, + "𝒳": true, + "Я": true, + "Ї": true, + "Ю": true, + "Ý": true, + "Ý": true, + "Ŷ": true, + "Ы": true, + "𝔜": true, + "𝕐": true, + "𝒴": true, + "Ÿ": true, + "Ж": true, + "Ź": true, + "Ž": true, + "З": true, + "Ż": true, + "​": true, + "Ζ": true, + "ℨ": true, + "ℤ": true, + "𝒵": true, + "á": true, + "á": true, + "ă": true, + "∾": true, + "∾̳": true, + "∿": true, + "â": true, + "â": true, + "´": true, + "´": true, + "а": true, + "æ": true, + "æ": true, + "⁡": true, + "𝔞": true, + "à": true, + "à": true, + "ℵ": true, + "ℵ": true, + "α": true, + "ā": true, + "⨿": true, + "&": true, + "&": true, + "∧": true, + "⩕": true, + "⩜": true, + "⩘": true, + "⩚": true, + "∠": true, + "⦤": true, + "∠": true, + "∡": true, + "⦨": true, + "⦩": true, + "⦪": true, + "⦫": true, + "⦬": true, + "⦭": true, + "⦮": true, + "⦯": true, + "∟": true, + "⊾": true, + "⦝": true, + "∢": true, + "Å": true, + "⍼": true, + "ą": true, + "𝕒": true, + "≈": true, + "⩰": true, + "⩯": true, + "≊": true, + "≋": true, + "'": true, + "≈": true, + "≊": true, + "å": true, + "å": true, + "𝒶": true, + "*": true, + "≈": true, + "≍": true, + "ã": true, + "ã": true, + "ä": true, + "ä": true, + "∳": true, + "⨑": true, + "⫭": true, + "≌": true, + "϶": true, + "‵": true, + "∽": true, + "⋍": true, + "⊽": true, + "⌅": true, + "⌅": true, + "⎵": true, + "⎶": true, + "≌": true, + "б": true, + "„": true, + "∵": true, + "∵": true, + "⦰": true, + "϶": true, + "ℬ": true, + "β": true, + "ℶ": true, + "≬": true, + "𝔟": true, + "⋂": true, + "◯": true, + "⋃": true, + "⨀": true, + "⨁": true, + "⨂": true, + "⨆": true, + "★": true, + "▽": true, + "△": true, + "⨄": true, + "⋁": true, + "⋀": true, + "⤍": true, + "⧫": true, + "▪": true, + "▴": true, + "▾": true, + "◂": true, + "▸": true, + "␣": true, + "▒": true, + "░": true, + "▓": true, + "█": true, + "=⃥": true, + "≡⃥": true, + "⌐": true, + "𝕓": true, + "⊥": true, + "⊥": true, + "⋈": true, + "╗": true, + "╔": true, + "╖": true, + "╓": true, + "═": true, + "╦": true, + "╩": true, + "╤": true, + "╧": true, + "╝": true, + "╚": true, + "╜": true, + "╙": true, + "║": true, + "╬": true, + "╣": true, + "╠": true, + "╫": true, + "╢": true, + "╟": true, + "⧉": true, + "╕": true, + "╒": true, + "┐": true, + "┌": true, + "─": true, + "╥": true, + "╨": true, + "┬": true, + "┴": true, + 
"⊟": true, + "⊞": true, + "⊠": true, + "╛": true, + "╘": true, + "┘": true, + "└": true, + "│": true, + "╪": true, + "╡": true, + "╞": true, + "┼": true, + "┤": true, + "├": true, + "‵": true, + "˘": true, + "¦": true, + "¦": true, + "𝒷": true, + "⁏": true, + "∽": true, + "⋍": true, + "\": true, + "⧅": true, + "⟈": true, + "•": true, + "•": true, + "≎": true, + "⪮": true, + "≏": true, + "≏": true, + "ć": true, + "∩": true, + "⩄": true, + "⩉": true, + "⩋": true, + "⩇": true, + "⩀": true, + "∩︀": true, + "⁁": true, + "ˇ": true, + "⩍": true, + "č": true, + "ç": true, + "ç": true, + "ĉ": true, + "⩌": true, + "⩐": true, + "ċ": true, + "¸": true, + "¸": true, + "⦲": true, + "¢": true, + "¢": true, + "·": true, + "𝔠": true, + "ч": true, + "✓": true, + "✓": true, + "χ": true, + "○": true, + "⧃": true, + "ˆ": true, + "≗": true, + "↺": true, + "↻": true, + "®": true, + "Ⓢ": true, + "⊛": true, + "⊚": true, + "⊝": true, + "≗": true, + "⨐": true, + "⫯": true, + "⧂": true, + "♣": true, + "♣": true, + ":": true, + "≔": true, + "≔": true, + ",": true, + "@": true, + "∁": true, + "∘": true, + "∁": true, + "ℂ": true, + "≅": true, + "⩭": true, + "∮": true, + "𝕔": true, + "∐": true, + "©": true, + "©": true, + "℗": true, + "↵": true, + "✗": true, + "𝒸": true, + "⫏": true, + "⫑": true, + "⫐": true, + "⫒": true, + "⋯": true, + "⤸": true, + "⤵": true, + "⋞": true, + "⋟": true, + "↶": true, + "⤽": true, + "∪": true, + "⩈": true, + "⩆": true, + "⩊": true, + "⊍": true, + "⩅": true, + "∪︀": true, + "↷": true, + "⤼": true, + "⋞": true, + "⋟": true, + "⋎": true, + "⋏": true, + "¤": true, + "¤": true, + "↶": true, + "↷": true, + "⋎": true, + "⋏": true, + "∲": true, + "∱": true, + "⌭": true, + "⇓": true, + "⥥": true, + "†": true, + "ℸ": true, + "↓": true, + "‐": true, + "⊣": true, + "⤏": true, + "˝": true, + "ď": true, + "д": true, + "ⅆ": true, + "‡": true, + "⇊": true, + "⩷": true, + "°": true, + "°": true, + "δ": true, + "⦱": true, + "⥿": true, + "𝔡": true, + "⇃": true, + "⇂": true, + "⋄": true, + "⋄": true, + "♦": true, + "♦": true, + "¨": true, + "ϝ": true, + "⋲": true, + "÷": true, + "÷": true, + "÷": true, + "⋇": true, + "⋇": true, + "ђ": true, + "⌞": true, + "⌍": true, + "$": true, + "𝕕": true, + "˙": true, + "≐": true, + "≑": true, + "∸": true, + "∔": true, + "⊡": true, + "⌆": true, + "↓": true, + "⇊": true, + "⇃": true, + "⇂": true, + "⤐": true, + "⌟": true, + "⌌": true, + "𝒹": true, + "ѕ": true, + "⧶": true, + "đ": true, + "⋱": true, + "▿": true, + "▾": true, + "⇵": true, + "⥯": true, + "⦦": true, + "џ": true, + "⟿": true, + "⩷": true, + "≑": true, + "é": true, + "é": true, + "⩮": true, + "ě": true, + "≖": true, + "ê": true, + "ê": true, + "≕": true, + "э": true, + "ė": true, + "ⅇ": true, + "≒": true, + "𝔢": true, + "⪚": true, + "è": true, + "è": true, + "⪖": true, + "⪘": true, + "⪙": true, + "⏧": true, + "ℓ": true, + "⪕": true, + "⪗": true, + "ē": true, + "∅": true, + "∅": true, + "∅": true, + " ": true, + " ": true, + " ": true, + "ŋ": true, + " ": true, + "ę": true, + "𝕖": true, + "⋕": true, + "⧣": true, + "⩱": true, + "ε": true, + "ε": true, + "ϵ": true, + "≖": true, + "≕": true, + "≂": true, + "⪖": true, + "⪕": true, + "=": true, + "≟": true, + "≡": true, + "⩸": true, + "⧥": true, + "≓": true, + "⥱": true, + "ℯ": true, + "≐": true, + "≂": true, + "η": true, + "ð": true, + "ð": true, + "ë": true, + "ë": true, + "€": true, + "!": true, + "∃": true, + "ℰ": true, + "ⅇ": true, + "≒": true, + "ф": true, + "♀": true, + "ffi": true, + "ff": true, + "ffl": true, + "𝔣": true, + "fi": true, + "fj": true, + "♭": 
true, + "fl": true, + "▱": true, + "ƒ": true, + "𝕗": true, + "∀": true, + "⋔": true, + "⫙": true, + "⨍": true, + "½": true, + "½": true, + "⅓": true, + "¼": true, + "¼": true, + "⅕": true, + "⅙": true, + "⅛": true, + "⅔": true, + "⅖": true, + "¾": true, + "¾": true, + "⅗": true, + "⅜": true, + "⅘": true, + "⅚": true, + "⅝": true, + "⅞": true, + "⁄": true, + "⌢": true, + "𝒻": true, + "≧": true, + "⪌": true, + "ǵ": true, + "γ": true, + "ϝ": true, + "⪆": true, + "ğ": true, + "ĝ": true, + "г": true, + "ġ": true, + "≥": true, + "⋛": true, + "≥": true, + "≧": true, + "⩾": true, + "⩾": true, + "⪩": true, + "⪀": true, + "⪂": true, + "⪄": true, + "⋛︀": true, + "⪔": true, + "𝔤": true, + "≫": true, + "⋙": true, + "ℷ": true, + "ѓ": true, + "≷": true, + "⪒": true, + "⪥": true, + "⪤": true, + "≩": true, + "⪊": true, + "⪊": true, + "⪈": true, + "⪈": true, + "≩": true, + "⋧": true, + "𝕘": true, + "`": true, + "ℊ": true, + "≳": true, + "⪎": true, + "⪐": true, + ">": true, + ">": true, + "⪧": true, + "⩺": true, + "⋗": true, + "⦕": true, + "⩼": true, + "⪆": true, + "⥸": true, + "⋗": true, + "⋛": true, + "⪌": true, + "≷": true, + "≳": true, + "≩︀": true, + "≩︀": true, + "⇔": true, + " ": true, + "½": true, + "ℋ": true, + "ъ": true, + "↔": true, + "⥈": true, + "↭": true, + "ℏ": true, + "ĥ": true, + "♥": true, + "♥": true, + "…": true, + "⊹": true, + "𝔥": true, + "⤥": true, + "⤦": true, + "⇿": true, + "∻": true, + "↩": true, + "↪": true, + "𝕙": true, + "―": true, + "𝒽": true, + "ℏ": true, + "ħ": true, + "⁃": true, + "‐": true, + "í": true, + "í": true, + "⁣": true, + "î": true, + "î": true, + "и": true, + "е": true, + "¡": true, + "¡": true, + "⇔": true, + "𝔦": true, + "ì": true, + "ì": true, + "ⅈ": true, + "⨌": true, + "∭": true, + "⧜": true, + "℩": true, + "ij": true, + "ī": true, + "ℑ": true, + "ℐ": true, + "ℑ": true, + "ı": true, + "⊷": true, + "Ƶ": true, + "∈": true, + "℅": true, + "∞": true, + "⧝": true, + "ı": true, + "∫": true, + "⊺": true, + "ℤ": true, + "⊺": true, + "⨗": true, + "⨼": true, + "ё": true, + "į": true, + "𝕚": true, + "ι": true, + "⨼": true, + "¿": true, + "¿": true, + "𝒾": true, + "∈": true, + "⋹": true, + "⋵": true, + "⋴": true, + "⋳": true, + "∈": true, + "⁢": true, + "ĩ": true, + "і": true, + "ï": true, + "ï": true, + "ĵ": true, + "й": true, + "𝔧": true, + "ȷ": true, + "𝕛": true, + "𝒿": true, + "ј": true, + "є": true, + "κ": true, + "ϰ": true, + "ķ": true, + "к": true, + "𝔨": true, + "ĸ": true, + "х": true, + "ќ": true, + "𝕜": true, + "𝓀": true, + "⇚": true, + "⇐": true, + "⤛": true, + "⤎": true, + "≦": true, + "⪋": true, + "⥢": true, + "ĺ": true, + "⦴": true, + "ℒ": true, + "λ": true, + "⟨": true, + "⦑": true, + "⟨": true, + "⪅": true, + "«": true, + "«": true, + "←": true, + "⇤": true, + "⤟": true, + "⤝": true, + "↩": true, + "↫": true, + "⤹": true, + "⥳": true, + "↢": true, + "⪫": true, + "⤙": true, + "⪭": true, + "⪭︀": true, + "⤌": true, + "❲": true, + "{": true, + "[": true, + "⦋": true, + "⦏": true, + "⦍": true, + "ľ": true, + "ļ": true, + "⌈": true, + "{": true, + "л": true, + "⤶": true, + "“": true, + "„": true, + "⥧": true, + "⥋": true, + "↲": true, + "≤": true, + "←": true, + "↢": true, + "↽": true, + "↼": true, + "⇇": true, + "↔": true, + "⇆": true, + "⇋": true, + "↭": true, + "⋋": true, + "⋚": true, + "≤": true, + "≦": true, + "⩽": true, + "⩽": true, + "⪨": true, + "⩿": true, + "⪁": true, + "⪃": true, + "⋚︀": true, + "⪓": true, + "⪅": true, + "⋖": true, + "⋚": true, + "⪋": true, + "≶": true, + "≲": true, + "⥼": true, + "⌊": true, + "𝔩": true, + "≶": true, + "⪑": true, + 
"↽": true, + "↼": true, + "⥪": true, + "▄": true, + "љ": true, + "≪": true, + "⇇": true, + "⌞": true, + "⥫": true, + "◺": true, + "ŀ": true, + "⎰": true, + "⎰": true, + "≨": true, + "⪉": true, + "⪉": true, + "⪇": true, + "⪇": true, + "≨": true, + "⋦": true, + "⟬": true, + "⇽": true, + "⟦": true, + "⟵": true, + "⟷": true, + "⟼": true, + "⟶": true, + "↫": true, + "↬": true, + "⦅": true, + "𝕝": true, + "⨭": true, + "⨴": true, + "∗": true, + "_": true, + "◊": true, + "◊": true, + "⧫": true, + "(": true, + "⦓": true, + "⇆": true, + "⌟": true, + "⇋": true, + "⥭": true, + "‎": true, + "⊿": true, + "‹": true, + "𝓁": true, + "↰": true, + "≲": true, + "⪍": true, + "⪏": true, + "[": true, + "‘": true, + "‚": true, + "ł": true, + "<": true, + "<": true, + "⪦": true, + "⩹": true, + "⋖": true, + "⋋": true, + "⋉": true, + "⥶": true, + "⩻": true, + "⦖": true, + "◃": true, + "⊴": true, + "◂": true, + "⥊": true, + "⥦": true, + "≨︀": true, + "≨︀": true, + "∺": true, + "¯": true, + "¯": true, + "♂": true, + "✠": true, + "✠": true, + "↦": true, + "↦": true, + "↧": true, + "↤": true, + "↥": true, + "▮": true, + "⨩": true, + "м": true, + "—": true, + "∡": true, + "𝔪": true, + "℧": true, + "µ": true, + "µ": true, + "∣": true, + "*": true, + "⫰": true, + "·": true, + "·": true, + "−": true, + "⊟": true, + "∸": true, + "⨪": true, + "⫛": true, + "…": true, + "∓": true, + "⊧": true, + "𝕞": true, + "∓": true, + "𝓂": true, + "∾": true, + "μ": true, + "⊸": true, + "⊸": true, + "⋙̸": true, + "≫⃒": true, + "≫̸": true, + "⇍": true, + "⇎": true, + "⋘̸": true, + "≪⃒": true, + "≪̸": true, + "⇏": true, + "⊯": true, + "⊮": true, + "∇": true, + "ń": true, + "∠⃒": true, + "≉": true, + "⩰̸": true, + "≋̸": true, + "ʼn": true, + "≉": true, + "♮": true, + "♮": true, + "ℕ": true, + " ": true, + " ": true, + "≎̸": true, + "≏̸": true, + "⩃": true, + "ň": true, + "ņ": true, + "≇": true, + "⩭̸": true, + "⩂": true, + "н": true, + "–": true, + "≠": true, + "⇗": true, + "⤤": true, + "↗": true, + "↗": true, + "≐̸": true, + "≢": true, + "⤨": true, + "≂̸": true, + "∄": true, + "∄": true, + "𝔫": true, + "≧̸": true, + "≱": true, + "≱": true, + "≧̸": true, + "⩾̸": true, + "⩾̸": true, + "≵": true, + "≯": true, + "≯": true, + "⇎": true, + "↮": true, + "⫲": true, + "∋": true, + "⋼": true, + "⋺": true, + "∋": true, + "њ": true, + "⇍": true, + "≦̸": true, + "↚": true, + "‥": true, + "≰": true, + "↚": true, + "↮": true, + "≰": true, + "≦̸": true, + "⩽̸": true, + "⩽̸": true, + "≮": true, + "≴": true, + "≮": true, + "⋪": true, + "⋬": true, + "∤": true, + "𝕟": true, + "¬": true, + "¬": true, + "∉": true, + "⋹̸": true, + "⋵̸": true, + "∉": true, + "⋷": true, + "⋶": true, + "∌": true, + "∌": true, + "⋾": true, + "⋽": true, + "∦": true, + "∦": true, + "⫽⃥": true, + "∂̸": true, + "⨔": true, + "⊀": true, + "⋠": true, + "⪯̸": true, + "⊀": true, + "⪯̸": true, + "⇏": true, + "↛": true, + "⤳̸": true, + "↝̸": true, + "↛": true, + "⋫": true, + "⋭": true, + "⊁": true, + "⋡": true, + "⪰̸": true, + "𝓃": true, + "∤": true, + "∦": true, + "≁": true, + "≄": true, + "≄": true, + "∤": true, + "∦": true, + "⋢": true, + "⋣": true, + "⊄": true, + "⫅̸": true, + "⊈": true, + "⊂⃒": true, + "⊈": true, + "⫅̸": true, + "⊁": true, + "⪰̸": true, + "⊅": true, + "⫆̸": true, + "⊉": true, + "⊃⃒": true, + "⊉": true, + "⫆̸": true, + "≹": true, + "ñ": true, + "ñ": true, + "≸": true, + "⋪": true, + "⋬": true, + "⋫": true, + "⋭": true, + "ν": true, + "#": true, + "№": true, + " ": true, + "⊭": true, + "⤄": true, + "≍⃒": true, + "⊬": true, + "≥⃒": true, + ">⃒": true, + "⧞": true, + "⤂": true, + 
"≤⃒": true, + "<⃒": true, + "⊴⃒": true, + "⤃": true, + "⊵⃒": true, + "∼⃒": true, + "⇖": true, + "⤣": true, + "↖": true, + "↖": true, + "⤧": true, + "Ⓢ": true, + "ó": true, + "ó": true, + "⊛": true, + "⊚": true, + "ô": true, + "ô": true, + "о": true, + "⊝": true, + "ő": true, + "⨸": true, + "⊙": true, + "⦼": true, + "œ": true, + "⦿": true, + "𝔬": true, + "˛": true, + "ò": true, + "ò": true, + "⧁": true, + "⦵": true, + "Ω": true, + "∮": true, + "↺": true, + "⦾": true, + "⦻": true, + "‾": true, + "⧀": true, + "ō": true, + "ω": true, + "ο": true, + "⦶": true, + "⊖": true, + "𝕠": true, + "⦷": true, + "⦹": true, + "⊕": true, + "∨": true, + "↻": true, + "⩝": true, + "ℴ": true, + "ℴ": true, + "ª": true, + "ª": true, + "º": true, + "º": true, + "⊶": true, + "⩖": true, + "⩗": true, + "⩛": true, + "ℴ": true, + "ø": true, + "ø": true, + "⊘": true, + "õ": true, + "õ": true, + "⊗": true, + "⨶": true, + "ö": true, + "ö": true, + "⌽": true, + "∥": true, + "¶": true, + "¶": true, + "∥": true, + "⫳": true, + "⫽": true, + "∂": true, + "п": true, + "%": true, + ".": true, + "‰": true, + "⊥": true, + "‱": true, + "𝔭": true, + "φ": true, + "ϕ": true, + "ℳ": true, + "☎": true, + "π": true, + "⋔": true, + "ϖ": true, + "ℏ": true, + "ℎ": true, + "ℏ": true, + "+": true, + "⨣": true, + "⊞": true, + "⨢": true, + "∔": true, + "⨥": true, + "⩲": true, + "±": true, + "±": true, + "⨦": true, + "⨧": true, + "±": true, + "⨕": true, + "𝕡": true, + "£": true, + "£": true, + "≺": true, + "⪳": true, + "⪷": true, + "≼": true, + "⪯": true, + "≺": true, + "⪷": true, + "≼": true, + "⪯": true, + "⪹": true, + "⪵": true, + "⋨": true, + "≾": true, + "′": true, + "ℙ": true, + "⪵": true, + "⪹": true, + "⋨": true, + "∏": true, + "⌮": true, + "⌒": true, + "⌓": true, + "∝": true, + "∝": true, + "≾": true, + "⊰": true, + "𝓅": true, + "ψ": true, + " ": true, + "𝔮": true, + "⨌": true, + "𝕢": true, + "⁗": true, + "𝓆": true, + "ℍ": true, + "⨖": true, + "?": true, + "≟": true, + """: true, + """: true, + "⇛": true, + "⇒": true, + "⤜": true, + "⤏": true, + "⥤": true, + "∽̱": true, + "ŕ": true, + "√": true, + "⦳": true, + "⟩": true, + "⦒": true, + "⦥": true, + "⟩": true, + "»": true, + "»": true, + "→": true, + "⥵": true, + "⇥": true, + "⤠": true, + "⤳": true, + "⤞": true, + "↪": true, + "↬": true, + "⥅": true, + "⥴": true, + "↣": true, + "↝": true, + "⤚": true, + "∶": true, + "ℚ": true, + "⤍": true, + "❳": true, + "}": true, + "]": true, + "⦌": true, + "⦎": true, + "⦐": true, + "ř": true, + "ŗ": true, + "⌉": true, + "}": true, + "р": true, + "⤷": true, + "⥩": true, + "”": true, + "”": true, + "↳": true, + "ℜ": true, + "ℛ": true, + "ℜ": true, + "ℝ": true, + "▭": true, + "®": true, + "®": true, + "⥽": true, + "⌋": true, + "𝔯": true, + "⇁": true, + "⇀": true, + "⥬": true, + "ρ": true, + "ϱ": true, + "→": true, + "↣": true, + "⇁": true, + "⇀": true, + "⇄": true, + "⇌": true, + "⇉": true, + "↝": true, + "⋌": true, + "˚": true, + "≓": true, + "⇄": true, + "⇌": true, + "‏": true, + "⎱": true, + "⎱": true, + "⫮": true, + "⟭": true, + "⇾": true, + "⟧": true, + "⦆": true, + "𝕣": true, + "⨮": true, + "⨵": true, + ")": true, + "⦔": true, + "⨒": true, + "⇉": true, + "›": true, + "𝓇": true, + "↱": true, + "]": true, + "’": true, + "’": true, + "⋌": true, + "⋊": true, + "▹": true, + "⊵": true, + "▸": true, + "⧎": true, + "⥨": true, + "℞": true, + "ś": true, + "‚": true, + "≻": true, + "⪴": true, + "⪸": true, + "š": true, + "≽": true, + "⪰": true, + "ş": true, + "ŝ": true, + "⪶": true, + "⪺": true, + "⋩": true, + "⨓": true, + "≿": true, + "с": true, + "⋅": true, + 
"⊡": true, + "⩦": true, + "⇘": true, + "⤥": true, + "↘": true, + "↘": true, + "§": true, + "§": true, + ";": true, + "⤩": true, + "∖": true, + "∖": true, + "✶": true, + "𝔰": true, + "⌢": true, + "♯": true, + "щ": true, + "ш": true, + "∣": true, + "∥": true, + "­": true, + "­": true, + "σ": true, + "ς": true, + "ς": true, + "∼": true, + "⩪": true, + "≃": true, + "≃": true, + "⪞": true, + "⪠": true, + "⪝": true, + "⪟": true, + "≆": true, + "⨤": true, + "⥲": true, + "←": true, + "∖": true, + "⨳": true, + "⧤": true, + "∣": true, + "⌣": true, + "⪪": true, + "⪬": true, + "⪬︀": true, + "ь": true, + "/": true, + "⧄": true, + "⌿": true, + "𝕤": true, + "♠": true, + "♠": true, + "∥": true, + "⊓": true, + "⊓︀": true, + "⊔": true, + "⊔︀": true, + "⊏": true, + "⊑": true, + "⊏": true, + "⊑": true, + "⊐": true, + "⊒": true, + "⊐": true, + "⊒": true, + "□": true, + "□": true, + "▪": true, + "▪": true, + "→": true, + "𝓈": true, + "∖": true, + "⌣": true, + "⋆": true, + "☆": true, + "★": true, + "ϵ": true, + "ϕ": true, + "¯": true, + "⊂": true, + "⫅": true, + "⪽": true, + "⊆": true, + "⫃": true, + "⫁": true, + "⫋": true, + "⊊": true, + "⪿": true, + "⥹": true, + "⊂": true, + "⊆": true, + "⫅": true, + "⊊": true, + "⫋": true, + "⫇": true, + "⫕": true, + "⫓": true, + "≻": true, + "⪸": true, + "≽": true, + "⪰": true, + "⪺": true, + "⪶": true, + "⋩": true, + "≿": true, + "∑": true, + "♪": true, + "¹": true, + "¹": true, + "²": true, + "²": true, + "³": true, + "³": true, + "⊃": true, + "⫆": true, + "⪾": true, + "⫘": true, + "⊇": true, + "⫄": true, + "⟉": true, + "⫗": true, + "⥻": true, + "⫂": true, + "⫌": true, + "⊋": true, + "⫀": true, + "⊃": true, + "⊇": true, + "⫆": true, + "⊋": true, + "⫌": true, + "⫈": true, + "⫔": true, + "⫖": true, + "⇙": true, + "⤦": true, + "↙": true, + "↙": true, + "⤪": true, + "ß": true, + "ß": true, + "⌖": true, + "τ": true, + "⎴": true, + "ť": true, + "ţ": true, + "т": true, + "⃛": true, + "⌕": true, + "𝔱": true, + "∴": true, + "∴": true, + "θ": true, + "ϑ": true, + "ϑ": true, + "≈": true, + "∼": true, + " ": true, + "≈": true, + "∼": true, + "þ": true, + "þ": true, + "˜": true, + "×": true, + "×": true, + "⊠": true, + "⨱": true, + "⨰": true, + "∭": true, + "⤨": true, + "⊤": true, + "⌶": true, + "⫱": true, + "𝕥": true, + "⫚": true, + "⤩": true, + "‴": true, + "™": true, + "▵": true, + "▿": true, + "◃": true, + "⊴": true, + "≜": true, + "▹": true, + "⊵": true, + "◬": true, + "≜": true, + "⨺": true, + "⨹": true, + "⧍": true, + "⨻": true, + "⏢": true, + "𝓉": true, + "ц": true, + "ћ": true, + "ŧ": true, + "≬": true, + "↞": true, + "↠": true, + "⇑": true, + "⥣": true, + "ú": true, + "ú": true, + "↑": true, + "ў": true, + "ŭ": true, + "û": true, + "û": true, + "у": true, + "⇅": true, + "ű": true, + "⥮": true, + "⥾": true, + "𝔲": true, + "ù": true, + "ù": true, + "↿": true, + "↾": true, + "▀": true, + "⌜": true, + "⌜": true, + "⌏": true, + "◸": true, + "ū": true, + "¨": true, + "¨": true, + "ų": true, + "𝕦": true, + "↑": true, + "↕": true, + "↿": true, + "↾": true, + "⊎": true, + "υ": true, + "ϒ": true, + "υ": true, + "⇈": true, + "⌝": true, + "⌝": true, + "⌎": true, + "ů": true, + "◹": true, + "𝓊": true, + "⋰": true, + "ũ": true, + "▵": true, + "▴": true, + "⇈": true, + "ü": true, + "ü": true, + "⦧": true, + "⇕": true, + "⫨": true, + "⫩": true, + "⊨": true, + "⦜": true, + "ϵ": true, + "ϰ": true, + "∅": true, + "ϕ": true, + "ϖ": true, + "∝": true, + "↕": true, + "ϱ": true, + "ς": true, + "⊊︀": true, + "⫋︀": true, + "⊋︀": true, + "⫌︀": true, + "ϑ": true, + "⊲": true, + "⊳": true, + "в": true, 
+ "⊢": true, + "∨": true, + "⊻": true, + "≚": true, + "⋮": true, + "|": true, + "|": true, + "𝔳": true, + "⊲": true, + "⊂⃒": true, + "⊃⃒": true, + "𝕧": true, + "∝": true, + "⊳": true, + "𝓋": true, + "⫋︀": true, + "⊊︀": true, + "⫌︀": true, + "⊋︀": true, + "⦚": true, + "ŵ": true, + "⩟": true, + "∧": true, + "≙": true, + "℘": true, + "𝔴": true, + "𝕨": true, + "℘": true, + "≀": true, + "≀": true, + "𝓌": true, + "⋂": true, + "◯": true, + "⋃": true, + "▽": true, + "𝔵": true, + "⟺": true, + "⟷": true, + "ξ": true, + "⟸": true, + "⟵": true, + "⟼": true, + "⋻": true, + "⨀": true, + "𝕩": true, + "⨁": true, + "⨂": true, + "⟹": true, + "⟶": true, + "𝓍": true, + "⨆": true, + "⨄": true, + "△": true, + "⋁": true, + "⋀": true, + "ý": true, + "ý": true, + "я": true, + "ŷ": true, + "ы": true, + "¥": true, + "¥": true, + "𝔶": true, + "ї": true, + "𝕪": true, + "𝓎": true, + "ю": true, + "ÿ": true, + "ÿ": true, + "ź": true, + "ž": true, + "з": true, + "ż": true, + "ℨ": true, + "ζ": true, + "𝔷": true, + "ж": true, + "⇝": true, + "𝕫": true, + "𝓏": true, + "‍": true, + "‌": true, +} diff --git a/vendor/github.com/russross/blackfriday/v2/esc.go b/vendor/github.com/russross/blackfriday/v2/esc.go new file mode 100644 index 0000000000..6ab60102c9 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/esc.go @@ -0,0 +1,70 @@ +package blackfriday + +import ( + "html" + "io" +) + +var htmlEscaper = [256][]byte{ + '&': []byte("&"), + '<': []byte("<"), + '>': []byte(">"), + '"': []byte("""), +} + +func escapeHTML(w io.Writer, s []byte) { + escapeEntities(w, s, false) +} + +func escapeAllHTML(w io.Writer, s []byte) { + escapeEntities(w, s, true) +} + +func escapeEntities(w io.Writer, s []byte, escapeValidEntities bool) { + var start, end int + for end < len(s) { + escSeq := htmlEscaper[s[end]] + if escSeq != nil { + isEntity, entityEnd := nodeIsEntity(s, end) + if isEntity && !escapeValidEntities { + w.Write(s[start : entityEnd+1]) + start = entityEnd + 1 + } else { + w.Write(s[start:end]) + w.Write(escSeq) + start = end + 1 + } + } + end++ + } + if start < len(s) && end <= len(s) { + w.Write(s[start:end]) + } +} + +func nodeIsEntity(s []byte, end int) (isEntity bool, endEntityPos int) { + isEntity = false + endEntityPos = end + 1 + + if s[end] == '&' { + for endEntityPos < len(s) { + if s[endEntityPos] == ';' { + if entities[string(s[end:endEntityPos+1])] { + isEntity = true + break + } + } + if !isalnum(s[endEntityPos]) && s[endEntityPos] != '&' && s[endEntityPos] != '#' { + break + } + endEntityPos++ + } + } + + return isEntity, endEntityPos +} + +func escLink(w io.Writer, text []byte) { + unesc := html.UnescapeString(string(text)) + escapeHTML(w, []byte(unesc)) +} diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go new file mode 100644 index 0000000000..cb4f26e30f --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/html.go @@ -0,0 +1,952 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// HTML rendering backend +// +// + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strings" +) + +// HTMLFlags control optional behavior of HTML renderer. +type HTMLFlags int + +// HTML renderer configuration options. 
diff --git a/vendor/github.com/russross/blackfriday/v2/html.go b/vendor/github.com/russross/blackfriday/v2/html.go
new file mode 100644
index 0000000000..cb4f26e30f
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/v2/html.go
@@ -0,0 +1,952 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// HTMLFlags control optional behavior of HTML renderer.
+type HTMLFlags int
+
+// HTML renderer configuration options.
+const (
+	HTMLFlagsNone           HTMLFlags = 0
+	SkipHTML                HTMLFlags = 1 << iota // Skip preformatted HTML blocks
+	SkipImages                                    // Skip embedded images
+	SkipLinks                                     // Skip all links
+	Safelink                                      // Only link to trusted protocols
+	NofollowLinks                                 // Only link with rel="nofollow"
+	NoreferrerLinks                               // Only link with rel="noreferrer"
+	NoopenerLinks                                 // Only link with rel="noopener"
+	HrefTargetBlank                               // Add a blank target
+	CompletePage                                  // Generate a complete HTML page
+	UseXHTML                                      // Generate XHTML output instead of HTML
+	FootnoteReturnLinks                           // Generate a link at the end of a footnote to return to the source
+	Smartypants                                   // Enable smart punctuation substitutions
+	SmartypantsFractions                          // Enable smart fractions (with Smartypants)
+	SmartypantsDashes                             // Enable smart dashes (with Smartypants)
+	SmartypantsLatexDashes                        // Enable LaTeX-style dashes (with Smartypants)
+	SmartypantsAngledQuotes                       // Enable angled double quotes (with Smartypants) for double quotes rendering
+	SmartypantsQuotesNBSP                         // Enable « French guillemets » (with Smartypants)
+	TOC                                           // Generate a table of contents
+)
+
+var (
+	htmlTagRe = regexp.MustCompile("(?i)^" + htmlTag)
+)
+
+const (
+	htmlTag = "(?:" + openTag + "|" + closeTag + "|" + htmlComment + "|" +
+		processingInstruction + "|" + declaration + "|" + cdata + ")"
+	closeTag              = "</" + tagName + "\\s*[>]"
+	openTag               = "<" + tagName + attribute + "*" + "\\s*/?>"
+	attribute             = "(?:" + "\\s+" + attributeName + attributeValueSpec + "?)"
+	attributeValue        = "(?:" + unquotedValue + "|" + singleQuotedValue + "|" + doubleQuotedValue + ")"
+	attributeValueSpec    = "(?:" + "\\s*=" + "\\s*" + attributeValue + ")"
+	attributeName         = "[a-zA-Z_:][a-zA-Z0-9:._-]*"
+	cdata                 = "<!\\[CDATA\\[[\\s\\S]*?\\]\\]>"
+	declaration           = "<![A-Z]+" + "\\s+[^>]*>"
+	doubleQuotedValue     = "\"[^\"]*\""
+	htmlComment           = "<!---->|<!--(?:-?[^>-])(?:-?[^-])*-->"
+	processingInstruction = "[<][?].*?[?][>]"
+	singleQuotedValue     = "'[^']*'"
+	tagName               = "[A-Za-z][A-Za-z0-9-]*"
+	unquotedValue         = "[^\"'=<>`\\x00-\\x20]+"
+)
+
+// HTMLRendererParameters is a collection of supplementary parameters tweaking
+// the behavior of various parts of HTML renderer.
+type HTMLRendererParameters struct {
+	// Prepend this text to each relative URL.
+	AbsolutePrefix string
+	// Add this text to each footnote anchor, to ensure uniqueness.
+	FootnoteAnchorPrefix string
+	// Show this text inside the <a> tag for a footnote return link, if the
+	// HTML_FOOTNOTE_RETURN_LINKS flag is enabled. If blank, the string
+	// [return] is used.
+	FootnoteReturnLinkContents string
+	// If set, add this text to the front of each Heading ID, to ensure
+	// uniqueness.
+	HeadingIDPrefix string
+	// If set, add this text to the back of each Heading ID, to ensure uniqueness.
+	HeadingIDSuffix string
+	// Increase heading levels: if the offset is 1, <h1> becomes <h2> etc.
+	// Negative offset is also valid.
+	// Resulting levels are clipped between 1 and 6.
+	HeadingLevelOffset int
+
+	Title string // Document title (used if CompletePage is set)
+	CSS   string // Optional CSS file URL (used if CompletePage is set)
+	Icon  string // Optional icon file URL (used if CompletePage is set)
+
+	Flags HTMLFlags // Flags allow customizing this renderer's behavior
+}
+
+// HTMLRenderer is a type that implements the Renderer interface for HTML output.
+//
+// Do not create this directly, instead use the NewHTMLRenderer function.
+type HTMLRenderer struct {
+	HTMLRendererParameters
+
+	closeTag string // how to end singleton tags: either " />" or ">"
+
+	// Track heading IDs to prevent ID collision in a single generation.
+	headingIDs map[string]int
+
+	lastOutputLen int
+	disableTags   int
+
+	sr *SPRenderer
+}
+
+const (
+	xhtmlClose = " />"
+	htmlClose  = ">"
+)
+
+// NewHTMLRenderer creates and configures an HTMLRenderer object, which
+// satisfies the Renderer interface.
+func NewHTMLRenderer(params HTMLRendererParameters) *HTMLRenderer {
+	// configure the rendering engine
+	closeTag := htmlClose
+	if params.Flags&UseXHTML != 0 {
+		closeTag = xhtmlClose
+	}
+
+	if params.FootnoteReturnLinkContents == "" {
+		// U+FE0E is VARIATION SELECTOR-15.
+		// It suppresses automatic emoji presentation of the preceding
+		// U+21A9 LEFTWARDS ARROW WITH HOOK on iOS and iPadOS.
+		params.FootnoteReturnLinkContents = "↩\ufe0e"
+	}
+
+	return &HTMLRenderer{
+		HTMLRendererParameters: params,
+
+		closeTag:   closeTag,
+		headingIDs: make(map[string]int),
+
+		sr: NewSmartypantsRenderer(params.Flags),
+	}
+}
+
+func isHTMLTag(tag []byte, tagname string) bool {
+	found, _ := findHTMLTagPos(tag, tagname)
+	return found
+}
+
+// Look for a character, but ignore it when it's in any kind of quotes, it
+// might be JavaScript
+func skipUntilCharIgnoreQuotes(html []byte, start int, char byte) int {
+	inSingleQuote := false
+	inDoubleQuote := false
+	inGraveQuote := false
+	i := start
+	for i < len(html) {
+		switch {
+		case html[i] == char && !inSingleQuote && !inDoubleQuote && !inGraveQuote:
+			return i
+		case html[i] == '\'':
+			inSingleQuote = !inSingleQuote
+		case html[i] == '"':
+			inDoubleQuote = !inDoubleQuote
+		case html[i] == '`':
+			inGraveQuote = !inGraveQuote
+		}
+		i++
+	}
+	return start
+}
+
+func findHTMLTagPos(tag []byte, tagname string) (bool, int) {
+	i := 0
+	if i < len(tag) && tag[0] != '<' {
+		return false, -1
+	}
+	i++
+	i = skipSpace(tag, i)
+
+	if i < len(tag) && tag[i] == '/' {
+		i++
+	}
+
+	i = skipSpace(tag, i)
+	j := 0
+	for ; i < len(tag); i, j = i+1, j+1 {
+		if j >= len(tagname) {
+			break
+		}
+
+		if strings.ToLower(string(tag[i]))[0] != tagname[j] {
+			return false, -1
+		}
+	}
+
+	if i == len(tag) {
+		return false, -1
+	}
+
+	rightAngle := skipUntilCharIgnoreQuotes(tag, i, '>')
+	if rightAngle >= i {
+		return true, rightAngle
+	}
+
+	return false, -1
+}
+
+func skipSpace(tag []byte, i int) int {
+	for i < len(tag) && isspace(tag[i]) {
+		i++
+	}
+	return i
+}
+
+func isRelativeLink(link []byte) (yes bool) {
+	// a tag begin with '#'
+	if link[0] == '#' {
+		return true
+	}
+
+	// link begin with '/' but not '//', the second maybe a protocol relative link
+	if len(link) >= 2 && link[0] == '/' && link[1] != '/' {
+		return true
+	}
+
+	// only the root '/'
+	if len(link) == 1 && link[0] == '/' {
+		return true
+	}
+
+	// current directory : begin with "./"
+	if bytes.HasPrefix(link, []byte("./")) {
+		return true
+	}
+
+	// parent directory : begin with "../"
+	if bytes.HasPrefix(link, []byte("../")) {
+		return true
+	}
+
+	return false
+}
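isRelativeLink is the classifier behind both the URL prefixing below and the rel attributes added in appendLinkAttrs: fragments, root-relative paths, and "./" or "../" paths count as relative, while protocol-relative "//host" links deliberately do not. A hypothetical in-package example of that classification (not part of the vendored file):

```go
package blackfriday

import "fmt"

// Hypothetical sketch of which destinations isRelativeLink accepts.
func ExampleIsRelativeLinkSketch() {
	for _, link := range []string{"#anchor", "/root", "//host/path", "./here", "../up", "https://example.com"} {
		fmt.Println(link, isRelativeLink([]byte(link)))
	}
	// Output:
	// #anchor true
	// /root true
	// //host/path false
	// ./here true
	// ../up true
	// https://example.com false
}
```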
+func (r *HTMLRenderer) ensureUniqueHeadingID(id string) string {
+	for count, found := r.headingIDs[id]; found; count, found = r.headingIDs[id] {
+		tmp := fmt.Sprintf("%s-%d", id, count+1)
+
+		if _, tmpFound := r.headingIDs[tmp]; !tmpFound {
+			r.headingIDs[id] = count + 1
+			id = tmp
+		} else {
+			id = id + "-1"
+		}
+	}
+
+	if _, found := r.headingIDs[id]; !found {
+		r.headingIDs[id] = 0
+	}
+
+	return id
+}
+
+func (r *HTMLRenderer) addAbsPrefix(link []byte) []byte {
+	if r.AbsolutePrefix != "" && isRelativeLink(link) && link[0] != '.' {
+		newDest := r.AbsolutePrefix
+		if link[0] != '/' {
+			newDest += "/"
+		}
+		newDest += string(link)
+		return []byte(newDest)
+	}
+	return link
+}
+
+func appendLinkAttrs(attrs []string, flags HTMLFlags, link []byte) []string {
+	if isRelativeLink(link) {
+		return attrs
+	}
+	val := []string{}
+	if flags&NofollowLinks != 0 {
+		val = append(val, "nofollow")
+	}
+	if flags&NoreferrerLinks != 0 {
+		val = append(val, "noreferrer")
+	}
+	if flags&NoopenerLinks != 0 {
+		val = append(val, "noopener")
+	}
+	if flags&HrefTargetBlank != 0 {
+		attrs = append(attrs, "target=\"_blank\"")
+	}
+	if len(val) == 0 {
+		return attrs
+	}
+	attr := fmt.Sprintf("rel=%q", strings.Join(val, " "))
+	return append(attrs, attr)
+}
+
+func isMailto(link []byte) bool {
+	return bytes.HasPrefix(link, []byte("mailto:"))
+}
+
+func needSkipLink(flags HTMLFlags, dest []byte) bool {
+	if flags&SkipLinks != 0 {
+		return true
+	}
+	return flags&Safelink != 0 && !isSafeLink(dest) && !isMailto(dest)
+}
+
+func isSmartypantable(node *Node) bool {
+	pt := node.Parent.Type
+	return pt != Link && pt != CodeBlock && pt != Code
+}
+
+func appendLanguageAttr(attrs []string, info []byte) []string {
+	if len(info) == 0 {
+		return attrs
+	}
+	endOfLang := bytes.IndexAny(info, "\t ")
+	if endOfLang < 0 {
+		endOfLang = len(info)
+	}
+	return append(attrs, fmt.Sprintf("class=\"language-%s\"", info[:endOfLang]))
+}
+
+func (r *HTMLRenderer) tag(w io.Writer, name []byte, attrs []string) {
+	w.Write(name)
+	if len(attrs) > 0 {
+		w.Write(spaceBytes)
+		w.Write([]byte(strings.Join(attrs, " ")))
+	}
+	w.Write(gtBytes)
+	r.lastOutputLen = 1
+}
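appendLinkAttrs only decorates non-relative destinations: relative links return the attribute list unchanged, while absolute ones can pick up a target and a merged rel value. A hypothetical in-package example (not part of the vendored file; flag combination is illustrative):

```go
package blackfriday

import "fmt"

// Hypothetical sketch of the attributes produced for an absolute link
// when several link-related flags are set together.
func ExampleAppendLinkAttrsSketch() {
	flags := NofollowLinks | NoreferrerLinks | HrefTargetBlank
	attrs := appendLinkAttrs(nil, flags, []byte("https://example.com"))
	fmt.Println(attrs)

	// Relative destinations are returned untouched.
	fmt.Println(appendLinkAttrs(nil, flags, []byte("/local")))
	// Output:
	// [target="_blank" rel="nofollow noreferrer"]
	// []
}
```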
+func footnoteRef(prefix string, node *Node) []byte {
+	urlFrag := prefix + string(slugify(node.Destination))
+	anchor := fmt.Sprintf(`<a href="#fn:%s">%d</a>`, urlFrag, node.NoteID)
+	return []byte(fmt.Sprintf(`<sup class="footnote-ref" id="fnref:%s">%s</sup>`, urlFrag, anchor))
+}
+
+func footnoteItem(prefix string, slug []byte) []byte {
+	return []byte(fmt.Sprintf(`<li id="%sfn:%s">`, prefix, slug))
+}
+
+func footnoteReturnLink(prefix, returnLink string, slug []byte) []byte {
+	const format = ` <a class="footnote-return" href="#%sfnref:%s">%s</a>`
+	return []byte(fmt.Sprintf(format, prefix, slug, returnLink))
+}
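The three helpers above fix the shape of all footnote markup: a superscripted reference anchor, a list-item opener, and an optional return link. A hypothetical in-package example of the latter two (footnoteRef needs a *Node, so it is omitted; the prefix and slug are illustrative):

```go
package blackfriday

import "fmt"

// Hypothetical sketch of the fragments the footnote helpers emit.
func ExampleFootnoteMarkupSketch() {
	fmt.Printf("%q\n", footnoteItem("doc-", []byte("note1")))
	fmt.Printf("%q\n", footnoteReturnLink("doc-", "[return]", []byte("note1")))
	// Output:
	// "<li id=\"doc-fn:note1\">"
	// " <a class=\"footnote-return\" href=\"#doc-fnref:note1\">[return]</a>"
}
```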
    ") + brXHTMLTag = []byte("
    ") + emTag = []byte("") + emCloseTag = []byte("") + strongTag = []byte("") + strongCloseTag = []byte("") + delTag = []byte("") + delCloseTag = []byte("") + ttTag = []byte("") + ttCloseTag = []byte("") + aTag = []byte("") + preTag = []byte("
    ")
    +	preCloseTag        = []byte("
    ") + codeTag = []byte("") + codeCloseTag = []byte("") + pTag = []byte("

    ") + pCloseTag = []byte("

    ") + blockquoteTag = []byte("
    ") + blockquoteCloseTag = []byte("
    ") + hrTag = []byte("
    ") + hrXHTMLTag = []byte("
    ") + ulTag = []byte("
      ") + ulCloseTag = []byte("
    ") + olTag = []byte("
      ") + olCloseTag = []byte("
    ") + dlTag = []byte("
    ") + dlCloseTag = []byte("
    ") + liTag = []byte("
  • ") + liCloseTag = []byte("
  • ") + ddTag = []byte("
    ") + ddCloseTag = []byte("
    ") + dtTag = []byte("
    ") + dtCloseTag = []byte("
    ") + tableTag = []byte("") + tableCloseTag = []byte("
    ") + tdTag = []byte("") + thTag = []byte("") + theadTag = []byte("") + theadCloseTag = []byte("") + tbodyTag = []byte("") + tbodyCloseTag = []byte("") + trTag = []byte("") + trCloseTag = []byte("") + h1Tag = []byte("") + h2Tag = []byte("") + h3Tag = []byte("") + h4Tag = []byte("") + h5Tag = []byte("") + h6Tag = []byte("") + + footnotesDivBytes = []byte("\n
    \n\n") + footnotesCloseDivBytes = []byte("\n
    \n") +) + +func headingTagsFromLevel(level int) ([]byte, []byte) { + if level <= 1 { + return h1Tag, h1CloseTag + } + switch level { + case 2: + return h2Tag, h2CloseTag + case 3: + return h3Tag, h3CloseTag + case 4: + return h4Tag, h4CloseTag + case 5: + return h5Tag, h5CloseTag + } + return h6Tag, h6CloseTag +} + +func (r *HTMLRenderer) outHRTag(w io.Writer) { + if r.Flags&UseXHTML == 0 { + r.out(w, hrTag) + } else { + r.out(w, hrXHTMLTag) + } +} + +// RenderNode is a default renderer of a single node of a syntax tree. For +// block nodes it will be called twice: first time with entering=true, second +// time with entering=false, so that it could know when it's working on an open +// tag and when on close. It writes the result to w. +// +// The return value is a way to tell the calling walker to adjust its walk +// pattern: e.g. it can terminate the traversal by returning Terminate. Or it +// can ask the walker to skip a subtree of this node by returning SkipChildren. +// The typical behavior is to return GoToNext, which asks for the usual +// traversal to the next node. +func (r *HTMLRenderer) RenderNode(w io.Writer, node *Node, entering bool) WalkStatus { + attrs := []string{} + switch node.Type { + case Text: + if r.Flags&Smartypants != 0 { + var tmp bytes.Buffer + escapeHTML(&tmp, node.Literal) + r.sr.Process(w, tmp.Bytes()) + } else { + if node.Parent.Type == Link { + escLink(w, node.Literal) + } else { + escapeHTML(w, node.Literal) + } + } + case Softbreak: + r.cr(w) + // TODO: make it configurable via out(renderer.softbreak) + case Hardbreak: + if r.Flags&UseXHTML == 0 { + r.out(w, brTag) + } else { + r.out(w, brXHTMLTag) + } + r.cr(w) + case Emph: + if entering { + r.out(w, emTag) + } else { + r.out(w, emCloseTag) + } + case Strong: + if entering { + r.out(w, strongTag) + } else { + r.out(w, strongCloseTag) + } + case Del: + if entering { + r.out(w, delTag) + } else { + r.out(w, delCloseTag) + } + case HTMLSpan: + if r.Flags&SkipHTML != 0 { + break + } + r.out(w, node.Literal) + case Link: + // mark it but don't link it if it is not a safe link: no smartypants + dest := node.LinkData.Destination + if needSkipLink(r.Flags, dest) { + if entering { + r.out(w, ttTag) + } else { + r.out(w, ttCloseTag) + } + } else { + if entering { + dest = r.addAbsPrefix(dest) + var hrefBuf bytes.Buffer + hrefBuf.WriteString("href=\"") + escLink(&hrefBuf, dest) + hrefBuf.WriteByte('"') + attrs = append(attrs, hrefBuf.String()) + if node.NoteID != 0 { + r.out(w, footnoteRef(r.FootnoteAnchorPrefix, node)) + break + } + attrs = appendLinkAttrs(attrs, r.Flags, dest) + if len(node.LinkData.Title) > 0 { + var titleBuff bytes.Buffer + titleBuff.WriteString("title=\"") + escapeHTML(&titleBuff, node.LinkData.Title) + titleBuff.WriteByte('"') + attrs = append(attrs, titleBuff.String()) + } + r.tag(w, aTag, attrs) + } else { + if node.NoteID != 0 { + break + } + r.out(w, aCloseTag) + } + } + case Image: + if r.Flags&SkipImages != 0 { + return SkipChildren + } + if entering { + dest := node.LinkData.Destination + dest = r.addAbsPrefix(dest) + if r.disableTags == 0 { + //if options.safe && potentiallyUnsafe(dest) { + //out(w, ``)
+				//} else {
+				r.out(w, []byte(`<img src="`))
+				escLink(w, dest)
+				r.out(w, []byte(`" alt="`))
+				//}
+			}
+			r.disableTags++
+		} else {
+			r.disableTags--
+			if r.disableTags == 0 {
+				if node.LinkData.Title != nil {
+					r.out(w, []byte(`" title="`))
+					escapeHTML(w, node.LinkData.Title)
+				}
+				r.out(w, []byte(`" />`))
+			}
+		}
+	case Code:
+		r.out(w, codeTag)
+		escapeAllHTML(w, node.Literal)
+		r.out(w, codeCloseTag)
+	case Document:
+		break
+	case Paragraph:
+		if skipParagraphTags(node) {
+			break
+		}
+		if entering {
+			// TODO: untangle this clusterfuck about when the newlines need
+			// to be added and when not.
+			if node.Prev != nil {
+				switch node.Prev.Type {
+				case HTMLBlock, List, Paragraph, Heading, CodeBlock, BlockQuote, HorizontalRule:
+					r.cr(w)
+				}
+			}
+			if node.Parent.Type == BlockQuote && node.Prev == nil {
+				r.cr(w)
+			}
+			r.out(w, pTag)
+		} else {
+			r.out(w, pCloseTag)
+			if !(node.Parent.Type == Item && node.Next == nil) {
+				r.cr(w)
+			}
+		}
+	case BlockQuote:
+		if entering {
+			r.cr(w)
+			r.out(w, blockquoteTag)
+		} else {
+			r.out(w, blockquoteCloseTag)
+			r.cr(w)
+		}
+	case HTMLBlock:
+		if r.Flags&SkipHTML != 0 {
+			break
+		}
+		r.cr(w)
+		r.out(w, node.Literal)
+		r.cr(w)
+	case Heading:
+		headingLevel := r.HTMLRendererParameters.HeadingLevelOffset + node.Level
+		openTag, closeTag := headingTagsFromLevel(headingLevel)
+		if entering {
+			if node.IsTitleblock {
+				attrs = append(attrs, `class="title"`)
+			}
+			if node.HeadingID != "" {
+				id := r.ensureUniqueHeadingID(node.HeadingID)
+				if r.HeadingIDPrefix != "" {
+					id = r.HeadingIDPrefix + id
+				}
+				if r.HeadingIDSuffix != "" {
+					id = id + r.HeadingIDSuffix
+				}
+				attrs = append(attrs, fmt.Sprintf(`id="%s"`, id))
+			}
+			r.cr(w)
+			r.tag(w, openTag, attrs)
+		} else {
+			r.out(w, closeTag)
+			if !(node.Parent.Type == Item && node.Next == nil) {
+				r.cr(w)
+			}
+		}
+	case HorizontalRule:
+		r.cr(w)
+		r.outHRTag(w)
+		r.cr(w)
+	case List:
+		openTag := ulTag
+		closeTag := ulCloseTag
+		if node.ListFlags&ListTypeOrdered != 0 {
+			openTag = olTag
+			closeTag = olCloseTag
+		}
+		if node.ListFlags&ListTypeDefinition != 0 {
+			openTag = dlTag
+			closeTag = dlCloseTag
+		}
+		if entering {
+			if node.IsFootnotesList {
+				r.out(w, footnotesDivBytes)
+				r.outHRTag(w)
+				r.cr(w)
+			}
+			r.cr(w)
+			if node.Parent.Type == Item && node.Parent.Parent.Tight {
+				r.cr(w)
+			}
+			r.tag(w, openTag[:len(openTag)-1], attrs)
+			r.cr(w)
+		} else {
+			r.out(w, closeTag)
+			//cr(w)
+			//if node.parent.Type != Item {
+			//	cr(w)
+			//}
+			if node.Parent.Type == Item && node.Next != nil {
+				r.cr(w)
+			}
+			if node.Parent.Type == Document || node.Parent.Type == BlockQuote {
+				r.cr(w)
+			}
+			if node.IsFootnotesList {
+				r.out(w, footnotesCloseDivBytes)
+			}
+		}
+	case Item:
+		openTag := liTag
+		closeTag := liCloseTag
+		if node.ListFlags&ListTypeDefinition != 0 {
+			openTag = ddTag
+			closeTag = ddCloseTag
+		}
+		if node.ListFlags&ListTypeTerm != 0 {
+			openTag = dtTag
+			closeTag = dtCloseTag
+		}
+		if entering {
+			if itemOpenCR(node) {
+				r.cr(w)
+			}
+			if node.ListData.RefLink != nil {
+				slug := slugify(node.ListData.RefLink)
+				r.out(w, footnoteItem(r.FootnoteAnchorPrefix, slug))
+				break
+			}
+			r.out(w, openTag)
+		} else {
+			if node.ListData.RefLink != nil {
+				slug := slugify(node.ListData.RefLink)
+				if r.Flags&FootnoteReturnLinks != 0 {
+					r.out(w, footnoteReturnLink(r.FootnoteAnchorPrefix, r.FootnoteReturnLinkContents, slug))
+				}
+			}
+			r.out(w, closeTag)
+			r.cr(w)
+		}
+	case CodeBlock:
+		attrs = appendLanguageAttr(attrs, node.Info)
+		r.cr(w)
+		r.out(w, preTag)
+		r.tag(w, codeTag[:len(codeTag)-1], attrs)
+		escapeAllHTML(w, node.Literal)
+		r.out(w, codeCloseTag)
+		r.out(w, preCloseTag)
+		if node.Parent.Type != Item {
+			r.cr(w)
+		}
+	case Table:
+		if entering {
+			r.cr(w)
+			r.out(w, tableTag)
+		} else {
+			r.out(w, tableCloseTag)
+			r.cr(w)
+		}
+	case TableCell:
+		openTag := tdTag
+		closeTag := tdCloseTag
+		if node.IsHeader {
+			openTag = thTag
+			closeTag = thCloseTag
+		}
+		if entering {
+			align := cellAlignment(node.Align)
+			if align != "" {
+				attrs = append(attrs, fmt.Sprintf(`align="%s"`, align))
+			}
+			if node.Prev == nil {
+				r.cr(w)
+			}
+			r.tag(w, openTag, attrs)
+		} else {
+			r.out(w, closeTag)
+			r.cr(w)
+		}
+	case TableHead:
+		if entering {
+			r.cr(w)
+			r.out(w, theadTag)
+		} else {
+			r.out(w, theadCloseTag)
+			r.cr(w)
+		}
+	case TableBody:
+		if entering {
+			r.cr(w)
+			r.out(w, tbodyTag)
+			// XXX: this is to adhere to a rather silly test. Should fix test.
+			if node.FirstChild == nil {
+				r.cr(w)
+			}
+		} else {
+			r.out(w, tbodyCloseTag)
+			r.cr(w)
+		}
+	case TableRow:
+		if entering {
+			r.cr(w)
+			r.out(w, trTag)
+		} else {
+			r.out(w, trCloseTag)
+			r.cr(w)
+		}
+	default:
+		panic("Unknown node type " + node.Type.String())
+	}
+	return GoToNext
+}
+
+// RenderHeader writes HTML document preamble and TOC if requested.
+func (r *HTMLRenderer) RenderHeader(w io.Writer, ast *Node) {
+	r.writeDocumentHeader(w)
+	if r.Flags&TOC != 0 {
+		r.writeTOC(w, ast)
+	}
+}
+
+// RenderFooter writes HTML document footer.
+func (r *HTMLRenderer) RenderFooter(w io.Writer, ast *Node) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	io.WriteString(w, "\n</body>\n</html>\n")
+}
+
+func (r *HTMLRenderer) writeDocumentHeader(w io.Writer) {
+	if r.Flags&CompletePage == 0 {
+		return
+	}
+	ending := ""
+	if r.Flags&UseXHTML != 0 {
+		io.WriteString(w, "<!DOCTYPE html PUBLIC \"-//W3C//DTD XHTML 1.0 Transitional//EN\" ")
+		io.WriteString(w, "\"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd\">\n")
+		io.WriteString(w, "<html xmlns=\"http://www.w3.org/1999/xhtml\">\n")
+		ending = " /"
+	} else {
+		io.WriteString(w, "<!DOCTYPE html>\n")
+		io.WriteString(w, "<html>\n")
+	}
+	io.WriteString(w, "<head>\n")
+	io.WriteString(w, "  <title>")
+	if r.Flags&Smartypants != 0 {
+		r.sr.Process(w, []byte(r.Title))
+	} else {
+		escapeHTML(w, []byte(r.Title))
+	}
+	io.WriteString(w, "</title>\n")
+	io.WriteString(w, "  <meta name=\"GENERATOR\" content=\"Blackfriday Markdown Processor v")
+	io.WriteString(w, Version)
+	io.WriteString(w, "\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	io.WriteString(w, "  <meta charset=\"utf-8\"")
+	io.WriteString(w, ending)
+	io.WriteString(w, ">\n")
+	if r.CSS != "" {
+		io.WriteString(w, "  <link rel=\"stylesheet\" type=\"text/css\" href=\"")
+		escapeHTML(w, []byte(r.CSS))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	if r.Icon != "" {
+		io.WriteString(w, "  <link rel=\"icon\" type=\"image/x-icon\" href=\"")
+		escapeHTML(w, []byte(r.Icon))
+		io.WriteString(w, "\"")
+		io.WriteString(w, ending)
+		io.WriteString(w, ">\n")
+	}
+	io.WriteString(w, "</head>\n")
+	io.WriteString(w, "<body>\n\n")
+}
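Both writeDocumentHeader and RenderFooter are gated on CompletePage, so a standalone HTML page is only produced when that flag is passed through HTMLRendererParameters. A hedged sketch of opting in via the public v2 API (title and flag choices are illustrative):

```go
package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	renderer := blackfriday.NewHTMLRenderer(blackfriday.HTMLRendererParameters{
		Title: "Notes",
		Flags: blackfriday.CompletePage | blackfriday.UseXHTML,
	})
	out := blackfriday.Run([]byte("# Hello"), blackfriday.WithRenderer(renderer))
	// Prints a full XHTML document: DOCTYPE, <head> with <title>Notes</title>,
	// then the rendered body between <body> and </body>.
	fmt.Printf("%s", out)
}
```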
  • ") + } else if node.Level < tocLevel { + for node.Level < tocLevel { + tocLevel-- + buf.WriteString("
  • \n") + } + buf.WriteString("\n\n
  • ") + } else { + for node.Level > tocLevel { + tocLevel++ + buf.WriteString("\n") + } + + if buf.Len() > 0 { + io.WriteString(w, "\n") + } + r.lastOutputLen = buf.Len() +} diff --git a/vendor/github.com/russross/blackfriday/v2/inline.go b/vendor/github.com/russross/blackfriday/v2/inline.go new file mode 100644 index 0000000000..d45bd94172 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/inline.go @@ -0,0 +1,1228 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse inline elements. +// + +package blackfriday + +import ( + "bytes" + "regexp" + "strconv" +) + +var ( + urlRe = `((https?|ftp):\/\/|\/)[-A-Za-z0-9+&@#\/%?=~_|!:,.;\(\)]+` + anchorRe = regexp.MustCompile(`^(]+")?\s?>` + urlRe + `<\/a>)`) + + // https://www.w3.org/TR/html5/syntax.html#character-references + // highest unicode code point in 17 planes (2^20): 1,114,112d = + // 7 dec digits or 6 hex digits + // named entity references can be 2-31 characters with stuff like < + // at one end and ∳ at the other. There + // are also sometimes numbers at the end, although this isn't inherent + // in the specification; there are never numbers anywhere else in + // current character references, though; see ¾ and ▒, etc. + // https://www.w3.org/TR/html5/syntax.html#named-character-references + // + // entity := "&" (named group | number ref) ";" + // named group := [a-zA-Z]{2,31}[0-9]{0,2} + // number ref := "#" (dec ref | hex ref) + // dec ref := [0-9]{1,7} + // hex ref := ("x" | "X") [0-9a-fA-F]{1,6} + htmlEntityRe = regexp.MustCompile(`&([a-zA-Z]{2,31}[0-9]{0,2}|#([0-9]{1,7}|[xX][0-9a-fA-F]{1,6}));`) +) + +// Functions to parse text within a block +// Each function returns the number of chars taken care of +// data is the complete block being rendered +// offset is the number of valid chars before the current cursor + +func (p *Markdown) inline(currBlock *Node, data []byte) { + // handlers might call us recursively: enforce a maximum depth + if p.nesting >= p.maxNesting || len(data) == 0 { + return + } + p.nesting++ + beg, end := 0, 0 + for end < len(data) { + handler := p.inlineCallback[data[end]] + if handler != nil { + if consumed, node := handler(p, data, end); consumed == 0 { + // No action from the callback. + end++ + } else { + // Copy inactive chars into the output. + currBlock.AppendChild(text(data[beg:end])) + if node != nil { + currBlock.AppendChild(node) + } + // Skip past whatever the callback used. 
+ beg = end + consumed + end = beg + } + } else { + end++ + } + } + if beg < len(data) { + if data[end-1] == '\n' { + end-- + } + currBlock.AppendChild(text(data[beg:end])) + } + p.nesting-- +} + +// single and double emphasis parsing +func emphasis(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + c := data[0] + + if len(data) > 2 && data[1] != c { + // whitespace cannot follow an opening emphasis; + // strikethrough only takes two characters '~~' + if c == '~' || isspace(data[1]) { + return 0, nil + } + ret, node := helperEmphasis(p, data[1:], c) + if ret == 0 { + return 0, nil + } + + return ret + 1, node + } + + if len(data) > 3 && data[1] == c && data[2] != c { + if isspace(data[2]) { + return 0, nil + } + ret, node := helperDoubleEmphasis(p, data[2:], c) + if ret == 0 { + return 0, nil + } + + return ret + 2, node + } + + if len(data) > 4 && data[1] == c && data[2] == c && data[3] != c { + if c == '~' || isspace(data[3]) { + return 0, nil + } + ret, node := helperTripleEmphasis(p, data, 3, c) + if ret == 0 { + return 0, nil + } + + return ret + 3, node + } + + return 0, nil +} + +func codeSpan(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + + nb := 0 + + // count the number of backticks in the delimiter + for nb < len(data) && data[nb] == '`' { + nb++ + } + + // find the next delimiter + i, end := 0, 0 + for end = nb; end < len(data) && i < nb; end++ { + if data[end] == '`' { + i++ + } else { + i = 0 + } + } + + // no matching delimiter? + if i < nb && end >= len(data) { + return 0, nil + } + + // trim outside whitespace + fBegin := nb + for fBegin < end && data[fBegin] == ' ' { + fBegin++ + } + + fEnd := end - nb + for fEnd > fBegin && data[fEnd-1] == ' ' { + fEnd-- + } + + // render the code span + if fBegin != fEnd { + code := NewNode(Code) + code.Literal = data[fBegin:fEnd] + return end, code + } + + return end, nil +} + +// newline preceded by two spaces becomes
<br>
    +func maybeLineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + origOffset := offset + for offset < len(data) && data[offset] == ' ' { + offset++ + } + + if offset < len(data) && data[offset] == '\n' { + if offset-origOffset >= 2 { + return offset - origOffset + 1, NewNode(Hardbreak) + } + return offset - origOffset, nil + } + return 0, nil +} + +// newline without two spaces works when HardLineBreak is enabled +func lineBreak(p *Markdown, data []byte, offset int) (int, *Node) { + if p.extensions&HardLineBreak != 0 { + return 1, NewNode(Hardbreak) + } + return 0, nil +} + +type linkType int + +const ( + linkNormal linkType = iota + linkImg + linkDeferredFootnote + linkInlineFootnote +) + +func isReferenceStyleLink(data []byte, pos int, t linkType) bool { + if t == linkDeferredFootnote { + return false + } + return pos < len(data)-1 && data[pos] == '[' && data[pos+1] != '^' +} + +func maybeImage(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +func maybeInlineFootnote(p *Markdown, data []byte, offset int) (int, *Node) { + if offset < len(data)-1 && data[offset+1] == '[' { + return link(p, data, offset) + } + return 0, nil +} + +// '[': parse a link or an image or a footnote +func link(p *Markdown, data []byte, offset int) (int, *Node) { + // no links allowed inside regular links, footnote, and deferred footnotes + if p.insideLink && (offset > 0 && data[offset-1] == '[' || len(data)-1 > offset && data[offset+1] == '^') { + return 0, nil + } + + var t linkType + switch { + // special case: ![^text] == deferred footnote (that follows something with + // an exclamation point) + case p.extensions&Footnotes != 0 && len(data)-1 > offset && data[offset+1] == '^': + t = linkDeferredFootnote + // ![alt] == image + case offset >= 0 && data[offset] == '!': + t = linkImg + offset++ + // ^[text] == inline footnote + // [^refId] == deferred footnote + case p.extensions&Footnotes != 0: + if offset >= 0 && data[offset] == '^' { + t = linkInlineFootnote + offset++ + } else if len(data)-1 > offset && data[offset+1] == '^' { + t = linkDeferredFootnote + } + // [text] == regular link + default: + t = linkNormal + } + + data = data[offset:] + + var ( + i = 1 + noteID int + title, link, altContent []byte + textHasNl = false + ) + + if t == linkDeferredFootnote { + i++ + } + + // look for the matching closing bracket + for level := 1; level > 0 && i < len(data); i++ { + switch { + case data[i] == '\n': + textHasNl = true + + case isBackslashEscaped(data, i): + continue + + case data[i] == '[': + level++ + + case data[i] == ']': + level-- + if level <= 0 { + i-- // compensate for extra i++ in for loop + } + } + } + + if i >= len(data) { + return 0, nil + } + + txtE := i + i++ + var footnoteNode *Node + + // skip any amount of whitespace or newline + // (this is much more lax than original markdown syntax) + for i < len(data) && isspace(data[i]) { + i++ + } + + // inline style link + switch { + case i < len(data) && data[i] == '(': + // skip initial whitespace + i++ + + for i < len(data) && isspace(data[i]) { + i++ + } + + linkB := i + + // look for link end: ' " ) + findlinkend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')' || data[i] == '\'' || data[i] == '"': + break findlinkend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + linkE := i + + // look for title end if present + titleB, titleE := 0, 0 + if data[i] == '\'' 
|| data[i] == '"' { + i++ + titleB = i + + findtitleend: + for i < len(data) { + switch { + case data[i] == '\\': + i += 2 + + case data[i] == ')': + break findtitleend + + default: + i++ + } + } + + if i >= len(data) { + return 0, nil + } + + // skip whitespace after title + titleE = i - 1 + for titleE > titleB && isspace(data[titleE]) { + titleE-- + } + + // check for closing quote presence + if data[titleE] != '\'' && data[titleE] != '"' { + titleB, titleE = 0, 0 + linkE = i + } + } + + // remove whitespace at the end of the link + for linkE > linkB && isspace(data[linkE-1]) { + linkE-- + } + + // remove optional angle brackets around the link + if data[linkB] == '<' { + linkB++ + } + if data[linkE-1] == '>' { + linkE-- + } + + // build escaped link and title + if linkE > linkB { + link = data[linkB:linkE] + } + + if titleE > titleB { + title = data[titleB:titleE] + } + + i++ + + // reference style link + case isReferenceStyleLink(data, i, t): + var id []byte + altContentConsidered := false + + // look for the id + i++ + linkB := i + for i < len(data) && data[i] != ']' { + i++ + } + if i >= len(data) { + return 0, nil + } + linkE := i + + // find the reference + if linkB == linkE { + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + id = data[1:txtE] + altContentConsidered = true + } + } else { + id = data[linkB:linkE] + } + + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + // keep link and title from reference + link = lr.link + title = lr.title + if altContentConsidered { + altContent = lr.text + } + i++ + + // shortcut reference style link or reference or inline footnote + default: + var id []byte + + // craft the id + if textHasNl { + var b bytes.Buffer + + for j := 1; j < txtE; j++ { + switch { + case data[j] != '\n': + b.WriteByte(data[j]) + case data[j-1] != ' ': + b.WriteByte(' ') + } + } + + id = b.Bytes() + } else { + if t == linkDeferredFootnote { + id = data[2:txtE] // get rid of the ^ + } else { + id = data[1:txtE] + } + } + + footnoteNode = NewNode(Item) + if t == linkInlineFootnote { + // create a new reference + noteID = len(p.notes) + 1 + + var fragment []byte + if len(id) > 0 { + if len(id) < 16 { + fragment = make([]byte, len(id)) + } else { + fragment = make([]byte, 16) + } + copy(fragment, slugify(id)) + } else { + fragment = append([]byte("footnote-"), []byte(strconv.Itoa(noteID))...) 
+ } + + ref := &reference{ + noteID: noteID, + hasBlock: false, + link: fragment, + title: id, + footnote: footnoteNode, + } + + p.notes = append(p.notes, ref) + + link = ref.link + title = ref.title + } else { + // find the reference with matching id + lr, ok := p.getRef(string(id)) + if !ok { + return 0, nil + } + + if t == linkDeferredFootnote { + lr.noteID = len(p.notes) + 1 + lr.footnote = footnoteNode + p.notes = append(p.notes, lr) + } + + // keep link and title from reference + link = lr.link + // if inline footnote, title == footnote contents + title = lr.title + noteID = lr.noteID + } + + // rewind the whitespace + i = txtE + 1 + } + + var uLink []byte + if t == linkNormal || t == linkImg { + if len(link) > 0 { + var uLinkBuf bytes.Buffer + unescapeText(&uLinkBuf, link) + uLink = uLinkBuf.Bytes() + } + + // links need something to click on and somewhere to go + if len(uLink) == 0 || (t == linkNormal && txtE <= 1) { + return 0, nil + } + } + + // call the relevant rendering function + var linkNode *Node + switch t { + case linkNormal: + linkNode = NewNode(Link) + linkNode.Destination = normalizeURI(uLink) + linkNode.Title = title + if len(altContent) > 0 { + linkNode.AppendChild(text(altContent)) + } else { + // links cannot contain other links, so turn off link parsing + // temporarily and recurse + insideLink := p.insideLink + p.insideLink = true + p.inline(linkNode, data[1:txtE]) + p.insideLink = insideLink + } + + case linkImg: + linkNode = NewNode(Image) + linkNode.Destination = uLink + linkNode.Title = title + linkNode.AppendChild(text(data[1:txtE])) + i++ + + case linkInlineFootnote, linkDeferredFootnote: + linkNode = NewNode(Link) + linkNode.Destination = link + linkNode.Title = title + linkNode.NoteID = noteID + linkNode.Footnote = footnoteNode + if t == linkInlineFootnote { + i++ + } + + default: + return 0, nil + } + + return i, linkNode +} + +func (p *Markdown) inlineHTMLComment(data []byte) int { + if len(data) < 5 { + return 0 + } + if data[0] != '<' || data[1] != '!' || data[2] != '-' || data[3] != '-' { + return 0 + } + i := 5 + // scan for an end-of-comment marker, across lines if necessary + for i < len(data) && !(data[i-2] == '-' && data[i-1] == '-' && data[i] == '>') { + i++ + } + // no end-of-comment marker + if i >= len(data) { + return 0 + } + return i + 1 +} + +func stripMailto(link []byte) []byte { + if bytes.HasPrefix(link, []byte("mailto://")) { + return link[9:] + } else if bytes.HasPrefix(link, []byte("mailto:")) { + return link[7:] + } else { + return link + } +} + +// autolinkType specifies a kind of autolink that gets detected. +type autolinkType int + +// These are the possible flag values for the autolink renderer. +const ( + notAutolink autolinkType = iota + normalAutolink + emailAutolink +) + +// '<' when tags or autolinks are allowed +func leftAngle(p *Markdown, data []byte, offset int) (int, *Node) { + data = data[offset:] + altype, end := tagLength(data) + if size := p.inlineHTMLComment(data); size > 0 { + end = size + } + if end > 2 { + if altype != notAutolink { + var uLink bytes.Buffer + unescapeText(&uLink, data[1:end+1-2]) + if uLink.Len() > 0 { + link := uLink.Bytes() + node := NewNode(Link) + node.Destination = link + if altype == emailAutolink { + node.Destination = append([]byte("mailto:"), link...) 
+			}
+			node.AppendChild(text(stripMailto(link)))
+			return end, node
+		}
+	} else {
+		htmlTag := NewNode(HTMLSpan)
+		htmlTag.Literal = data[:end]
+		return end, htmlTag
+	}
+	}
+
+	return end, nil
+}
+
+// '\\' backslash escape
+var escapeChars = []byte("\\`*_{}[]()#+-.!:|&<>~")
+
+func escape(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	if len(data) > 1 {
+		if p.extensions&BackslashLineBreak != 0 && data[1] == '\n' {
+			return 2, NewNode(Hardbreak)
+		}
+		if bytes.IndexByte(escapeChars, data[1]) < 0 {
+			return 0, nil
+		}
+
+		return 2, text(data[1:2])
+	}
+
+	return 2, nil
+}
+
+func unescapeText(ob *bytes.Buffer, src []byte) {
+	i := 0
+	for i < len(src) {
+		org := i
+		for i < len(src) && src[i] != '\\' {
+			i++
+		}
+
+		if i > org {
+			ob.Write(src[org:i])
+		}
+
+		if i+1 >= len(src) {
+			break
+		}
+
+		ob.WriteByte(src[i+1])
+		i += 2
+	}
+}
+
+// '&' escaped when it doesn't belong to an entity
+// valid entities are assumed to be anything matching &#?[A-Za-z0-9]+;
+func entity(p *Markdown, data []byte, offset int) (int, *Node) {
+	data = data[offset:]
+
+	end := 1
+
+	if end < len(data) && data[end] == '#' {
+		end++
+	}
+
+	for end < len(data) && isalnum(data[end]) {
+		end++
+	}
+
+	if end < len(data) && data[end] == ';' {
+		end++ // real entity
+	} else {
+		return 0, nil // lone '&'
+	}
+
+	ent := data[:end]
+	// undo &amp; escaping or it will be converted to &amp;amp; by another
+	// escaper in the renderer
+	if bytes.Equal(ent, []byte("&amp;")) {
+		ent = []byte{'&'}
+	}
+
+	return end, text(ent)
+}
+
+func linkEndsWithEntity(data []byte, linkEnd int) bool {
+	entityRanges := htmlEntityRe.FindAllIndex(data[:linkEnd], -1)
+	return entityRanges != nil && entityRanges[len(entityRanges)-1][1] == linkEnd
+}
+
+// hasPrefixCaseInsensitive is a custom implementation of
+//	strings.HasPrefix(strings.ToLower(s), prefix)
+// we rolled our own because ToLower pulls in a huge machinery of lowercasing
+// anything from Unicode and that's very slow. Since this func will only be
+// used on ASCII protocol prefixes, we can take shortcuts.
+func hasPrefixCaseInsensitive(s, prefix []byte) bool { + if len(s) < len(prefix) { + return false + } + delta := byte('a' - 'A') + for i, b := range prefix { + if b != s[i] && b != s[i]+delta { + return false + } + } + return true +} + +var protocolPrefixes = [][]byte{ + []byte("http://"), + []byte("https://"), + []byte("ftp://"), + []byte("file://"), + []byte("mailto:"), +} + +const shortestPrefix = 6 // len("ftp://"), the shortest of the above + +func maybeAutoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // quick check to rule out most false hits + if p.insideLink || len(data) < offset+shortestPrefix { + return 0, nil + } + for _, prefix := range protocolPrefixes { + endOfHead := offset + 8 // 8 is the len() of the longest prefix + if endOfHead > len(data) { + endOfHead = len(data) + } + if hasPrefixCaseInsensitive(data[offset:endOfHead], prefix) { + return autoLink(p, data, offset) + } + } + return 0, nil +} + +func autoLink(p *Markdown, data []byte, offset int) (int, *Node) { + // Now a more expensive check to see if we're not inside an anchor element + anchorStart := offset + offsetFromAnchor := 0 + for anchorStart > 0 && data[anchorStart] != '<' { + anchorStart-- + offsetFromAnchor++ + } + + anchorStr := anchorRe.Find(data[anchorStart:]) + if anchorStr != nil { + anchorClose := NewNode(HTMLSpan) + anchorClose.Literal = anchorStr[offsetFromAnchor:] + return len(anchorStr) - offsetFromAnchor, anchorClose + } + + // scan backward for a word boundary + rewind := 0 + for offset-rewind > 0 && rewind <= 7 && isletter(data[offset-rewind-1]) { + rewind++ + } + if rewind > 6 { // longest supported protocol is "mailto" which has 6 letters + return 0, nil + } + + origData := data + data = data[offset-rewind:] + + if !isSafeLink(data) { + return 0, nil + } + + linkEnd := 0 + for linkEnd < len(data) && !isEndOfLink(data[linkEnd]) { + linkEnd++ + } + + // Skip punctuation at the end of the link + if (data[linkEnd-1] == '.' || data[linkEnd-1] == ',') && data[linkEnd-2] != '\\' { + linkEnd-- + } + + // But don't skip semicolon if it's a part of escaped entity: + if data[linkEnd-1] == ';' && data[linkEnd-2] != '\\' && !linkEndsWithEntity(data, linkEnd) { + linkEnd-- + } + + // See if the link finishes with a punctuation sign that can be closed. + var copen byte + switch data[linkEnd-1] { + case '"': + copen = '"' + case '\'': + copen = '\'' + case ')': + copen = '(' + case ']': + copen = '[' + case '}': + copen = '{' + default: + copen = 0 + } + + if copen != 0 { + bufEnd := offset - rewind + linkEnd - 2 + + openDelim := 1 + + /* Try to close the final punctuation sign in this same line; + * if we managed to close it outside of the URL, that means that it's + * not part of the URL. If it closes inside the URL, that means it + * is part of the URL. 
+ * + * Examples: + * + * foo http://www.pokemon.com/Pikachu_(Electric) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo (http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric) + * + * foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => http://www.pokemon.com/Pikachu_(Electric)) + * + * (foo http://www.pokemon.com/Pikachu_(Electric)) bar + * => foo http://www.pokemon.com/Pikachu_(Electric) + */ + + for bufEnd >= 0 && origData[bufEnd] != '\n' && openDelim != 0 { + if origData[bufEnd] == data[linkEnd-1] { + openDelim++ + } + + if origData[bufEnd] == copen { + openDelim-- + } + + bufEnd-- + } + + if openDelim == 0 { + linkEnd-- + } + } + + var uLink bytes.Buffer + unescapeText(&uLink, data[:linkEnd]) + + if uLink.Len() > 0 { + node := NewNode(Link) + node.Destination = uLink.Bytes() + node.AppendChild(text(uLink.Bytes())) + return linkEnd, node + } + + return linkEnd, nil +} + +func isEndOfLink(char byte) bool { + return isspace(char) || char == '<' +} + +var validUris = [][]byte{[]byte("http://"), []byte("https://"), []byte("ftp://"), []byte("mailto://")} +var validPaths = [][]byte{[]byte("/"), []byte("./"), []byte("../")} + +func isSafeLink(link []byte) bool { + for _, path := range validPaths { + if len(link) >= len(path) && bytes.Equal(link[:len(path)], path) { + if len(link) == len(path) { + return true + } else if isalnum(link[len(path)]) { + return true + } + } + } + + for _, prefix := range validUris { + // TODO: handle unicode here + // case-insensitive prefix test + if len(link) > len(prefix) && bytes.Equal(bytes.ToLower(link[:len(prefix)]), prefix) && isalnum(link[len(prefix)]) { + return true + } + } + + return false +} + +// return the length of the given tag, or 0 is it's not valid +func tagLength(data []byte) (autolink autolinkType, end int) { + var i, j int + + // a valid tag can't be shorter than 3 chars + if len(data) < 3 { + return notAutolink, 0 + } + + // begins with a '<' optionally followed by '/', followed by letter or number + if data[0] != '<' { + return notAutolink, 0 + } + if data[1] == '/' { + i = 2 + } else { + i = 1 + } + + if !isalnum(data[i]) { + return notAutolink, 0 + } + + // scheme test + autolink = notAutolink + + // try to find the beginning of an URI + for i < len(data) && (isalnum(data[i]) || data[i] == '.' 
|| data[i] == '+' || data[i] == '-') { + i++ + } + + if i > 1 && i < len(data) && data[i] == '@' { + if j = isMailtoAutoLink(data[i:]); j != 0 { + return emailAutolink, i + j + } + } + + if i > 2 && i < len(data) && data[i] == ':' { + autolink = normalAutolink + i++ + } + + // complete autolink test: no whitespace or ' or " + switch { + case i >= len(data): + autolink = notAutolink + case autolink != notAutolink: + j = i + + for i < len(data) { + if data[i] == '\\' { + i += 2 + } else if data[i] == '>' || data[i] == '\'' || data[i] == '"' || isspace(data[i]) { + break + } else { + i++ + } + + } + + if i >= len(data) { + return autolink, 0 + } + if i > j && data[i] == '>' { + return autolink, i + 1 + } + + // one of the forbidden chars has been found + autolink = notAutolink + } + i += bytes.IndexByte(data[i:], '>') + if i < 0 { + return autolink, 0 + } + return autolink, i + 1 +} + +// look for the address part of a mail autolink and '>' +// this is less strict than the original markdown e-mail address matching +func isMailtoAutoLink(data []byte) int { + nb := 0 + + // address is assumed to be: [-@._a-zA-Z0-9]+ with exactly one '@' + for i := 0; i < len(data); i++ { + if isalnum(data[i]) { + continue + } + + switch data[i] { + case '@': + nb++ + + case '-', '.', '_': + break + + case '>': + if nb == 1 { + return i + 1 + } + return 0 + default: + return 0 + } + } + + return 0 +} + +// look for the next emph char, skipping other constructs +func helperFindEmphChar(data []byte, c byte) int { + i := 0 + + for i < len(data) { + for i < len(data) && data[i] != c && data[i] != '`' && data[i] != '[' { + i++ + } + if i >= len(data) { + return 0 + } + // do not count escaped chars + if i != 0 && data[i-1] == '\\' { + i++ + continue + } + if data[i] == c { + return i + } + + if data[i] == '`' { + // skip a code span + tmpI := 0 + i++ + for i < len(data) && data[i] != '`' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } else if data[i] == '[' { + // skip a link + tmpI := 0 + i++ + for i < len(data) && data[i] != ']' { + if tmpI == 0 && data[i] == c { + tmpI = i + } + i++ + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\n') { + i++ + } + if i >= len(data) { + return tmpI + } + if data[i] != '[' && data[i] != '(' { // not a link + if tmpI > 0 { + return tmpI + } + continue + } + cc := data[i] + i++ + for i < len(data) && data[i] != cc { + if tmpI == 0 && data[i] == c { + return i + } + i++ + } + if i >= len(data) { + return tmpI + } + i++ + } + } + return 0 +} + +func helperEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + // skip one symbol if coming from emph3 + if len(data) > 1 && data[0] == c && data[1] == c { + i = 1 + } + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + if i >= len(data) { + return 0, nil + } + + if i+1 < len(data) && data[i+1] == c { + i++ + continue + } + + if data[i] == c && !isspace(data[i-1]) { + + if p.extensions&NoIntraEmphasis != 0 { + if !(i+1 == len(data) || isspace(data[i+1]) || ispunct(data[i+1])) { + continue + } + } + + emph := NewNode(Emph) + p.inline(emph, data[:i]) + return i + 1, emph + } + } + + return 0, nil +} + +func helperDoubleEmphasis(p *Markdown, data []byte, c byte) (int, *Node) { + i := 0 + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + if i+1 < len(data) && data[i] == c && data[i+1] == c && i > 0 && 
!isspace(data[i-1]) { + nodeType := Strong + if c == '~' { + nodeType = Del + } + node := NewNode(nodeType) + p.inline(node, data[:i]) + return i + 2, node + } + i++ + } + return 0, nil +} + +func helperTripleEmphasis(p *Markdown, data []byte, offset int, c byte) (int, *Node) { + i := 0 + origData := data + data = data[offset:] + + for i < len(data) { + length := helperFindEmphChar(data[i:], c) + if length == 0 { + return 0, nil + } + i += length + + // skip whitespace preceded symbols + if data[i] != c || isspace(data[i-1]) { + continue + } + + switch { + case i+2 < len(data) && data[i+1] == c && data[i+2] == c: + // triple symbol found + strong := NewNode(Strong) + em := NewNode(Emph) + strong.AppendChild(em) + p.inline(em, data[:i]) + return i + 3, strong + case (i+1 < len(data) && data[i+1] == c): + // double symbol found, hand over to emph1 + length, node := helperEmphasis(p, origData[offset-2:], c) + if length == 0 { + return 0, nil + } + return length - 2, node + default: + // single symbol found, hand over to emph2 + length, node := helperDoubleEmphasis(p, origData[offset-1:], c) + if length == 0 { + return 0, nil + } + return length - 1, node + } + } + return 0, nil +} + +func text(s []byte) *Node { + node := NewNode(Text) + node.Literal = s + return node +} + +func normalizeURI(s []byte) []byte { + return s // TODO: implement +} diff --git a/vendor/github.com/russross/blackfriday/v2/markdown.go b/vendor/github.com/russross/blackfriday/v2/markdown.go new file mode 100644 index 0000000000..58d2e4538c --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/markdown.go @@ -0,0 +1,950 @@ +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. + +package blackfriday + +import ( + "bytes" + "fmt" + "io" + "strings" + "unicode/utf8" +) + +// +// Markdown parsing and processing +// + +// Version string of the package. Appears in the rendered document when +// CompletePage flag is on. +const Version = "2.0" + +// Extensions is a bitwise or'ed collection of enabled Blackfriday's +// extensions. +type Extensions int + +// These are the supported markdown parsing extensions. +// OR these values together to select multiple extensions. 
+const ( + NoExtensions Extensions = 0 + NoIntraEmphasis Extensions = 1 << iota // Ignore emphasis markers inside words + Tables // Render tables + FencedCode // Render fenced code blocks + Autolink // Detect embedded URLs that are not explicitly marked + Strikethrough // Strikethrough text using ~~test~~ + LaxHTMLBlocks // Loosen up HTML block parsing rules + SpaceHeadings // Be strict about prefix heading rules + HardLineBreak // Translate newlines into line breaks + TabSizeEight // Expand tabs to eight spaces instead of four + Footnotes // Pandoc-style footnotes + NoEmptyLineBeforeBlock // No need to insert an empty line to start a (code, quote, ordered list, unordered list) block + HeadingIDs // specify heading IDs with {#id} + Titleblock // Titleblock ala pandoc + AutoHeadingIDs // Create the heading ID from the text + BackslashLineBreak // Translate trailing backslashes into line breaks + DefinitionLists // Render definition lists + + CommonHTMLFlags HTMLFlags = UseXHTML | Smartypants | + SmartypantsFractions | SmartypantsDashes | SmartypantsLatexDashes + + CommonExtensions Extensions = NoIntraEmphasis | Tables | FencedCode | + Autolink | Strikethrough | SpaceHeadings | HeadingIDs | + BackslashLineBreak | DefinitionLists +) + +// ListType contains bitwise or'ed flags for list and list item objects. +type ListType int + +// These are the possible flag values for the ListItem renderer. +// Multiple flag values may be ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + ListTypeOrdered ListType = 1 << iota + ListTypeDefinition + ListTypeTerm + + ListItemContainsBlock + ListItemBeginningOfList // TODO: figure out if this is of any use now + ListItemEndOfList +) + +// CellAlignFlags holds a type of alignment in a table cell. +type CellAlignFlags int + +// These are the possible flag values for the table cell renderer. +// Only a single one of these values will be used; they are not ORed together. +// These are mostly of interest if you are writing a new output format. +const ( + TableAlignmentLeft CellAlignFlags = 1 << iota + TableAlignmentRight + TableAlignmentCenter = (TableAlignmentLeft | TableAlignmentRight) +) + +// The size of a tab stop. +const ( + TabSizeDefault = 4 + TabSizeDouble = 8 +) + +// blockTags is a set of tags that are recognized as HTML block tags. +// Any of these can be included in markdown text without special escaping. +var blockTags = map[string]struct{}{ + "blockquote": {}, + "del": {}, + "div": {}, + "dl": {}, + "fieldset": {}, + "form": {}, + "h1": {}, + "h2": {}, + "h3": {}, + "h4": {}, + "h5": {}, + "h6": {}, + "iframe": {}, + "ins": {}, + "math": {}, + "noscript": {}, + "ol": {}, + "pre": {}, + "p": {}, + "script": {}, + "style": {}, + "table": {}, + "ul": {}, + + // HTML5 + "address": {}, + "article": {}, + "aside": {}, + "canvas": {}, + "figcaption": {}, + "figure": {}, + "footer": {}, + "header": {}, + "hgroup": {}, + "main": {}, + "nav": {}, + "output": {}, + "progress": {}, + "section": {}, + "video": {}, +} + +// Renderer is the rendering interface. This is mostly of interest if you are +// implementing a new rendering format. +// +// Only an HTML implementation is provided in this repository, see the README +// for external implementations. +type Renderer interface { + // RenderNode is the main rendering method. It will be called once for + // every leaf node and twice for every non-leaf node (first with + // entering=true, then with entering=false). 
The method should write its + // rendition of the node to the supplied writer w. + RenderNode(w io.Writer, node *Node, entering bool) WalkStatus + + // RenderHeader is a method that allows the renderer to produce some + // content preceding the main body of the output document. The header is + // understood in the broad sense here. For example, the default HTML + // renderer will write not only the HTML document preamble, but also the + // table of contents if it was requested. + // + // The method will be passed an entire document tree, in case a particular + // implementation needs to inspect it to produce output. + // + // The output should be written to the supplied writer w. If your + // implementation has no header to write, supply an empty implementation. + RenderHeader(w io.Writer, ast *Node) + + // RenderFooter is a symmetric counterpart of RenderHeader. + RenderFooter(w io.Writer, ast *Node) +} + +// Callback functions for inline parsing. One such function is defined +// for each character that triggers a response when parsing inline data. +type inlineParser func(p *Markdown, data []byte, offset int) (int, *Node) + +// Markdown is a type that holds extensions and the runtime state used by +// Parse, and the renderer. You can not use it directly, construct it with New. +type Markdown struct { + renderer Renderer + referenceOverride ReferenceOverrideFunc + refs map[string]*reference + inlineCallback [256]inlineParser + extensions Extensions + nesting int + maxNesting int + insideLink bool + + // Footnotes need to be ordered as well as available to quickly check for + // presence. If a ref is also a footnote, it's stored both in refs and here + // in notes. Slice is nil if footnotes not enabled. + notes []*reference + + doc *Node + tip *Node // = doc + oldTip *Node + lastMatchedContainer *Node // = doc + allClosed bool +} + +func (p *Markdown) getRef(refid string) (ref *reference, found bool) { + if p.referenceOverride != nil { + r, overridden := p.referenceOverride(refid) + if overridden { + if r == nil { + return nil, false + } + return &reference{ + link: []byte(r.Link), + title: []byte(r.Title), + noteID: 0, + hasBlock: false, + text: []byte(r.Text)}, true + } + } + // refs are case insensitive + ref, found = p.refs[strings.ToLower(refid)] + return ref, found +} + +func (p *Markdown) finalize(block *Node) { + above := block.Parent + block.open = false + p.tip = above +} + +func (p *Markdown) addChild(node NodeType, offset uint32) *Node { + return p.addExistingChild(NewNode(node), offset) +} + +func (p *Markdown) addExistingChild(node *Node, offset uint32) *Node { + for !p.tip.canContain(node.Type) { + p.finalize(p.tip) + } + p.tip.AppendChild(node) + p.tip = node + return node +} + +func (p *Markdown) closeUnmatchedBlocks() { + if !p.allClosed { + for p.oldTip != p.lastMatchedContainer { + parent := p.oldTip.Parent + p.finalize(p.oldTip) + p.oldTip = parent + } + p.allClosed = true + } +} + +// +// +// Public interface +// +// + +// Reference represents the details of a link. +// See the documentation in Options for more details on use-case. +type Reference struct { + // Link is usually the URL the reference points to. + Link string + // Title is the alternate text describing the link in more detail. 
+ Title string + // Text is the optional text to override the ref with if the syntax used was + // [refid][] + Text string +} + +// ReferenceOverrideFunc is expected to be called with a reference string and +// return either a valid Reference type that the reference string maps to or +// nil. If overridden is false, the default reference logic will be executed. +// See the documentation in Options for more details on use-case. +type ReferenceOverrideFunc func(reference string) (ref *Reference, overridden bool) + +// New constructs a Markdown processor. You can use the same With* functions as +// for Run() to customize parser's behavior and the renderer. +func New(opts ...Option) *Markdown { + var p Markdown + for _, opt := range opts { + opt(&p) + } + p.refs = make(map[string]*reference) + p.maxNesting = 16 + p.insideLink = false + docNode := NewNode(Document) + p.doc = docNode + p.tip = docNode + p.oldTip = docNode + p.lastMatchedContainer = docNode + p.allClosed = true + // register inline parsers + p.inlineCallback[' '] = maybeLineBreak + p.inlineCallback['*'] = emphasis + p.inlineCallback['_'] = emphasis + if p.extensions&Strikethrough != 0 { + p.inlineCallback['~'] = emphasis + } + p.inlineCallback['`'] = codeSpan + p.inlineCallback['\n'] = lineBreak + p.inlineCallback['['] = link + p.inlineCallback['<'] = leftAngle + p.inlineCallback['\\'] = escape + p.inlineCallback['&'] = entity + p.inlineCallback['!'] = maybeImage + p.inlineCallback['^'] = maybeInlineFootnote + if p.extensions&Autolink != 0 { + p.inlineCallback['h'] = maybeAutoLink + p.inlineCallback['m'] = maybeAutoLink + p.inlineCallback['f'] = maybeAutoLink + p.inlineCallback['H'] = maybeAutoLink + p.inlineCallback['M'] = maybeAutoLink + p.inlineCallback['F'] = maybeAutoLink + } + if p.extensions&Footnotes != 0 { + p.notes = make([]*reference, 0) + } + return &p +} + +// Option customizes the Markdown processor's default behavior. +type Option func(*Markdown) + +// WithRenderer allows you to override the default renderer. +func WithRenderer(r Renderer) Option { + return func(p *Markdown) { + p.renderer = r + } +} + +// WithExtensions allows you to pick some of the many extensions provided by +// Blackfriday. You can bitwise OR them. +func WithExtensions(e Extensions) Option { + return func(p *Markdown) { + p.extensions = e + } +} + +// WithNoExtensions turns off all extensions and custom behavior. +func WithNoExtensions() Option { + return func(p *Markdown) { + p.extensions = NoExtensions + p.renderer = NewHTMLRenderer(HTMLRendererParameters{ + Flags: HTMLFlagsNone, + }) + } +} + +// WithRefOverride sets an optional function callback that is called every +// time a reference is resolved. +// +// In Markdown, the link reference syntax can be made to resolve a link to +// a reference instead of an inline URL, in one of the following ways: +// +// * [link text][refid] +// * [refid][] +// +// Usually, the refid is defined at the bottom of the Markdown document. If +// this override function is provided, the refid is passed to the override +// function first, before consulting the defined refids at the bottom. If +// the override function indicates an override did not occur, the refids at +// the bottom will be used to fill in the link details. +func WithRefOverride(o ReferenceOverrideFunc) Option { + return func(p *Markdown) { + p.referenceOverride = o + } +} + +// Run is the main entry point to Blackfriday. It parses and renders a +// block of markdown-encoded text. 
+// +// The simplest invocation of Run takes one argument, input: +// output := Run(input) +// This will parse the input with CommonExtensions enabled and render it with +// the default HTMLRenderer (with CommonHTMLFlags). +// +// Variadic arguments opts can customize the default behavior. Since Markdown +// type does not contain exported fields, you can not use it directly. Instead, +// use the With* functions. For example, this will call the most basic +// functionality, with no extensions: +// output := Run(input, WithNoExtensions()) +// +// You can use any number of With* arguments, even contradicting ones. They +// will be applied in order of appearance and the latter will override the +// former: +// output := Run(input, WithNoExtensions(), WithExtensions(exts), +// WithRenderer(yourRenderer)) +func Run(input []byte, opts ...Option) []byte { + r := NewHTMLRenderer(HTMLRendererParameters{ + Flags: CommonHTMLFlags, + }) + optList := []Option{WithRenderer(r), WithExtensions(CommonExtensions)} + optList = append(optList, opts...) + parser := New(optList...) + ast := parser.Parse(input) + var buf bytes.Buffer + parser.renderer.RenderHeader(&buf, ast) + ast.Walk(func(node *Node, entering bool) WalkStatus { + return parser.renderer.RenderNode(&buf, node, entering) + }) + parser.renderer.RenderFooter(&buf, ast) + return buf.Bytes() +} + +// Parse is an entry point to the parsing part of Blackfriday. It takes an +// input markdown document and produces a syntax tree for its contents. This +// tree can then be rendered with a default or custom renderer, or +// analyzed/transformed by the caller to whatever non-standard needs they have. +// The return value is the root node of the syntax tree. +func (p *Markdown) Parse(input []byte) *Node { + p.block(input) + // Walk the tree and finish up some of unfinished blocks + for p.tip != nil { + p.finalize(p.tip) + } + // Walk the tree again and process inline markdown in each block + p.doc.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading || node.Type == TableCell { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) + p.parseRefsToAST() + return p.doc +} + +func (p *Markdown) parseRefsToAST() { + if p.extensions&Footnotes == 0 || len(p.notes) == 0 { + return + } + p.tip = p.doc + block := p.addBlock(List, nil) + block.IsFootnotesList = true + block.ListFlags = ListTypeOrdered + flags := ListItemBeginningOfList + // Note: this loop is intentionally explicit, not range-form. This is + // because the body of the loop will append nested footnotes to p.notes and + // we need to process those late additions. Range form would only walk over + // the fixed initial set. + for i := 0; i < len(p.notes); i++ { + ref := p.notes[i] + p.addExistingChild(ref.footnote, 0) + block := ref.footnote + block.ListFlags = flags | ListTypeOrdered + block.RefLink = ref.link + if ref.hasBlock { + flags |= ListItemContainsBlock + p.block(ref.title) + } else { + p.inline(block, ref.title) + } + flags &^= ListItemBeginningOfList | ListItemContainsBlock + } + above := block.Parent + finalizeList(block) + p.tip = above + block.Walk(func(node *Node, entering bool) WalkStatus { + if node.Type == Paragraph || node.Type == Heading { + p.inline(node, node.content) + node.content = nil + } + return GoToNext + }) +} + +// +// Link references +// +// This section implements support for references that (usually) appear +// as footnotes in a document, and can be referenced anywhere in the document. 
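Before the reference format description continues below, a quick sketch of driving the Run and Parse entry points defined above. This is not part of the vendored code, and the input string is illustrative; every identifier used here (Run, New, WithExtensions, Parse, Walk, GoToNext) is defined in this file or in node.go:

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	input := []byte("Hello, **world**!\n")

	// One-shot: CommonExtensions plus the default HTML renderer.
	fmt.Println(string(blackfriday.Run(input)))

	// Two-step: parse to an AST first, then inspect it with Walk.
	md := blackfriday.New(blackfriday.WithExtensions(blackfriday.CommonExtensions))
	ast := md.Parse(input)
	ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		if entering && node.Type == blackfriday.Strong {
			fmt.Println("found a Strong node")
		}
		return blackfriday.GoToNext
	})
}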
+// The basic format is: +// +// [1]: http://www.google.com/ "Google" +// [2]: http://www.github.com/ "Github" +// +// Anywhere in the document, the reference can be linked by referring to its +// label, i.e., 1 and 2 in this example, as in: +// +// This library is hosted on [Github][2], a git hosting site. +// +// Actual footnotes as specified in Pandoc and supported by some other Markdown +// libraries such as php-markdown are also taken care of. They look like this: +// +// This sentence needs a bit of further explanation.[^note] +// +// [^note]: This is the explanation. +// +// Footnotes should be placed at the end of the document in an ordered list. +// Finally, there are inline footnotes such as: +// +// Inline footnotes^[Also supported.] provide a quick inline explanation, +// but are rendered at the bottom of the document. +// + +// reference holds all information necessary for a reference-style links or +// footnotes. +// +// Consider this markdown with reference-style links: +// +// [link][ref] +// +// [ref]: /url/ "tooltip title" +// +// It will be ultimately converted to this HTML: +// +//
+// <p><a href="/url/" title="tooltip title">link</a></p>
    +// +// And a reference structure will be populated as follows: +// +// p.refs["ref"] = &reference{ +// link: "/url/", +// title: "tooltip title", +// } +// +// Alternatively, reference can contain information about a footnote. Consider +// this markdown: +// +// Text needing a footnote.[^a] +// +// [^a]: This is the note +// +// A reference structure will be populated as follows: +// +// p.refs["a"] = &reference{ +// link: "a", +// title: "This is the note", +// noteID: , +// } +// +// TODO: As you can see, it begs for splitting into two dedicated structures +// for refs and for footnotes. +type reference struct { + link []byte + title []byte + noteID int // 0 if not a footnote ref + hasBlock bool + footnote *Node // a link to the Item node within a list of footnotes + + text []byte // only gets populated by refOverride feature with Reference.Text +} + +func (r *reference) String() string { + return fmt.Sprintf("{link: %q, title: %q, text: %q, noteID: %d, hasBlock: %v}", + r.link, r.title, r.text, r.noteID, r.hasBlock) +} + +// Check whether or not data starts with a reference link. +// If so, it is parsed and stored in the list of references +// (in the render struct). +// Returns the number of bytes to skip to move past it, +// or zero if the first line is not a reference. +func isReference(p *Markdown, data []byte, tabSize int) int { + // up to 3 optional leading spaces + if len(data) < 4 { + return 0 + } + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + + noteID := 0 + + // id part: anything but a newline between brackets + if data[i] != '[' { + return 0 + } + i++ + if p.extensions&Footnotes != 0 { + if i < len(data) && data[i] == '^' { + // we can set it to anything here because the proper noteIds will + // be assigned later during the second pass. It just has to be != 0 + noteID = 1 + i++ + } + } + idOffset := i + for i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != ']' { + i++ + } + if i >= len(data) || data[i] != ']' { + return 0 + } + idEnd := i + // footnotes can have empty ID, like this: [^], but a reference can not be + // empty like this: []. Break early if it's not a footnote and there's no ID + if noteID == 0 && idOffset == idEnd { + return 0 + } + // spacer: colon (space | tab)* newline? 
(space | tab)* + i++ + if i >= len(data) || data[i] != ':' { + return 0 + } + i++ + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && (data[i] == '\n' || data[i] == '\r') { + i++ + if i < len(data) && data[i] == '\n' && data[i-1] == '\r' { + i++ + } + } + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i >= len(data) { + return 0 + } + + var ( + linkOffset, linkEnd int + titleOffset, titleEnd int + lineEnd int + raw []byte + hasBlock bool + ) + + if p.extensions&Footnotes != 0 && noteID != 0 { + linkOffset, linkEnd, raw, hasBlock = scanFootnote(p, data, i, tabSize) + lineEnd = linkEnd + } else { + linkOffset, linkEnd, titleOffset, titleEnd, lineEnd = scanLinkRef(p, data, i) + } + if lineEnd == 0 { + return 0 + } + + // a valid ref has been found + + ref := &reference{ + noteID: noteID, + hasBlock: hasBlock, + } + + if noteID > 0 { + // reusing the link field for the id since footnotes don't have links + ref.link = data[idOffset:idEnd] + // if footnote, it's not really a title, it's the contained text + ref.title = raw + } else { + ref.link = data[linkOffset:linkEnd] + ref.title = data[titleOffset:titleEnd] + } + + // id matches are case-insensitive + id := string(bytes.ToLower(data[idOffset:idEnd])) + + p.refs[id] = ref + + return lineEnd +} + +func scanLinkRef(p *Markdown, data []byte, i int) (linkOffset, linkEnd, titleOffset, titleEnd, lineEnd int) { + // link: whitespace-free sequence, optionally between angle brackets + if data[i] == '<' { + i++ + } + linkOffset = i + for i < len(data) && data[i] != ' ' && data[i] != '\t' && data[i] != '\n' && data[i] != '\r' { + i++ + } + linkEnd = i + if data[linkOffset] == '<' && data[linkEnd-1] == '>' { + linkOffset++ + linkEnd-- + } + + // optional spacer: (space | tab)* (newline | '\'' | '"' | '(' ) + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + if i < len(data) && data[i] != '\n' && data[i] != '\r' && data[i] != '\'' && data[i] != '"' && data[i] != '(' { + return + } + + // compute end-of-line + if i >= len(data) || data[i] == '\r' || data[i] == '\n' { + lineEnd = i + } + if i+1 < len(data) && data[i] == '\r' && data[i+1] == '\n' { + lineEnd++ + } + + // optional (space|tab)* spacer after a newline + if lineEnd > 0 { + i = lineEnd + 1 + for i < len(data) && (data[i] == ' ' || data[i] == '\t') { + i++ + } + } + + // optional title: any non-newline sequence enclosed in '"() alone on its line + if i+1 < len(data) && (data[i] == '\'' || data[i] == '"' || data[i] == '(') { + i++ + titleOffset = i + + // look for EOL + for i < len(data) && data[i] != '\n' && data[i] != '\r' { + i++ + } + if i+1 < len(data) && data[i] == '\n' && data[i+1] == '\r' { + titleEnd = i + 1 + } else { + titleEnd = i + } + + // step back + i-- + for i > titleOffset && (data[i] == ' ' || data[i] == '\t') { + i-- + } + if i > titleOffset && (data[i] == '\'' || data[i] == '"' || data[i] == ')') { + lineEnd = titleEnd + titleEnd = i + } + } + + return +} + +// The first bit of this logic is the same as Parser.listItem, but the rest +// is much simpler. This function simply finds the entire block and shifts it +// over by one tab if it is indeed a block (just returns the line if it's not). +// blockEnd is the end of the section in the input buffer, and contents is the +// extracted text that was shifted over one tab. It will need to be rendered at +// the end of the document. 
+func scanFootnote(p *Markdown, data []byte, i, indentSize int) (blockStart, blockEnd int, contents []byte, hasBlock bool) { + if i == 0 || len(data) == 0 { + return + } + + // skip leading whitespace on first line + for i < len(data) && data[i] == ' ' { + i++ + } + + blockStart = i + + // find the end of the line + blockEnd = i + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[blockEnd:i]) + blockEnd = i + + // process the following lines + containsBlankLine := false + +gatherLines: + for blockEnd < len(data) { + i++ + + // find the end of this line + for i < len(data) && data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[blockEnd:i]) > 0 { + containsBlankLine = true + blockEnd = i + continue + } + + n := 0 + if n = isIndented(data[blockEnd:i], indentSize); n == 0 { + // this is the end of the block. + // we don't want to include this last line in the index. + break gatherLines + } + + // if there were blank lines before this one, insert a new one now + if containsBlankLine { + raw.WriteByte('\n') + containsBlankLine = false + } + + // get rid of that first tab, write to buffer + raw.Write(data[blockEnd+n : i]) + hasBlock = true + + blockEnd = i + } + + if data[blockEnd-1] != '\n' { + raw.WriteByte('\n') + } + + contents = raw.Bytes() + + return +} + +// +// +// Miscellaneous helper functions +// +// + +// Test if a character is a punctuation symbol. +// Taken from a private function in regexp in the stdlib. +func ispunct(c byte) bool { + for _, r := range []byte("!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~") { + if c == r { + return true + } + } + return false +} + +// Test if a character is a whitespace character. +func isspace(c byte) bool { + return ishorizontalspace(c) || isverticalspace(c) +} + +// Test if a character is a horizontal whitespace character. +func ishorizontalspace(c byte) bool { + return c == ' ' || c == '\t' +} + +// Test if a character is a vertical character. +func isverticalspace(c byte) bool { + return c == '\n' || c == '\r' || c == '\f' || c == '\v' +} + +// Test if a character is letter. +func isletter(c byte) bool { + return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') +} + +// Test if a character is a letter or a digit. +// TODO: check when this is looking for ASCII alnum and when it should use unicode +func isalnum(c byte) bool { + return (c >= '0' && c <= '9') || isletter(c) +} + +// Replace tab characters with spaces, aligning to the next TAB_SIZE column. 
+// always ends output with a newline +func expandTabs(out *bytes.Buffer, line []byte, tabSize int) { + // first, check for common cases: no tabs, or only tabs at beginning of line + i, prefix := 0, 0 + slowcase := false + for i = 0; i < len(line); i++ { + if line[i] == '\t' { + if prefix == i { + prefix++ + } else { + slowcase = true + break + } + } + } + + // no need to decode runes if all tabs are at the beginning of the line + if !slowcase { + for i = 0; i < prefix*tabSize; i++ { + out.WriteByte(' ') + } + out.Write(line[prefix:]) + return + } + + // the slow case: we need to count runes to figure out how + // many spaces to insert for each tab + column := 0 + i = 0 + for i < len(line) { + start := i + for i < len(line) && line[i] != '\t' { + _, size := utf8.DecodeRune(line[i:]) + i += size + column++ + } + + if i > start { + out.Write(line[start:i]) + } + + if i >= len(line) { + break + } + + for { + out.WriteByte(' ') + column++ + if column%tabSize == 0 { + break + } + } + + i++ + } +} + +// Find if a line counts as indented or not. +// Returns number of characters the indent is (0 = not indented). +func isIndented(data []byte, indentSize int) int { + if len(data) == 0 { + return 0 + } + if data[0] == '\t' { + return 1 + } + if len(data) < indentSize { + return 0 + } + for i := 0; i < indentSize; i++ { + if data[i] != ' ' { + return 0 + } + } + return indentSize +} + +// Create a url-safe slug for fragments +func slugify(in []byte) []byte { + if len(in) == 0 { + return in + } + out := make([]byte, 0, len(in)) + sym := false + + for _, ch := range in { + if isalnum(ch) { + sym = false + out = append(out, ch) + } else if sym { + continue + } else { + out = append(out, '-') + sym = true + } + } + var a, b int + var ch byte + for a, ch = range out { + if ch != '-' { + break + } + } + for b = len(out) - 1; b > 0; b-- { + if out[b] != '-' { + break + } + } + return out[a : b+1] +} diff --git a/vendor/github.com/russross/blackfriday/v2/node.go b/vendor/github.com/russross/blackfriday/v2/node.go new file mode 100644 index 0000000000..04e6050cee --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/node.go @@ -0,0 +1,360 @@ +package blackfriday + +import ( + "bytes" + "fmt" +) + +// NodeType specifies a type of a single node of a syntax tree. Usually one +// node (and its type) corresponds to a single markdown feature, e.g. emphasis +// or code block. +type NodeType int + +// Constants for identifying different types of nodes. See NodeType. +const ( + Document NodeType = iota + BlockQuote + List + Item + Paragraph + Heading + HorizontalRule + Emph + Strong + Del + Link + Image + Text + HTMLBlock + CodeBlock + Softbreak + Hardbreak + Code + HTMLSpan + Table + TableCell + TableHead + TableBody + TableRow +) + +var nodeTypeNames = []string{ + Document: "Document", + BlockQuote: "BlockQuote", + List: "List", + Item: "Item", + Paragraph: "Paragraph", + Heading: "Heading", + HorizontalRule: "HorizontalRule", + Emph: "Emph", + Strong: "Strong", + Del: "Del", + Link: "Link", + Image: "Image", + Text: "Text", + HTMLBlock: "HTMLBlock", + CodeBlock: "CodeBlock", + Softbreak: "Softbreak", + Hardbreak: "Hardbreak", + Code: "Code", + HTMLSpan: "HTMLSpan", + Table: "Table", + TableCell: "TableCell", + TableHead: "TableHead", + TableBody: "TableBody", + TableRow: "TableRow", +} + +func (t NodeType) String() string { + return nodeTypeNames[t] +} + +// ListData contains fields relevant to a List and Item node type. +type ListData struct { + ListFlags ListType + Tight bool // Skip
<p>
    s around list item data if true + BulletChar byte // '*', '+' or '-' in bullet lists + Delimiter byte // '.' or ')' after the number in ordered lists + RefLink []byte // If not nil, turns this list item into a footnote item and triggers different rendering + IsFootnotesList bool // This is a list of footnotes +} + +// LinkData contains fields relevant to a Link node type. +type LinkData struct { + Destination []byte // Destination is what goes into a href + Title []byte // Title is the tooltip thing that goes in a title attribute + NoteID int // NoteID contains a serial number of a footnote, zero if it's not a footnote + Footnote *Node // If it's a footnote, this is a direct link to the footnote Node. Otherwise nil. +} + +// CodeBlockData contains fields relevant to a CodeBlock node type. +type CodeBlockData struct { + IsFenced bool // Specifies whether it's a fenced code block or an indented one + Info []byte // This holds the info string + FenceChar byte + FenceLength int + FenceOffset int +} + +// TableCellData contains fields relevant to a TableCell node type. +type TableCellData struct { + IsHeader bool // This tells if it's under the header row + Align CellAlignFlags // This holds the value for align attribute +} + +// HeadingData contains fields relevant to a Heading node type. +type HeadingData struct { + Level int // This holds the heading level number + HeadingID string // This might hold heading ID, if present + IsTitleblock bool // Specifies whether it's a title block +} + +// Node is a single element in the abstract syntax tree of the parsed document. +// It holds connections to the structurally neighboring nodes and, for certain +// types of nodes, additional information that might be needed when rendering. +type Node struct { + Type NodeType // Determines the type of the node + Parent *Node // Points to the parent + FirstChild *Node // Points to the first child, if any + LastChild *Node // Points to the last child, if any + Prev *Node // Previous sibling; nil if it's the first child + Next *Node // Next sibling; nil if it's the last child + + Literal []byte // Text contents of the leaf nodes + + HeadingData // Populated if Type is Heading + ListData // Populated if Type is List + CodeBlockData // Populated if Type is CodeBlock + LinkData // Populated if Type is Link + TableCellData // Populated if Type is TableCell + + content []byte // Markdown content of the block nodes + open bool // Specifies an open block node that has not been finished to process yet +} + +// NewNode allocates a node of a specified type. +func NewNode(typ NodeType) *Node { + return &Node{ + Type: typ, + open: true, + } +} + +func (n *Node) String() string { + ellipsis := "" + snippet := n.Literal + if len(snippet) > 16 { + snippet = snippet[:16] + ellipsis = "..." + } + return fmt.Sprintf("%s: '%s%s'", n.Type, snippet, ellipsis) +} + +// Unlink removes node 'n' from the tree. +// It panics if the node is nil. +func (n *Node) Unlink() { + if n.Prev != nil { + n.Prev.Next = n.Next + } else if n.Parent != nil { + n.Parent.FirstChild = n.Next + } + if n.Next != nil { + n.Next.Prev = n.Prev + } else if n.Parent != nil { + n.Parent.LastChild = n.Prev + } + n.Parent = nil + n.Next = nil + n.Prev = nil +} + +// AppendChild adds a node 'child' as a child of 'n'. +// It panics if either node is nil. 
+func (n *Node) AppendChild(child *Node) { + child.Unlink() + child.Parent = n + if n.LastChild != nil { + n.LastChild.Next = child + child.Prev = n.LastChild + n.LastChild = child + } else { + n.FirstChild = child + n.LastChild = child + } +} + +// InsertBefore inserts 'sibling' immediately before 'n'. +// It panics if either node is nil. +func (n *Node) InsertBefore(sibling *Node) { + sibling.Unlink() + sibling.Prev = n.Prev + if sibling.Prev != nil { + sibling.Prev.Next = sibling + } + sibling.Next = n + n.Prev = sibling + sibling.Parent = n.Parent + if sibling.Prev == nil { + sibling.Parent.FirstChild = sibling + } +} + +// IsContainer returns true if 'n' can contain children. +func (n *Node) IsContainer() bool { + switch n.Type { + case Document: + fallthrough + case BlockQuote: + fallthrough + case List: + fallthrough + case Item: + fallthrough + case Paragraph: + fallthrough + case Heading: + fallthrough + case Emph: + fallthrough + case Strong: + fallthrough + case Del: + fallthrough + case Link: + fallthrough + case Image: + fallthrough + case Table: + fallthrough + case TableHead: + fallthrough + case TableBody: + fallthrough + case TableRow: + fallthrough + case TableCell: + return true + default: + return false + } +} + +// IsLeaf returns true if 'n' is a leaf node. +func (n *Node) IsLeaf() bool { + return !n.IsContainer() +} + +func (n *Node) canContain(t NodeType) bool { + if n.Type == List { + return t == Item + } + if n.Type == Document || n.Type == BlockQuote || n.Type == Item { + return t != Item + } + if n.Type == Table { + return t == TableHead || t == TableBody + } + if n.Type == TableHead || n.Type == TableBody { + return t == TableRow + } + if n.Type == TableRow { + return t == TableCell + } + return false +} + +// WalkStatus allows NodeVisitor to have some control over the tree traversal. +// It is returned from NodeVisitor and different values allow Node.Walk to +// decide which node to go to next. +type WalkStatus int + +const ( + // GoToNext is the default traversal of every node. + GoToNext WalkStatus = iota + // SkipChildren tells walker to skip all children of current node. + SkipChildren + // Terminate tells walker to terminate the traversal. + Terminate +) + +// NodeVisitor is a callback to be called when traversing the syntax tree. +// Called twice for every node: once with entering=true when the branch is +// first visited, then with entering=false after all the children are done. +type NodeVisitor func(node *Node, entering bool) WalkStatus + +// Walk is a convenience method that instantiates a walker and starts a +// traversal of subtree rooted at n. 
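To make the traversal contract concrete ahead of the Walk implementation below, here is a small sketch (not vendored code) that assembles a tiny AST with NewNode and AppendChild and then walks it; containers are visited twice, leaves once:

package main

import (
	"fmt"

	"github.com/russross/blackfriday/v2"
)

func main() {
	// Build Document -> Paragraph -> Text by hand.
	doc := blackfriday.NewNode(blackfriday.Document)
	para := blackfriday.NewNode(blackfriday.Paragraph)
	doc.AppendChild(para)
	txt := blackfriday.NewNode(blackfriday.Text)
	txt.Literal = []byte("hello")
	para.AppendChild(txt)

	// Containers are visited with entering=true and then entering=false;
	// leaves such as Text are visited exactly once with entering=true.
	doc.Walk(func(n *blackfriday.Node, entering bool) blackfriday.WalkStatus {
		fmt.Println(n.Type, entering)
		return blackfriday.GoToNext
	})
}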
+func (n *Node) Walk(visitor NodeVisitor) { + w := newNodeWalker(n) + for w.current != nil { + status := visitor(w.current, w.entering) + switch status { + case GoToNext: + w.next() + case SkipChildren: + w.entering = false + w.next() + case Terminate: + return + } + } +} + +type nodeWalker struct { + current *Node + root *Node + entering bool +} + +func newNodeWalker(root *Node) *nodeWalker { + return &nodeWalker{ + current: root, + root: root, + entering: true, + } +} + +func (nw *nodeWalker) next() { + if (!nw.current.IsContainer() || !nw.entering) && nw.current == nw.root { + nw.current = nil + return + } + if nw.entering && nw.current.IsContainer() { + if nw.current.FirstChild != nil { + nw.current = nw.current.FirstChild + nw.entering = true + } else { + nw.entering = false + } + } else if nw.current.Next == nil { + nw.current = nw.current.Parent + nw.entering = false + } else { + nw.current = nw.current.Next + nw.entering = true + } +} + +func dump(ast *Node) { + fmt.Println(dumpString(ast)) +} + +func dumpR(ast *Node, depth int) string { + if ast == nil { + return "" + } + indent := bytes.Repeat([]byte("\t"), depth) + content := ast.Literal + if content == nil { + content = ast.content + } + result := fmt.Sprintf("%s%s(%q)\n", indent, ast.Type, content) + for n := ast.FirstChild; n != nil; n = n.Next { + result += dumpR(n, depth+1) + } + return result +} + +func dumpString(ast *Node) string { + return dumpR(ast, 0) +} diff --git a/vendor/github.com/russross/blackfriday/v2/smartypants.go b/vendor/github.com/russross/blackfriday/v2/smartypants.go new file mode 100644 index 0000000000..3a220e9424 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/v2/smartypants.go @@ -0,0 +1,457 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// +// SmartyPants rendering +// +// + +package blackfriday + +import ( + "bytes" + "io" +) + +// SPRenderer is a struct containing state of a Smartypants renderer. +type SPRenderer struct { + inSingleQuote bool + inDoubleQuote bool + callbacks [256]smartCallback +} + +func wordBoundary(c byte) bool { + return c == 0 || isspace(c) || ispunct(c) +} + +func tolower(c byte) byte { + if c >= 'A' && c <= 'Z' { + return c - 'A' + 'a' + } + return c +} + +func isdigit(c byte) bool { + return c >= '0' && c <= '9' +} + +func smartQuoteHelper(out *bytes.Buffer, previousChar byte, nextChar byte, quote byte, isOpen *bool, addNBSP bool) bool { + // edge of the buffer is likely to be a tag that we don't get to see, + // so we treat it like text sometimes + + // enumerate all sixteen possibilities for (previousChar, nextChar) + // each can be one of {0, space, punct, other} + switch { + case previousChar == 0 && nextChar == 0: + // context is not any help here, so toggle + *isOpen = !*isOpen + case isspace(previousChar) && nextChar == 0: + // [ "] might be [ "foo...] + *isOpen = true + case ispunct(previousChar) && nextChar == 0: + // [!"] hmm... could be [Run!"] or [("...] + *isOpen = false + case /* isnormal(previousChar) && */ nextChar == 0: + // [a"] is probably a close + *isOpen = false + case previousChar == 0 && isspace(nextChar): + // [" ] might be [...foo" ] + *isOpen = false + case isspace(previousChar) && isspace(nextChar): + // [ " ] context is not any help here, so toggle + *isOpen = !*isOpen + case ispunct(previousChar) && isspace(nextChar): + // [!" 
] is probably a close
+		*isOpen = false
+	case /* isnormal(previousChar) && */ isspace(nextChar):
+		// [a" ] this is one of the easy cases
+		*isOpen = false
+	case previousChar == 0 && ispunct(nextChar):
+		// ["!] hmm... could be ["$1.95] or ["!...]
+		*isOpen = false
+	case isspace(previousChar) && ispunct(nextChar):
+		// [ "!] looks more like [ "$1.95]
+		*isOpen = true
+	case ispunct(previousChar) && ispunct(nextChar):
+		// [!"!] context is not any help here, so toggle
+		*isOpen = !*isOpen
+	case /* isnormal(previousChar) && */ ispunct(nextChar):
+		// [a"!] is probably a close
+		*isOpen = false
+	case previousChar == 0 /* && isnormal(nextChar) */ :
+		// ["a] is probably an open
+		*isOpen = true
+	case isspace(previousChar) /* && isnormal(nextChar) */ :
+		// [ "a] this is one of the easy cases
+		*isOpen = true
+	case ispunct(previousChar) /* && isnormal(nextChar) */ :
+		// [!"a] is probably an open
+		*isOpen = true
+	default:
+		// [a'b] maybe a contraction?
+		*isOpen = false
+	}
+
+	// Note that with the limited lookahead, this non-breaking
+	// space will also be appended to single double quotes.
+	if addNBSP && !*isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	out.WriteByte('&')
+	if *isOpen {
+		out.WriteByte('l')
+	} else {
+		out.WriteByte('r')
+	}
+	out.WriteByte(quote)
+	out.WriteString("quo;")
+
+	if addNBSP && *isOpen {
+		out.WriteString("&nbsp;")
+	}
+
+	return true
+}
+
+func (r *SPRenderer) smartSingleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		t1 := tolower(text[1])
+
+		if t1 == '\'' {
+			nextChar := byte(0)
+			if len(text) >= 3 {
+				nextChar = text[2]
+			}
+			if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+				return 1
+			}
+		}
+
+		if (t1 == 's' || t1 == 't' || t1 == 'm' || t1 == 'd') && (len(text) < 3 || wordBoundary(text[2])) {
+			out.WriteString("&rsquo;")
+			return 0
+		}
+
+		if len(text) >= 3 {
+			t2 := tolower(text[2])
+
+			if ((t1 == 'r' && t2 == 'e') || (t1 == 'l' && t2 == 'l') || (t1 == 'v' && t2 == 'e')) &&
+				(len(text) < 4 || wordBoundary(text[3])) {
+				out.WriteString("&rsquo;")
+				return 0
+			}
+		}
+	}
+
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if smartQuoteHelper(out, previousChar, nextChar, 's', &r.inSingleQuote, false) {
+		return 0
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartParens(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 {
+		t1 := tolower(text[1])
+		t2 := tolower(text[2])
+
+		if t1 == 'c' && t2 == ')' {
+			out.WriteString("&copy;")
+			return 2
+		}
+
+		if t1 == 'r' && t2 == ')' {
+			out.WriteString("&reg;")
+			return 2
+		}
+
+		if len(text) >= 4 && t1 == 't' && t2 == 'm' && text[3] == ')' {
+			out.WriteString("&trade;")
+			return 3
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDash(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 {
+		if text[1] == '-' {
+			out.WriteString("&mdash;")
+			return 1
+		}
+
+		if wordBoundary(previousChar) && wordBoundary(text[1]) {
+			out.WriteString("&ndash;")
+			return 0
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDashLatex(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '-' && text[2] == '-' {
+		out.WriteString("&mdash;")
+		return 2
+	}
+	if len(text) >= 2 && text[1] == '-' {
+		out.WriteString("&ndash;")
+		return 1
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartAmpVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte, addNBSP bool) int {
+	if bytes.HasPrefix(text, []byte("&quot;")) {
+		nextChar := byte(0)
+		if len(text) >= 7 {
+			nextChar = text[6]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, addNBSP) {
+			return 5
+		}
+	}
+
+	if bytes.HasPrefix(text, []byte("&#0;")) {
+		return 3
+	}
+
+	out.WriteByte('&')
+	return 0
+}
+
+func (r *SPRenderer) smartAmp(angledQuotes, addNBSP bool) func(*bytes.Buffer, byte, []byte) int {
+	var quote byte = 'd'
+	if angledQuotes {
+		quote = 'a'
+	}
+
+	return func(out *bytes.Buffer, previousChar byte, text []byte) int {
+		return r.smartAmpVariant(out, previousChar, text, quote, addNBSP)
+	}
+}
+
+func (r *SPRenderer) smartPeriod(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 3 && text[1] == '.' && text[2] == '.' {
+		out.WriteString("&hellip;")
+		return 2
+	}
+
+	if len(text) >= 5 && text[1] == ' ' && text[2] == '.' && text[3] == ' ' && text[4] == '.' {
+		out.WriteString("&hellip;")
+		return 4
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartBacktick(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if len(text) >= 2 && text[1] == '`' {
+		nextChar := byte(0)
+		if len(text) >= 3 {
+			nextChar = text[2]
+		}
+		if smartQuoteHelper(out, previousChar, nextChar, 'd', &r.inDoubleQuote, false) {
+			return 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumberGeneric(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		// is it of the form digits/digits(word boundary)?, i.e., \d+/\d+\b
+		// note: check for regular slash (/) or fraction slash (⁄, 0x2044, or 0xe2 81 84 in utf-8)
+		// and avoid changing dates like 1/23/2005 into fractions.
+		numEnd := 0
+		for len(text) > numEnd && isdigit(text[numEnd]) {
+			numEnd++
+		}
+		if numEnd == 0 {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denStart := numEnd + 1
+		if len(text) > numEnd+3 && text[numEnd] == 0xe2 && text[numEnd+1] == 0x81 && text[numEnd+2] == 0x84 {
+			denStart = numEnd + 3
+		} else if len(text) < numEnd+2 || text[numEnd] != '/' {
+			out.WriteByte(text[0])
+			return 0
+		}
+		denEnd := denStart
+		for len(text) > denEnd && isdigit(text[denEnd]) {
+			denEnd++
+		}
+		if denEnd == denStart {
+			out.WriteByte(text[0])
+			return 0
+		}
+		if len(text) == denEnd || wordBoundary(text[denEnd]) && text[denEnd] != '/' {
+			out.WriteString("<sup>")
+			out.Write(text[:numEnd])
+			out.WriteString("</sup>&frasl;<sub>")
+			out.Write(text[denStart:denEnd])
+			out.WriteString("</sub>")
+			return denEnd - 1
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartNumber(out *bytes.Buffer, previousChar byte, text []byte) int {
+	if wordBoundary(previousChar) && previousChar != '/' && len(text) >= 3 {
+		if text[0] == '1' && text[1] == '/' && text[2] == '2' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' {
+				out.WriteString("&frac12;")
+				return 2
+			}
+		}
+
+		if text[0] == '1' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 5 && tolower(text[3]) == 't' && tolower(text[4]) == 'h') {
+				out.WriteString("&frac14;")
+				return 2
+			}
+		}
+
+		if text[0] == '3' && text[1] == '/' && text[2] == '4' {
+			if len(text) < 4 || wordBoundary(text[3]) && text[3] != '/' || (len(text) >= 6 && tolower(text[3]) == 't' && tolower(text[4]) == 'h' && tolower(text[5]) == 's') {
+				out.WriteString("&frac34;")
+				return 2
+			}
+		}
+	}
+
+	out.WriteByte(text[0])
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuoteVariant(out *bytes.Buffer, previousChar byte, text []byte, quote byte) int {
+	nextChar := byte(0)
+	if len(text) > 1 {
+		nextChar = text[1]
+	}
+	if !smartQuoteHelper(out, previousChar, nextChar, quote, &r.inDoubleQuote, false) {
+		out.WriteString("&quot;")
+	}
+
+	return 0
+}
+
+func (r *SPRenderer) smartDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'd')
+}
+
+func (r *SPRenderer) smartAngledDoubleQuote(out *bytes.Buffer, previousChar byte, text []byte) int {
+	return r.smartDoubleQuoteVariant(out, previousChar, text, 'a')
+}
+
+func (r *SPRenderer) smartLeftAngle(out *bytes.Buffer, previousChar byte, text []byte) int {
+	i := 0
+
+	for i < len(text) && text[i] != '>' {
+		i++
+	}
+
+	out.Write(text[:i+1])
+	return i
+}
+
+type smartCallback func(out *bytes.Buffer, previousChar byte, text []byte) int
+
+// NewSmartypantsRenderer constructs a Smartypants renderer object.
+func NewSmartypantsRenderer(flags HTMLFlags) *SPRenderer {
+	var (
+		r SPRenderer
+
+		smartAmpAngled      = r.smartAmp(true, false)
+		smartAmpAngledNBSP  = r.smartAmp(true, true)
+		smartAmpRegular     = r.smartAmp(false, false)
+		smartAmpRegularNBSP = r.smartAmp(false, true)
+
+		addNBSP = flags&SmartypantsQuotesNBSP != 0
+	)
+
+	if flags&SmartypantsAngledQuotes == 0 {
+		r.callbacks['"'] = r.smartDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpRegular
+		} else {
+			r.callbacks['&'] = smartAmpRegularNBSP
+		}
+	} else {
+		r.callbacks['"'] = r.smartAngledDoubleQuote
+		if !addNBSP {
+			r.callbacks['&'] = smartAmpAngled
+		} else {
+			r.callbacks['&'] = smartAmpAngledNBSP
+		}
+	}
+	r.callbacks['\''] = r.smartSingleQuote
+	r.callbacks['('] = r.smartParens
+	if flags&SmartypantsDashes != 0 {
+		if flags&SmartypantsLatexDashes == 0 {
+			r.callbacks['-'] = r.smartDash
+		} else {
+			r.callbacks['-'] = r.smartDashLatex
+		}
+	}
+	r.callbacks['.'] = r.smartPeriod
+	if flags&SmartypantsFractions == 0 {
+		r.callbacks['1'] = r.smartNumber
+		r.callbacks['3'] = r.smartNumber
+	} else {
+		for ch := '1'; ch <= '9'; ch++ {
+			r.callbacks[ch] = r.smartNumberGeneric
+		}
+	}
+	r.callbacks['<'] = r.smartLeftAngle
+	r.callbacks['`'] = r.smartBacktick
+	return &r
+}
+
+// Process is the entry point of the Smartypants renderer.
+func (r *SPRenderer) Process(w io.Writer, text []byte) {
+	mark := 0
+	for i := 0; i < len(text); i++ {
+		if action := r.callbacks[text[i]]; action != nil {
+			if i > mark {
+				w.Write(text[mark:i])
+			}
+			previousChar := byte(0)
+			if i > 0 {
+				previousChar = text[i-1]
+			}
+			var tmp bytes.Buffer
+			i += action(&tmp, previousChar, text[i:])
+			w.Write(tmp.Bytes())
+			mark = i + 1
+		}
+	}
+	if mark < len(text) {
+		w.Write(text[mark:])
+	}
+}
diff --git a/vendor/golang.org/x/net/internal/socks/client.go b/vendor/golang.org/x/net/internal/socks/client.go
new file mode 100644
index 0000000000..3d6f516a59
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/socks/client.go
@@ -0,0 +1,168 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" + "time" +) + +var ( + noDeadline = time.Time{} + aLongTimeAgo = time.Unix(1, 0) +) + +func (d *Dialer) connect(ctx context.Context, c net.Conn, address string) (_ net.Addr, ctxErr error) { + host, port, err := splitHostPort(address) + if err != nil { + return nil, err + } + if deadline, ok := ctx.Deadline(); ok && !deadline.IsZero() { + c.SetDeadline(deadline) + defer c.SetDeadline(noDeadline) + } + if ctx != context.Background() { + errCh := make(chan error, 1) + done := make(chan struct{}) + defer func() { + close(done) + if ctxErr == nil { + ctxErr = <-errCh + } + }() + go func() { + select { + case <-ctx.Done(): + c.SetDeadline(aLongTimeAgo) + errCh <- ctx.Err() + case <-done: + errCh <- nil + } + }() + } + + b := make([]byte, 0, 6+len(host)) // the size here is just an estimate + b = append(b, Version5) + if len(d.AuthMethods) == 0 || d.Authenticate == nil { + b = append(b, 1, byte(AuthMethodNotRequired)) + } else { + ams := d.AuthMethods + if len(ams) > 255 { + return nil, errors.New("too many authentication methods") + } + b = append(b, byte(len(ams))) + for _, am := range ams { + b = append(b, byte(am)) + } + } + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:2]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + am := AuthMethod(b[1]) + if am == AuthMethodNoAcceptableMethods { + return nil, errors.New("no acceptable authentication methods") + } + if d.Authenticate != nil { + if ctxErr = d.Authenticate(ctx, c, am); ctxErr != nil { + return + } + } + + b = b[:0] + b = append(b, Version5, byte(d.cmd), 0) + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + b = append(b, AddrTypeIPv4) + b = append(b, ip4...) + } else if ip6 := ip.To16(); ip6 != nil { + b = append(b, AddrTypeIPv6) + b = append(b, ip6...) + } else { + return nil, errors.New("unknown address type") + } + } else { + if len(host) > 255 { + return nil, errors.New("FQDN too long") + } + b = append(b, AddrTypeFQDN) + b = append(b, byte(len(host))) + b = append(b, host...) 
+ } + b = append(b, byte(port>>8), byte(port)) + if _, ctxErr = c.Write(b); ctxErr != nil { + return + } + + if _, ctxErr = io.ReadFull(c, b[:4]); ctxErr != nil { + return + } + if b[0] != Version5 { + return nil, errors.New("unexpected protocol version " + strconv.Itoa(int(b[0]))) + } + if cmdErr := Reply(b[1]); cmdErr != StatusSucceeded { + return nil, errors.New("unknown error " + cmdErr.String()) + } + if b[2] != 0 { + return nil, errors.New("non-zero reserved field") + } + l := 2 + var a Addr + switch b[3] { + case AddrTypeIPv4: + l += net.IPv4len + a.IP = make(net.IP, net.IPv4len) + case AddrTypeIPv6: + l += net.IPv6len + a.IP = make(net.IP, net.IPv6len) + case AddrTypeFQDN: + if _, err := io.ReadFull(c, b[:1]); err != nil { + return nil, err + } + l += int(b[0]) + default: + return nil, errors.New("unknown address type " + strconv.Itoa(int(b[3]))) + } + if cap(b) < l { + b = make([]byte, l) + } else { + b = b[:l] + } + if _, ctxErr = io.ReadFull(c, b); ctxErr != nil { + return + } + if a.IP != nil { + copy(a.IP, b) + } else { + a.Name = string(b[:len(b)-2]) + } + a.Port = int(b[len(b)-2])<<8 | int(b[len(b)-1]) + return &a, nil +} + +func splitHostPort(address string) (string, int, error) { + host, port, err := net.SplitHostPort(address) + if err != nil { + return "", 0, err + } + portnum, err := strconv.Atoi(port) + if err != nil { + return "", 0, err + } + if 1 > portnum || portnum > 0xffff { + return "", 0, errors.New("port number out of range " + port) + } + return host, portnum, nil +} diff --git a/vendor/golang.org/x/net/internal/socks/socks.go b/vendor/golang.org/x/net/internal/socks/socks.go new file mode 100644 index 0000000000..84fcc32b63 --- /dev/null +++ b/vendor/golang.org/x/net/internal/socks/socks.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package socks provides a SOCKS version 5 client implementation. +// +// SOCKS protocol version 5 is defined in RFC 1928. +// Username/Password authentication for SOCKS version 5 is defined in +// RFC 1929. +package socks + +import ( + "context" + "errors" + "io" + "net" + "strconv" +) + +// A Command represents a SOCKS command. +type Command int + +func (cmd Command) String() string { + switch cmd { + case CmdConnect: + return "socks connect" + case cmdBind: + return "socks bind" + default: + return "socks " + strconv.Itoa(int(cmd)) + } +} + +// An AuthMethod represents a SOCKS authentication method. +type AuthMethod int + +// A Reply represents a SOCKS command reply code. +type Reply int + +func (code Reply) String() string { + switch code { + case StatusSucceeded: + return "succeeded" + case 0x01: + return "general SOCKS server failure" + case 0x02: + return "connection not allowed by ruleset" + case 0x03: + return "network unreachable" + case 0x04: + return "host unreachable" + case 0x05: + return "connection refused" + case 0x06: + return "TTL expired" + case 0x07: + return "command not supported" + case 0x08: + return "address type not supported" + default: + return "unknown code: " + strconv.Itoa(int(code)) + } +} + +// Wire protocol constants. 
+const ( + Version5 = 0x05 + + AddrTypeIPv4 = 0x01 + AddrTypeFQDN = 0x03 + AddrTypeIPv6 = 0x04 + + CmdConnect Command = 0x01 // establishes an active-open forward proxy connection + cmdBind Command = 0x02 // establishes a passive-open forward proxy connection + + AuthMethodNotRequired AuthMethod = 0x00 // no authentication required + AuthMethodUsernamePassword AuthMethod = 0x02 // use username/password + AuthMethodNoAcceptableMethods AuthMethod = 0xff // no acceptable authentication methods + + StatusSucceeded Reply = 0x00 +) + +// An Addr represents a SOCKS-specific address. +// Either Name or IP is used exclusively. +type Addr struct { + Name string // fully-qualified domain name + IP net.IP + Port int +} + +func (a *Addr) Network() string { return "socks" } + +func (a *Addr) String() string { + if a == nil { + return "" + } + port := strconv.Itoa(a.Port) + if a.IP == nil { + return net.JoinHostPort(a.Name, port) + } + return net.JoinHostPort(a.IP.String(), port) +} + +// A Conn represents a forward proxy connection. +type Conn struct { + net.Conn + + boundAddr net.Addr +} + +// BoundAddr returns the address assigned by the proxy server for +// connecting to the command target address from the proxy server. +func (c *Conn) BoundAddr() net.Addr { + if c == nil { + return nil + } + return c.boundAddr +} + +// A Dialer holds SOCKS-specific options. +type Dialer struct { + cmd Command // either CmdConnect or cmdBind + proxyNetwork string // network between a proxy server and a client + proxyAddress string // proxy server address + + // ProxyDial specifies the optional dial function for + // establishing the transport connection. + ProxyDial func(context.Context, string, string) (net.Conn, error) + + // AuthMethods specifies the list of request authentication + // methods. + // If empty, SOCKS client requests only AuthMethodNotRequired. + AuthMethods []AuthMethod + + // Authenticate specifies the optional authentication + // function. It must be non-nil when AuthMethods is not empty. + // It must return an error when the authentication is failed. + Authenticate func(context.Context, io.ReadWriter, AuthMethod) error +} + +// DialContext connects to the provided address on the provided +// network. +// +// The returned error value may be a net.OpError. When the Op field of +// net.OpError contains "socks", the Source field contains a proxy +// server address and the Addr field contains a command target +// address. +// +// See func Dial of the net package of standard library for a +// description of the network and address parameters. 
+func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(ctx, d.proxyNetwork, d.proxyAddress) + } else { + var dd net.Dialer + c, err = dd.DialContext(ctx, d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + a, err := d.connect(ctx, c, address) + if err != nil { + c.Close() + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return &Conn{Conn: c, boundAddr: a}, nil +} + +// DialWithConn initiates a connection from SOCKS server to the target +// network and address using the connection c that is already +// connected to the SOCKS server. +// +// It returns the connection's local address assigned by the SOCKS +// server. +func (d *Dialer) DialWithConn(ctx context.Context, c net.Conn, network, address string) (net.Addr, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if ctx == nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: errors.New("nil context")} + } + a, err := d.connect(ctx, c, address) + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + return a, nil +} + +// Dial connects to the provided address on the provided network. +// +// Unlike DialContext, it returns a raw transport connection instead +// of a forward proxy connection. +// +// Deprecated: Use DialContext or DialWithConn instead. 
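For contrast with the deprecated Dial that follows, the DialContext path above would be driven roughly as sketched here. The proxy address, credentials, and target are placeholders, and since this package is internal to x/net, real callers reach it through golang.org/x/net/proxy; the direct import below is for illustration only:

package main

import (
	"context"
	"log"

	"golang.org/x/net/internal/socks"
)

func main() {
	// NewDialer is defined later in this file.
	d := socks.NewDialer("tcp", "127.0.0.1:1080") // proxy address is a placeholder
	d.AuthMethods = []socks.AuthMethod{
		socks.AuthMethodNotRequired,
		socks.AuthMethodUsernamePassword,
	}
	up := socks.UsernamePassword{Username: "user", Password: "secret"}
	d.Authenticate = up.Authenticate

	conn, err := d.DialContext(context.Background(), "tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}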
+func (d *Dialer) Dial(network, address string) (net.Conn, error) { + if err := d.validateTarget(network, address); err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + var err error + var c net.Conn + if d.ProxyDial != nil { + c, err = d.ProxyDial(context.Background(), d.proxyNetwork, d.proxyAddress) + } else { + c, err = net.Dial(d.proxyNetwork, d.proxyAddress) + } + if err != nil { + proxy, dst, _ := d.pathAddrs(address) + return nil, &net.OpError{Op: d.cmd.String(), Net: network, Source: proxy, Addr: dst, Err: err} + } + if _, err := d.DialWithConn(context.Background(), c, network, address); err != nil { + c.Close() + return nil, err + } + return c, nil +} + +func (d *Dialer) validateTarget(network, address string) error { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return errors.New("network not implemented") + } + switch d.cmd { + case CmdConnect, cmdBind: + default: + return errors.New("command not implemented") + } + return nil +} + +func (d *Dialer) pathAddrs(address string) (proxy, dst net.Addr, err error) { + for i, s := range []string{d.proxyAddress, address} { + host, port, err := splitHostPort(s) + if err != nil { + return nil, nil, err + } + a := &Addr{Port: port} + a.IP = net.ParseIP(host) + if a.IP == nil { + a.Name = host + } + if i == 0 { + proxy = a + } else { + dst = a + } + } + return +} + +// NewDialer returns a new Dialer that dials through the provided +// proxy server's network and address. +func NewDialer(network, address string) *Dialer { + return &Dialer{proxyNetwork: network, proxyAddress: address, cmd: CmdConnect} +} + +const ( + authUsernamePasswordVersion = 0x01 + authStatusSucceeded = 0x00 +) + +// UsernamePassword are the credentials for the username/password +// authentication method. +type UsernamePassword struct { + Username string + Password string +} + +// Authenticate authenticates a pair of username and password with the +// proxy server. +func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter, auth AuthMethod) error { + switch auth { + case AuthMethodNotRequired: + return nil + case AuthMethodUsernamePassword: + if len(up.Username) == 0 || len(up.Username) > 255 || len(up.Password) > 255 { + return errors.New("invalid username/password") + } + b := []byte{authUsernamePasswordVersion} + b = append(b, byte(len(up.Username))) + b = append(b, up.Username...) + b = append(b, byte(len(up.Password))) + b = append(b, up.Password...) + // TODO(mikio): handle IO deadlines and cancelation if + // necessary + if _, err := rw.Write(b); err != nil { + return err + } + if _, err := io.ReadFull(rw, b[:2]); err != nil { + return err + } + if b[0] != authUsernamePasswordVersion { + return errors.New("invalid username/password version") + } + if b[1] != authStatusSucceeded { + return errors.New("username/password authentication failed") + } + return nil + } + return errors.New("unsupported authentication method " + strconv.Itoa(int(auth))) +} diff --git a/vendor/golang.org/x/net/proxy/dial.go b/vendor/golang.org/x/net/proxy/dial.go new file mode 100644 index 0000000000..811c2e4e96 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/dial.go @@ -0,0 +1,54 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +// A ContextDialer dials using a context. 
+type ContextDialer interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +// Dial works like DialContext on net.Dialer but using a dialer returned by FromEnvironment. +// +// The passed ctx is only used for returning the Conn, not the lifetime of the Conn. +// +// Custom dialers (registered via RegisterDialerType) that do not implement ContextDialer +// can leak a goroutine for as long as it takes the underlying Dialer implementation to timeout. +// +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func Dial(ctx context.Context, network, address string) (net.Conn, error) { + d := FromEnvironment() + if xd, ok := d.(ContextDialer); ok { + return xd.DialContext(ctx, network, address) + } + return dialContext(ctx, d, network, address) +} + +// WARNING: this can leak a goroutine for as long as the underlying Dialer implementation takes to timeout +// A Conn returned from a successful Dial after the context has been cancelled will be immediately closed. +func dialContext(ctx context.Context, d Dialer, network, address string) (net.Conn, error) { + var ( + conn net.Conn + done = make(chan struct{}, 1) + err error + ) + go func() { + conn, err = d.Dial(network, address) + close(done) + if conn != nil && ctx.Err() != nil { + conn.Close() + } + }() + select { + case <-ctx.Done(): + err = ctx.Err() + case <-done: + } + return conn, err +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 0000000000..3d66bdef9d --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,31 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" +) + +type direct struct{} + +// Direct implements Dialer by making network connections directly using net.Dial or net.DialContext. +var Direct = direct{} + +var ( + _ Dialer = Direct + _ ContextDialer = Direct +) + +// Dial directly invokes net.Dial with the supplied parameters. +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} + +// DialContext instantiates a net.Dialer and invokes its DialContext receiver with the supplied parameters. +func (direct) DialContext(ctx context.Context, network, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 0000000000..d7d4b8b6e3 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,151 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the host name +// requested matches one of a number of exceptions. +type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. 
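+//
+// Editor's note (illustrative, not part of the vendored file): a typical
+// construction mirrors NO_PROXY handling; proxyDialer and the rule string
+// below are hypothetical.
+//
+//	perHost := NewPerHost(proxyDialer, Direct)
+//	perHost.AddFromString("10.0.0.0/8,*.internal.example.com,localhost")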
+func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +// DialContext connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + d := p.dialerForRequest(host) + if x, ok := d.(ContextDialer); ok { + return x.DialContext(ctx, network, addr) + } + return dialContext(ctx, d, network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone ".example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a host name +// (localhost). A best effort is made to parse the string and errors are +// ignored. +func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + zone = strings.TrimSuffix(zone, ".") + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a host name that will use the bypass proxy. 
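+//
+// Editor's note (illustrative, not part of the vendored file): a trailing
+// dot is stripped, so both of these hypothetical calls register the same
+// bypass host:
+//
+//	p.AddHost("internal.example.com")
+//	p.AddHost("internal.example.com.")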
+func (p *PerHost) AddHost(host string) {
+	host = strings.TrimSuffix(host, ".")
+	p.bypassHosts = append(p.bypassHosts, host)
+}
diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go
new file mode 100644
index 0000000000..9ff4b9a776
--- /dev/null
+++ b/vendor/golang.org/x/net/proxy/proxy.go
@@ -0,0 +1,149 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proxy provides support for a variety of protocols to proxy network
+// data.
+package proxy // import "golang.org/x/net/proxy"
+
+import (
+	"errors"
+	"net"
+	"net/url"
+	"os"
+	"sync"
+)
+
+// A Dialer is a means to establish a connection.
+// Custom dialers should also implement ContextDialer.
+type Dialer interface {
+	// Dial connects to the given address via the proxy.
+	Dial(network, addr string) (c net.Conn, err error)
+}
+
+// Auth contains authentication parameters that specific Dialers may require.
+type Auth struct {
+	User, Password string
+}
+
+// FromEnvironment returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// directly.
+func FromEnvironment() Dialer {
+	return FromEnvironmentUsing(Direct)
+}
+
+// FromEnvironmentUsing returns the dialer specified by the proxy-related
+// variables in the environment and makes underlying connections
+// using the provided forwarding Dialer (for instance, a *net.Dialer
+// with desired configuration).
+func FromEnvironmentUsing(forward Dialer) Dialer {
+	allProxy := allProxyEnv.Get()
+	if len(allProxy) == 0 {
+		return forward
+	}
+
+	proxyURL, err := url.Parse(allProxy)
+	if err != nil {
+		return forward
+	}
+	proxy, err := FromURL(proxyURL, forward)
+	if err != nil {
+		return forward
+	}
+
+	noProxy := noProxyEnv.Get()
+	if len(noProxy) == 0 {
+		return proxy
+	}
+
+	perHost := NewPerHost(proxy, forward)
+	perHost.AddFromString(noProxy)
+	return perHost
+}
+
+// proxySchemes is a map from URL schemes to a function that creates a Dialer
+// from a URL with such a scheme.
+var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error)
+
+// RegisterDialerType takes a URL scheme and a function to generate Dialers from
+// a URL with that scheme and a forwarding Dialer. Registered schemes are used
+// by FromURL.
+func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) {
+	if proxySchemes == nil {
+		proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error))
+	}
+	proxySchemes[scheme] = f
+}
+
+// FromURL returns a Dialer given a URL specification and an underlying
+// Dialer for it to make network requests.
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) {
+	var auth *Auth
+	if u.User != nil {
+		auth = new(Auth)
+		auth.User = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			auth.Password = p
+		}
+	}
+
+	switch u.Scheme {
+	case "socks5", "socks5h":
+		addr := u.Hostname()
+		port := u.Port()
+		if port == "" {
+			port = "1080"
+		}
+		return SOCKS5("tcp", net.JoinHostPort(addr, port), auth, forward)
+	}
+
+	// If the scheme doesn't match any of the built-in schemes, see if it
+	// was registered by another package.
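+	// Editor's note (illustrative, not part of the vendored file): a
+	// third-party package typically calls RegisterDialerType from an init
+	// function, e.g. RegisterDialerType("socks4", newSOCKS4Dialer), so that
+	// socks4:// URLs resolve through the lookup below; the scheme name and
+	// constructor named here are hypothetical.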
+ if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} + +var ( + allProxyEnv = &envOnce{ + names: []string{"ALL_PROXY", "all_proxy"}, + } + noProxyEnv = &envOnce{ + names: []string{"NO_PROXY", "no_proxy"}, + } +) + +// envOnce looks up an environment variable (optionally by multiple +// names) once. It mitigates expensive lookups on some platforms +// (e.g. Windows). +// (Borrowed from net/http/transport.go) +type envOnce struct { + names []string + once sync.Once + val string +} + +func (e *envOnce) Get() string { + e.once.Do(e.init) + return e.val +} + +func (e *envOnce) init() { + for _, n := range e.names { + e.val = os.Getenv(n) + if e.val != "" { + return + } + } +} + +// reset is used by tests +func (e *envOnce) reset() { + e.once = sync.Once{} + e.val = "" +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 0000000000..c91651f96d --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,42 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "context" + "net" + + "golang.org/x/net/internal/socks" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given +// address with an optional username and password. +// See RFC 1928 and RFC 1929. +func SOCKS5(network, address string, auth *Auth, forward Dialer) (Dialer, error) { + d := socks.NewDialer(network, address) + if forward != nil { + if f, ok := forward.(ContextDialer); ok { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return f.DialContext(ctx, network, address) + } + } else { + d.ProxyDial = func(ctx context.Context, network string, address string) (net.Conn, error) { + return dialContext(ctx, forward, network, address) + } + } + } + if auth != nil { + up := socks.UsernamePassword{ + Username: auth.User, + Password: auth.Password, + } + d.AuthMethods = []socks.AuthMethod{ + socks.AuthMethodNotRequired, + socks.AuthMethodUsernamePassword, + } + d.Authenticate = up.Authenticate + } + return d, nil +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go new file mode 100644 index 0000000000..5db6d52d47 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=imagepolicy.k8s.io + +package v1alpha1 // import "k8s.io/api/imagepolicy/v1alpha1" diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go new file mode 100644 index 0000000000..57732a5164 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.pb.go @@ -0,0 +1,1374 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/api/imagepolicy/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ImageReview) Reset() { *m = ImageReview{} } +func (*ImageReview) ProtoMessage() {} +func (*ImageReview) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{0} +} +func (m *ImageReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReview.Merge(m, src) +} +func (m *ImageReview) XXX_Size() int { + return m.Size() +} +func (m *ImageReview) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReview.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReview proto.InternalMessageInfo + +func (m *ImageReviewContainerSpec) Reset() { *m = ImageReviewContainerSpec{} } +func (*ImageReviewContainerSpec) ProtoMessage() {} +func (*ImageReviewContainerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{1} +} +func (m *ImageReviewContainerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewContainerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewContainerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewContainerSpec.Merge(m, src) +} +func (m *ImageReviewContainerSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewContainerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewContainerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewContainerSpec proto.InternalMessageInfo + +func (m *ImageReviewSpec) Reset() { *m = ImageReviewSpec{} } +func (*ImageReviewSpec) ProtoMessage() {} +func (*ImageReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{2} +} +func (m *ImageReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewSpec.Merge(m, src) +} +func (m *ImageReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewSpec proto.InternalMessageInfo + +func (m *ImageReviewStatus) Reset() { *m = ImageReviewStatus{} } +func (*ImageReviewStatus) ProtoMessage() {} +func (*ImageReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7620d1538838ac6f, []int{3} +} +func (m *ImageReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ImageReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageReviewStatus.Merge(m, src) +} +func (m *ImageReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *ImageReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ImageReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageReviewStatus 
proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ImageReview)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReview") + proto.RegisterType((*ImageReviewContainerSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewContainerSpec") + proto.RegisterType((*ImageReviewSpec)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewSpec.AnnotationsEntry") + proto.RegisterType((*ImageReviewStatus)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.imagepolicy.v1alpha1.ImageReviewStatus.AuditAnnotationsEntry") +} + +func init() { + proto.RegisterFile("k8s.io/api/imagepolicy/v1alpha1/generated.proto", fileDescriptor_7620d1538838ac6f) +} + +var fileDescriptor_7620d1538838ac6f = []byte{ + // 593 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x94, 0x4f, 0x6f, 0xd3, 0x30, + 0x18, 0xc6, 0x9b, 0x74, 0xff, 0xea, 0x02, 0xeb, 0x0c, 0x48, 0x51, 0x0f, 0xe9, 0x54, 0x24, 0x34, + 0x0e, 0xd8, 0xb4, 0x42, 0x68, 0x70, 0x00, 0x35, 0xd3, 0x24, 0x38, 0x00, 0x92, 0xb9, 0xed, 0x84, + 0x9b, 0x9a, 0xd4, 0xb4, 0x89, 0xa3, 0xd8, 0xe9, 0xe8, 0x8d, 0x4f, 0x80, 0xf8, 0x06, 0x7c, 0x11, + 0x3e, 0x40, 0x8f, 0x3b, 0xee, 0x34, 0xd1, 0x70, 0xe4, 0x4b, 0xa0, 0x38, 0x69, 0x13, 0xda, 0xa1, + 0xa9, 0xb7, 0xbc, 0xef, 0xeb, 0xe7, 0xf7, 0x3e, 0x79, 0x62, 0x05, 0xe0, 0xd1, 0xb1, 0x44, 0x5c, + 0x60, 0x1a, 0x72, 0xcc, 0x7d, 0xea, 0xb1, 0x50, 0x8c, 0xb9, 0x3b, 0xc5, 0x93, 0x0e, 0x1d, 0x87, + 0x43, 0xda, 0xc1, 0x1e, 0x0b, 0x58, 0x44, 0x15, 0x1b, 0xa0, 0x30, 0x12, 0x4a, 0xc0, 0x56, 0x26, + 0x40, 0x34, 0xe4, 0xa8, 0x24, 0x40, 0x0b, 0x41, 0xf3, 0xb1, 0xc7, 0xd5, 0x30, 0xee, 0x23, 0x57, + 0xf8, 0xd8, 0x13, 0x9e, 0xc0, 0x5a, 0xd7, 0x8f, 0x3f, 0xe9, 0x4a, 0x17, 0xfa, 0x29, 0xe3, 0x35, + 0x9f, 0x16, 0x06, 0x7c, 0xea, 0x0e, 0x79, 0xc0, 0xa2, 0x29, 0x0e, 0x47, 0x5e, 0xda, 0x90, 0xd8, + 0x67, 0x8a, 0xe2, 0xc9, 0x9a, 0x8b, 0x26, 0xfe, 0x9f, 0x2a, 0x8a, 0x03, 0xc5, 0x7d, 0xb6, 0x26, + 0x78, 0x76, 0x93, 0x40, 0xba, 0x43, 0xe6, 0xd3, 0x55, 0x5d, 0xfb, 0x87, 0x09, 0xea, 0x6f, 0xd2, + 0xd7, 0x24, 0x6c, 0xc2, 0xd9, 0x39, 0xfc, 0x08, 0xf6, 0x52, 0x4f, 0x03, 0xaa, 0xa8, 0x65, 0x1c, + 0x1a, 0x47, 0xf5, 0xee, 0x13, 0x54, 0x24, 0xb2, 0x44, 0xa3, 0x70, 0xe4, 0xa5, 0x0d, 0x89, 0xd2, + 0xd3, 0x68, 0xd2, 0x41, 0xef, 0xfb, 0x9f, 0x99, 0xab, 0xde, 0x32, 0x45, 0x1d, 0x38, 0xbb, 0x6a, + 0x55, 0x92, 0xab, 0x16, 0x28, 0x7a, 0x64, 0x49, 0x85, 0x04, 0x6c, 0xc9, 0x90, 0xb9, 0x96, 0xb9, + 0x46, 0xbf, 0x36, 0x6f, 0x54, 0x72, 0xf7, 0x21, 0x64, 0xae, 0x73, 0x2b, 0xa7, 0x6f, 0xa5, 0x15, + 0xd1, 0x2c, 0x78, 0x06, 0x76, 0xa4, 0xa2, 0x2a, 0x96, 0x56, 0x55, 0x53, 0xbb, 0x1b, 0x51, 0xb5, + 0xd2, 0xb9, 0x93, 0x73, 0x77, 0xb2, 0x9a, 0xe4, 0xc4, 0xf6, 0x2b, 0x60, 0x95, 0x0e, 0x9f, 0x88, + 0x40, 0xd1, 0x34, 0x82, 0x74, 0x3b, 0x7c, 0x00, 0xb6, 0x35, 0x5d, 0x47, 0x55, 0x73, 0x6e, 0xe7, + 0x88, 0xed, 0x4c, 0x90, 0xcd, 0xda, 0x7f, 0x4c, 0xb0, 0xbf, 0xf2, 0x12, 0xd0, 0x07, 0xc0, 0x5d, + 0x90, 0xa4, 0x65, 0x1c, 0x56, 0x8f, 0xea, 0xdd, 0xe7, 0x9b, 0x98, 0xfe, 0xc7, 0x47, 0x91, 0xf8, + 0xb2, 0x2d, 0x49, 0x69, 0x01, 0xfc, 0x02, 0xea, 0x34, 0x08, 0x84, 0xa2, 0x8a, 0x8b, 0x40, 0x5a, + 0xa6, 0xde, 0xd7, 0xdb, 0x34, 0x7a, 0xd4, 0x2b, 0x18, 0xa7, 0x81, 0x8a, 0xa6, 0xce, 0xdd, 0x7c, + 0x6f, 0xbd, 0x34, 0x21, 0xe5, 0x55, 0x10, 0x83, 0x5a, 0x40, 0x7d, 0x26, 0x43, 0xea, 0x32, 0xfd, + 0x71, 0x6a, 0xce, 0x41, 0x2e, 0xaa, 0xbd, 0x5b, 0x0c, 0x48, 0x71, 0xa6, 0xf9, 0x12, 0x34, 
0x56, + 0xd7, 0xc0, 0x06, 0xa8, 0x8e, 0xd8, 0x34, 0x0b, 0x99, 0xa4, 0x8f, 0xf0, 0x1e, 0xd8, 0x9e, 0xd0, + 0x71, 0xcc, 0xf4, 0x2d, 0xaa, 0x91, 0xac, 0x78, 0x61, 0x1e, 0x1b, 0xed, 0x9f, 0x26, 0x38, 0x58, + 0xfb, 0xb8, 0xf0, 0x11, 0xd8, 0xa5, 0xe3, 0xb1, 0x38, 0x67, 0x03, 0x4d, 0xd9, 0x73, 0xf6, 0x73, + 0x13, 0xbb, 0xbd, 0xac, 0x4d, 0x16, 0x73, 0xf8, 0x10, 0xec, 0x44, 0x8c, 0x4a, 0x11, 0x64, 0xec, + 0xe2, 0x5e, 0x10, 0xdd, 0x25, 0xf9, 0x14, 0x7e, 0x33, 0x40, 0x83, 0xc6, 0x03, 0xae, 0x4a, 0x76, + 0xad, 0xaa, 0x4e, 0xf6, 0xf5, 0xe6, 0xd7, 0x0f, 0xf5, 0x56, 0x50, 0x59, 0xc0, 0x56, 0xbe, 0xbc, + 0xb1, 0x3a, 0x26, 0x6b, 0xbb, 0x9b, 0x27, 0xe0, 0xfe, 0xb5, 0x90, 0x4d, 0xe2, 0x73, 0x4e, 0x67, + 0x73, 0xbb, 0x72, 0x31, 0xb7, 0x2b, 0x97, 0x73, 0xbb, 0xf2, 0x35, 0xb1, 0x8d, 0x59, 0x62, 0x1b, + 0x17, 0x89, 0x6d, 0x5c, 0x26, 0xb6, 0xf1, 0x2b, 0xb1, 0x8d, 0xef, 0xbf, 0xed, 0xca, 0x59, 0xeb, + 0x86, 0xbf, 0xea, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x59, 0x86, 0x92, 0x15, 0x77, 0x05, 0x00, + 0x00, +} + +func (m *ImageReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewContainerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewContainerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewContainerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ImageReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AuditAnnotations) > 0 { + keysForAuditAnnotations := make([]string, 0, len(m.AuditAnnotations)) + for k := range m.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + for iNdEx := len(keysForAuditAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.AuditAnnotations[string(keysForAuditAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAuditAnnotations[iNdEx]) + copy(dAtA[i:], keysForAuditAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAuditAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ImageReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewContainerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) 
+ sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ImageReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AuditAnnotations) > 0 { + for k, v := range m.AuditAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ImageReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ImageReviewSpec", "ImageReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ImageReviewStatus", "ImageReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewContainerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageReviewContainerSpec{`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ImageReviewContainerSpec{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ImageReviewContainerSpec", "ImageReviewContainerSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ImageReviewSpec{`, + `Containers:` + repeatedStringForContainers + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *ImageReviewStatus) String() string { + if this == nil { + return "nil" + } + keysForAuditAnnotations := make([]string, 0, len(this.AuditAnnotations)) + for k := range this.AuditAnnotations { + keysForAuditAnnotations = append(keysForAuditAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAuditAnnotations) + mapStringForAuditAnnotations := "map[string]string{" + for _, k := range keysForAuditAnnotations { + mapStringForAuditAnnotations += fmt.Sprintf("%v: %v,", k, this.AuditAnnotations[k]) + } + mapStringForAuditAnnotations += "}" + s := strings.Join([]string{`&ImageReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `AuditAnnotations:` + mapStringForAuditAnnotations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + 
if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ImageReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewContainerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewContainerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewContainerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ImageReviewContainerSpec{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: ImageReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AuditAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AuditAnnotations == nil { + m.AuditAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AuditAnnotations[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto new file mode 100644 index 0000000000..fd55972f20 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/generated.proto @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
+
+syntax = "proto2";
+
+package k8s.io.api.imagepolicy.v1alpha1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "k8s.io/api/imagepolicy/v1alpha1";
+
+// ImageReview checks if the set of images in a pod are allowed.
+message ImageReview {
+  // Standard object's metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the pod being evaluated
+  optional ImageReviewSpec spec = 2;
+
+  // Status is filled in by the backend and indicates whether the pod should be allowed.
+  // +optional
+  optional ImageReviewStatus status = 3;
+}
+
+// ImageReviewContainerSpec is a description of a container within the pod creation request.
+message ImageReviewContainerSpec {
+  // This can be in the form image:tag or image@SHA:012345679abcdef.
+  // +optional
+  optional string image = 1;
+}
+
+// ImageReviewSpec is a description of the pod creation request.
+message ImageReviewSpec {
+  // Containers is a list of a subset of the information in each container of the Pod being created.
+  // +optional
+  // +listType=atomic
+  repeated ImageReviewContainerSpec containers = 1;
+
+  // Annotations is a list of key-value pairs extracted from the Pod's annotations.
+  // It only includes keys which match the pattern `*.image-policy.k8s.io/*`.
+  // It is up to each webhook backend to determine how to interpret these annotations, if at all.
+  // +optional
+  map<string, string> annotations = 2;
+
+  // Namespace is the namespace the pod is being created in.
+  // +optional
+  optional string namespace = 3;
+}
+
+// ImageReviewStatus is the result of the review for the pod creation request.
+message ImageReviewStatus {
+  // Allowed indicates that all images were allowed to be run.
+  optional bool allowed = 1;
+
+  // Reason should be empty unless Allowed is false in which case it
+  // may contain a short description of what is wrong. Kubernetes
+  // may truncate excessively long errors when displaying to the user.
+  // +optional
+  optional string reason = 2;
+
+  // AuditAnnotations will be added to the attributes object of the
+  // admission controller request using 'AddAnnotation'. The keys should
+  // be prefix-less (i.e., the admission controller will add an
+  // appropriate prefix).
+  // +optional
+  map<string, string> auditAnnotations = 3;
+}
+
diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
new file mode 100644
index 0000000000..477571bbb2
--- /dev/null
+++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/register.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "imagepolicy.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ImageReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go new file mode 100644 index 0000000000..19ac2b536f --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types.go @@ -0,0 +1,83 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ImageReview checks if the set of images in a pod are allowed. +type ImageReview struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the pod being evaluated + Spec ImageReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the backend and indicates whether the pod should be allowed. + // +optional + Status ImageReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ImageReviewSpec is a description of the pod creation request. +type ImageReviewSpec struct { + // Containers is a list of a subset of the information in each container of the Pod being created. + // +optional + // +listType=atomic + Containers []ImageReviewContainerSpec `json:"containers,omitempty" protobuf:"bytes,1,rep,name=containers"` + // Annotations is a list of key-value pairs extracted from the Pod's annotations. + // It only includes keys which match the pattern `*.image-policy.k8s.io/*`. + // It is up to each webhook backend to determine how to interpret these annotations, if at all. 
+ // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` + // Namespace is the namespace the pod is being created in. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` +} + +// ImageReviewContainerSpec is a description of a container within the pod creation request. +type ImageReviewContainerSpec struct { + // This can be in the form image:tag or image@SHA:012345679abcdef. + // +optional + Image string `json:"image,omitempty" protobuf:"bytes,1,opt,name=image"` + // In future, we may add command line overrides, exec health check command lines, and so on. +} + +// ImageReviewStatus is the result of the review for the pod creation request. +type ImageReviewStatus struct { + // Allowed indicates that all images were allowed to be run. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Reason should be empty unless Allowed is false in which case it + // may contain a short description of what is wrong. Kubernetes + // may truncate excessively long errors when displaying to the user. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // AuditAnnotations will be added to the attributes object of the + // admission controller request using 'AddAnnotation'. The keys should + // be prefix-less (i.e., the admission controller will add an + // appropriate prefix). + // +optional + AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,3,rep,name=auditAnnotations"` +} diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000000..dadf95e1d5 --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,72 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-codegen.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ImageReview = map[string]string{ + "": "ImageReview checks if the set of images in a pod are allowed.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec holds information about the pod being evaluated", + "status": "Status is filled in by the backend and indicates whether the pod should be allowed.", +} + +func (ImageReview) SwaggerDoc() map[string]string { + return map_ImageReview +} + +var map_ImageReviewContainerSpec = map[string]string{ + "": "ImageReviewContainerSpec is a description of a container within the pod creation request.", + "image": "This can be in the form image:tag or image@SHA:012345679abcdef.", +} + +func (ImageReviewContainerSpec) SwaggerDoc() map[string]string { + return map_ImageReviewContainerSpec +} + +var map_ImageReviewSpec = map[string]string{ + "": "ImageReviewSpec is a description of the pod creation request.", + "containers": "Containers is a list of a subset of the information in each container of the Pod being created.", + "annotations": "Annotations is a list of key-value pairs extracted from the Pod's annotations. It only includes keys which match the pattern `*.image-policy.k8s.io/*`. It is up to each webhook backend to determine how to interpret these annotations, if at all.", + "namespace": "Namespace is the namespace the pod is being created in.", +} + +func (ImageReviewSpec) SwaggerDoc() map[string]string { + return map_ImageReviewSpec +} + +var map_ImageReviewStatus = map[string]string{ + "": "ImageReviewStatus is the result of the review for the pod creation request.", + "allowed": "Allowed indicates that all images were allowed to be run.", + "reason": "Reason should be empty unless Allowed is false in which case it may contain a short description of what is wrong. Kubernetes may truncate excessively long errors when displaying to the user.", + "auditAnnotations": "AuditAnnotations will be added to the attributes object of the admission controller request using 'AddAnnotation'. The keys should be prefix-less (i.e., the admission controller will add an appropriate prefix).", +} + +func (ImageReviewStatus) SwaggerDoc() map[string]string { + return map_ImageReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..f230656f3f --- /dev/null +++ b/vendor/k8s.io/api/imagepolicy/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,121 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
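+// As with all deepcopy helpers, callers typically clone before mutating so a
+// shared copy (for example one held in an informer cache) stays untouched; a
+// minimal illustrative sketch, with hypothetical variable names:
+//
+//	review := cached.DeepCopy()
+//	review.Spec.Namespace = "sandbox" // safe: cached is unchanged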
+func (in *ImageReview) DeepCopyInto(out *ImageReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReview. +func (in *ImageReview) DeepCopy() *ImageReview { + if in == nil { + return nil + } + out := new(ImageReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ImageReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewContainerSpec) DeepCopyInto(out *ImageReviewContainerSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewContainerSpec. +func (in *ImageReviewContainerSpec) DeepCopy() *ImageReviewContainerSpec { + if in == nil { + return nil + } + out := new(ImageReviewContainerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewSpec) DeepCopyInto(out *ImageReviewSpec) { + *out = *in + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ImageReviewContainerSpec, len(*in)) + copy(*out, *in) + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewSpec. +func (in *ImageReviewSpec) DeepCopy() *ImageReviewSpec { + if in == nil { + return nil + } + out := new(ImageReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ImageReviewStatus) DeepCopyInto(out *ImageReviewStatus) { + *out = *in + if in.AuditAnnotations != nil { + in, out := &in.AuditAnnotations, &out.AuditAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageReviewStatus. +func (in *ImageReviewStatus) DeepCopy() *ImageReviewStatus { + if in == nil { + return nil + } + out := new(ImageReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go new file mode 100644 index 0000000000..d4ceab84f0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go @@ -0,0 +1,204 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "net" + "net/http" + "sync" + "time" + + "github.com/moby/spdystream" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog/v2" +) + +// connection maintains state about a spdystream.Connection and its associated +// streams. +type connection struct { + conn *spdystream.Connection + streams map[uint32]httpstream.Stream + streamLock sync.Mutex + newStreamHandler httpstream.NewStreamHandler + ping func() (time.Duration, error) +} + +// NewClientConnection creates a new SPDY client connection. +func NewClientConnection(conn net.Conn) (httpstream.Connection, error) { + return NewClientConnectionWithPings(conn, 0) +} + +// NewClientConnectionWithPings creates a new SPDY client connection. +// +// If pingPeriod is non-zero, a background goroutine will send periodic Ping +// frames to the server. Use this to keep idle connections through certain load +// balancers alive longer. +func NewClientConnectionWithPings(conn net.Conn, pingPeriod time.Duration) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, false) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, httpstream.NoOpNewStreamHandler, pingPeriod, spdyConn.Ping), nil +} + +// NewServerConnection creates a new SPDY server connection. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) { + return NewServerConnectionWithPings(conn, newStreamHandler, 0) +} + +// NewServerConnectionWithPings creates a new SPDY server connection. +// newStreamHandler will be invoked when the server receives a newly created +// stream from the client. +// +// If pingPeriod is non-zero, a background goroutine will send periodic Ping +// frames to the server. Use this to keep idle connections through certain load +// balancers alive longer. +func NewServerConnectionWithPings(conn net.Conn, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration) (httpstream.Connection, error) { + spdyConn, err := spdystream.NewConnection(conn, true) + if err != nil { + defer conn.Close() + return nil, err + } + + return newConnection(spdyConn, newStreamHandler, pingPeriod, spdyConn.Ping), nil +} + +// newConnection returns a new connection wrapping conn. newStreamHandler +// will be invoked when the server receives a newly created stream from the +// client. +func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler, pingPeriod time.Duration, pingFn func() (time.Duration, error)) httpstream.Connection { + c := &connection{ + conn: conn, + newStreamHandler: newStreamHandler, + ping: pingFn, + streams: make(map[uint32]httpstream.Stream), + } + go conn.Serve(c.newSpdyStream) + if pingPeriod > 0 && pingFn != nil { + go c.sendPings(pingPeriod) + } + return c +} + +// createStreamResponseTimeout indicates how long to wait for the other side to +// acknowledge the new stream before timing out. +const createStreamResponseTimeout = 30 * time.Second + +// Close first sends a reset for all of the connection's streams, and then +// closes the underlying spdystream.Connection. 
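+//
+// A minimal caller-side sketch (hypothetical names; not part of this package):
+// pair a successful constructor call with a deferred Close so every stream is
+// reset even on early returns:
+//
+//	conn, err := NewClientConnection(netConn)
+//	if err != nil {
+//		return err
+//	}
+//	defer conn.Close()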
+func (c *connection) Close() error {
+	c.streamLock.Lock()
+	for _, s := range c.streams {
+		// calling Reset instead of Close ensures that all streams are fully torn down
+		s.Reset()
+	}
+	c.streams = make(map[uint32]httpstream.Stream, 0)
+	c.streamLock.Unlock()
+
+	// now that all streams are fully torn down, it's safe to call close on the underlying connection,
+	// which should be able to terminate immediately at this point, instead of waiting for any
+	// remaining graceful stream termination.
+	return c.conn.Close()
+}
+
+// RemoveStreams can be used to remove a set of streams from the Connection.
+func (c *connection) RemoveStreams(streams ...httpstream.Stream) {
+	c.streamLock.Lock()
+	for _, stream := range streams {
+		// It may be possible that the provided stream is nil if it timed out.
+		if stream != nil {
+			delete(c.streams, stream.Identifier())
+		}
+	}
+	c.streamLock.Unlock()
+}
+
+// CreateStream creates a new stream with the specified headers and registers
+// it with the connection.
+func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
+	stream, err := c.conn.CreateStream(headers, nil, false)
+	if err != nil {
+		return nil, err
+	}
+	if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
+		return nil, err
+	}
+
+	c.registerStream(stream)
+	return stream, nil
+}
+
+// registerStream adds the stream s to the connection's list of streams that
+// it owns.
+func (c *connection) registerStream(s httpstream.Stream) {
+	c.streamLock.Lock()
+	c.streams[s.Identifier()] = s
+	c.streamLock.Unlock()
+}
+
+// CloseChan returns a channel that, when closed, indicates that the underlying
+// spdystream.Connection has been closed.
+func (c *connection) CloseChan() <-chan bool {
+	return c.conn.CloseChan()
+}
+
+// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
+// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
+// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
+// stream is accepted and registered with the connection.
+func (c *connection) newSpdyStream(stream *spdystream.Stream) {
+	replySent := make(chan struct{})
+	err := c.newStreamHandler(stream, replySent)
+	rejectStream := (err != nil)
+	if rejectStream {
+		klog.Warningf("Stream rejected: %v", err)
+		stream.Reset()
+		return
+	}
+
+	c.registerStream(stream)
+	stream.SendReply(http.Header{}, rejectStream)
+	close(replySent)
+}
+
+// SetIdleTimeout sets the amount of time the connection may remain idle before
+// it is automatically closed.
+func (c *connection) SetIdleTimeout(timeout time.Duration) {
+	c.conn.SetIdleTimeout(timeout)
+}
+
+func (c *connection) sendPings(period time.Duration) {
+	t := time.NewTicker(period)
+	defer t.Stop()
+	for {
+		select {
+		case <-c.conn.CloseChan():
+			return
+		case <-t.C:
+		}
+		if _, err := c.ping(); err != nil {
+			klog.V(3).Infof("SPDY Ping failed: %v", err)
+			// Continue, in case this is a transient failure.
+			// c.conn.CloseChan above will tell us when the connection is
+			// actually closed.
+		}
+	}
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
new file mode 100644
index 0000000000..c78326fa3b
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -0,0 +1,399 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+	"bufio"
+	"context"
+	"crypto/tls"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/http/httputil"
+	"net/url"
+	"strings"
+	"time"
+
+	"golang.org/x/net/proxy"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	utilnet "k8s.io/apimachinery/pkg/util/net"
+	apiproxy "k8s.io/apimachinery/pkg/util/proxy"
+	"k8s.io/apimachinery/third_party/forked/golang/netutil"
+)
+
+// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
+// multiplexed streams. After RoundTrip() is invoked, Conn will be set
+// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
+type SpdyRoundTripper struct {
+	//tlsConfig holds the TLS configuration settings to use when connecting
+	//to the remote server.
+	tlsConfig *tls.Config
+
+	/* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
+	   must be safe for use by multiple concurrent goroutines. If this is absolutely
+	   necessary, we could keep a map from http.Request to net.Conn. In practice,
+	   a client will create an http.Client, set the transport to a new instance of
+	   SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
+	*/
+	// conn is the underlying network connection to the remote server.
+	conn net.Conn
+
+	// Dialer is the dialer used to connect. Used if non-nil.
+	Dialer *net.Dialer
+
+	// proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
+	// Used primarily for mocking the proxy discovery in tests.
+	proxier func(req *http.Request) (*url.URL, error)
+
+	// pingPeriod is a period for sending Ping frames over established
+	// connections.
+	pingPeriod time.Duration
+
+	// upgradeTransport is an optional substitute for dialing if present. This field is
+	// mutually exclusive with the "tlsConfig", "Dialer", and "proxier".
+	upgradeTransport http.RoundTripper
+}
+
+var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
+var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
+var _ utilnet.Dialer = &SpdyRoundTripper{}
+
+// NewRoundTripper creates a new SpdyRoundTripper that will use the specified
+// tlsConfig.
+func NewRoundTripper(tlsConfig *tls.Config) (*SpdyRoundTripper, error) {
+	return NewRoundTripperWithConfig(RoundTripperConfig{
+		TLS:              tlsConfig,
+		UpgradeTransport: nil,
+	})
+}
+
+// NewRoundTripperWithProxy creates a new SpdyRoundTripper that will use the
+// specified tlsConfig and proxy func.
+func NewRoundTripperWithProxy(tlsConfig *tls.Config, proxier func(*http.Request) (*url.URL, error)) (*SpdyRoundTripper, error) {
+	return NewRoundTripperWithConfig(RoundTripperConfig{
+		TLS:              tlsConfig,
+		Proxier:          proxier,
+		UpgradeTransport: nil,
+	})
+}
+
+// NewRoundTripperWithConfig creates a new SpdyRoundTripper with the specified
+// configuration.
Returns an error if the SpdyRoundTripper is misconfigured.
+func NewRoundTripperWithConfig(cfg RoundTripperConfig) (*SpdyRoundTripper, error) {
+	// Process UpgradeTransport, which is mutually exclusive with TLSConfig and Proxier.
+	if cfg.UpgradeTransport != nil {
+		if cfg.TLS != nil || cfg.Proxier != nil {
+			return nil, fmt.Errorf("SpdyRoundTripper: UpgradeTransport is mutually exclusive with TLSConfig or Proxier")
+		}
+		tlsConfig, err := utilnet.TLSClientConfig(cfg.UpgradeTransport)
+		if err != nil {
+			return nil, fmt.Errorf("SpdyRoundTripper: Unable to retrieve TLSConfig from UpgradeTransport: %v", err)
+		}
+		cfg.TLS = tlsConfig
+	}
+	if cfg.Proxier == nil {
+		cfg.Proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+	}
+	return &SpdyRoundTripper{
+		tlsConfig:        cfg.TLS,
+		proxier:          cfg.Proxier,
+		pingPeriod:       cfg.PingPeriod,
+		upgradeTransport: cfg.UpgradeTransport,
+	}, nil
+}
+
+// RoundTripperConfig is a set of options for an SpdyRoundTripper.
+type RoundTripperConfig struct {
+	// TLS configuration used by the round tripper if UpgradeTransport not present.
+	TLS *tls.Config
+	// Proxier is a proxy function invoked on each request. Optional.
+	Proxier func(*http.Request) (*url.URL, error)
+	// PingPeriod is a period for sending SPDY Pings on the connection.
+	// Optional.
+	PingPeriod time.Duration
+	// UpgradeTransport is a substitute transport used for dialing. If set,
+	// this field will be used instead of "TLS" and "Proxier" for connection creation.
+	// Optional.
+	UpgradeTransport http.RoundTripper
+}
+
+// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
+// proxying with a spdy roundtripper.
+func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
+	return s.tlsConfig
+}
+
+// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
+func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
+	var conn net.Conn
+	var err error
+	if s.upgradeTransport != nil {
+		conn, err = apiproxy.DialURL(req.Context(), req.URL, s.upgradeTransport)
+	} else {
+		conn, err = s.dial(req)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	if err := req.Write(conn); err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	return conn, nil
+}
+
+// dial dials the host specified by req, using TLS if appropriate, optionally
+// using a proxy server if one is configured via environment variables.
+func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
+	proxyURL, err := s.proxier(req)
+	if err != nil {
+		return nil, err
+	}
+
+	if proxyURL == nil {
+		return s.dialWithoutProxy(req.Context(), req.URL)
+	}
+
+	switch proxyURL.Scheme {
+	case "socks5":
+		return s.dialWithSocks5Proxy(req, proxyURL)
+	case "https", "http", "":
+		return s.dialWithHttpProxy(req, proxyURL)
+	}
+
+	return nil, fmt.Errorf("proxy URL scheme not supported: %s", proxyURL.Scheme)
+}
+
+// dialWithHttpProxy dials the host specified by url through an http or an https proxy.
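+// The exchange on the wire is a standard HTTP CONNECT tunnel; roughly, as an
+// illustration (hypothetical host values):
+//
+//	CONNECT backend.example.com:443 HTTP/1.1
+//	Host: backend.example.com:443
+//	Proxy-Authorization: Basic ...   (only when the proxy URL carries userinfo)
+//
+// and only after a successful 2xx reply does the https case below start TLS
+// over the tunneled connection.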
+func (s *SpdyRoundTripper) dialWithHttpProxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {
+	// ensure we use a canonical host with proxyReq
+	targetHost := netutil.CanonicalAddr(req.URL)
+
+	// proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
+	proxyReq := http.Request{
+		Method: "CONNECT",
+		URL:    &url.URL{},
+		Host:   targetHost,
+	}
+
+	proxyReq = *proxyReq.WithContext(req.Context())
+
+	if pa := s.proxyAuth(proxyURL); pa != "" {
+		proxyReq.Header = http.Header{}
+		proxyReq.Header.Set("Proxy-Authorization", pa)
+	}
+
+	proxyDialConn, err := s.dialWithoutProxy(proxyReq.Context(), proxyURL)
+	if err != nil {
+		return nil, err
+	}
+
+	//nolint:staticcheck // SA1019 ignore deprecated httputil.NewProxyClientConn
+	proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
+	response, err := proxyClientConn.Do(&proxyReq)
+	//nolint:staticcheck // SA1019 ignore deprecated httputil.ErrPersistEOF: it might be
+	// returned from the invocation of proxyClientConn.Do
+	if err != nil && err != httputil.ErrPersistEOF {
+		return nil, err
+	}
+	// parenthesized so that a nil response is never dereferenced
+	if response != nil && (response.StatusCode < 200 || response.StatusCode >= 300) {
+		return nil, fmt.Errorf("CONNECT request to %s returned response: %s", proxyURL.Redacted(), response.Status)
+	}
+
+	rwc, _ := proxyClientConn.Hijack()
+
+	if req.URL.Scheme == "https" {
+		return s.tlsConn(proxyReq.Context(), rwc, targetHost)
+	}
+	return rwc, nil
+}
+
+// dialWithSocks5Proxy dials the host specified by url through a socks5 proxy.
+func (s *SpdyRoundTripper) dialWithSocks5Proxy(req *http.Request, proxyURL *url.URL) (net.Conn, error) {
+	// ensure we use a canonical host with proxyReq
+	targetHost := netutil.CanonicalAddr(req.URL)
+	proxyDialAddr := netutil.CanonicalAddr(proxyURL)
+
+	var auth *proxy.Auth
+	if proxyURL.User != nil {
+		pass, _ := proxyURL.User.Password()
+		auth = &proxy.Auth{
+			User:     proxyURL.User.Username(),
+			Password: pass,
+		}
+	}
+
+	dialer := s.Dialer
+	if dialer == nil {
+		dialer = &net.Dialer{
+			Timeout: 30 * time.Second,
+		}
+	}
+
+	proxyDialer, err := proxy.SOCKS5("tcp", proxyDialAddr, auth, dialer)
+	if err != nil {
+		return nil, err
+	}
+
+	// According to the implementation of proxy.SOCKS5, the type assertion will always succeed
+	contextDialer, ok := proxyDialer.(proxy.ContextDialer)
+	if !ok {
+		return nil, errors.New("SOCKS5 Dialer must implement ContextDialer")
+	}
+
+	proxyDialConn, err := contextDialer.DialContext(req.Context(), "tcp", targetHost)
+	if err != nil {
+		return nil, err
+	}
+
+	if req.URL.Scheme == "https" {
+		return s.tlsConn(req.Context(), proxyDialConn, targetHost)
+	}
+	return proxyDialConn, nil
+}
+
+// tlsConn returns a TLS client side connection using rwc as the underlying transport.
+func (s *SpdyRoundTripper) tlsConn(ctx context.Context, rwc net.Conn, targetHost string) (net.Conn, error) {
+
+	host, _, err := net.SplitHostPort(targetHost)
+	if err != nil {
+		return nil, err
+	}
+
+	tlsConfig := s.tlsConfig
+	switch {
+	case tlsConfig == nil:
+		tlsConfig = &tls.Config{ServerName: host}
+	case len(tlsConfig.ServerName) == 0:
+		tlsConfig = tlsConfig.Clone()
+		tlsConfig.ServerName = host
+	}
+
+	tlsConn := tls.Client(rwc, tlsConfig)
+
+	if err := tlsConn.HandshakeContext(ctx); err != nil {
+		tlsConn.Close()
+		return nil, err
+	}
+
+	return tlsConn, nil
+}
+
+// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
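+// For an https URL the TLS handshake happens as part of the dial (via
+// tls.Dialer); for plain http the raw TCP connection is returned. An
+// illustrative direct call, with hypothetical values:
+//
+//	conn, err := s.dialWithoutProxy(ctx, &url.URL{Scheme: "https", Host: "backend.example.com:443"})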
+func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
+	dialAddr := netutil.CanonicalAddr(url)
+	dialer := s.Dialer
+	if dialer == nil {
+		dialer = &net.Dialer{}
+	}
+
+	if url.Scheme == "http" {
+		return dialer.DialContext(ctx, "tcp", dialAddr)
+	}
+
+	tlsDialer := tls.Dialer{
+		NetDialer: dialer,
+		Config:    s.tlsConfig,
+	}
+	return tlsDialer.DialContext(ctx, "tcp", dialAddr)
+}
+
+// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
+func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
+	if proxyURL == nil || proxyURL.User == nil {
+		return ""
+	}
+	username := proxyURL.User.Username()
+	password, _ := proxyURL.User.Password()
+	auth := username + ":" + password
+	return "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+}
+
+// RoundTrip executes the Request and upgrades it. After a successful upgrade,
+// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
+// connection.
+func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	req = utilnet.CloneRequest(req)
+	req.Header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+	req.Header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+
+	conn, err := s.Dial(req)
+	if err != nil {
+		return nil, err
+	}
+
+	responseReader := bufio.NewReader(conn)
+
+	resp, err := http.ReadResponse(responseReader, nil)
+	if err != nil {
+		conn.Close()
+		return nil, err
+	}
+
+	s.conn = conn
+
+	return resp, nil
+}
+
+// NewConnection validates the upgrade response, creating and returning a new
+// httpstream.Connection if there were no errors.
+func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
+	connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
+	upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
+	if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+		defer resp.Body.Close()
+		responseError := ""
+		responseErrorBytes, err := io.ReadAll(resp.Body)
+		if err != nil {
+			responseError = "unable to read error from server response"
+		} else {
+			// TODO: I don't belong here, I should be abstracted from this class
+			if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
+				if status, ok := obj.(*metav1.Status); ok {
+					return nil, &apierrors.StatusError{ErrStatus: *status}
+				}
+			}
+			responseError = string(responseErrorBytes)
+			responseError = strings.TrimSpace(responseError)
+		}
+
+		return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
+	}
+
+	return NewClientConnectionWithPings(s.conn, s.pingPeriod)
+}
+
+// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection
+var statusScheme = runtime.NewScheme()
+
+// statusCodecs knows how to decode metav1.Status payloads returned in failed upgrade responses.
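+// An illustrative decode of an error body (hypothetical "body" variable),
+// mirroring what NewConnection does above:
+//
+//	obj, _, err := statusCodecs.UniversalDecoder().Decode(body, nil, &metav1.Status{})
+//	if status, ok := obj.(*metav1.Status); err == nil && ok {
+//		// the backend returned a structured metav1.Status error
+//		_ = status
+//	}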
+var statusCodecs = serializer.NewCodecFactory(statusScheme) + +func init() { + statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion, + &metav1.Status{}, + ) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go new file mode 100644 index 0000000000..d30ae2fa3d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go @@ -0,0 +1,120 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package spdy + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/runtime" +) + +const HeaderSpdy31 = "SPDY/3.1" + +// responseUpgrader knows how to upgrade HTTP responses. It +// implements the httpstream.ResponseUpgrader interface. +type responseUpgrader struct { + pingPeriod time.Duration +} + +// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All +// calls will be handled directly by the underlying net.Conn with the exception +// of Read and Close calls, which will consider data in the bufio.Reader. This +// ensures that data already inside the used bufio.Reader instance is also +// read. +type connWrapper struct { + net.Conn + closed int32 + bufReader *bufio.Reader +} + +func (w *connWrapper) Read(b []byte) (n int, err error) { + if atomic.LoadInt32(&w.closed) == 1 { + return 0, io.EOF + } + return w.bufReader.Read(b) +} + +func (w *connWrapper) Close() error { + err := w.Conn.Close() + atomic.StoreInt32(&w.closed, 1) + return err +} + +// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is +// capable of upgrading HTTP responses using SPDY/3.1 via the +// spdystream package. +func NewResponseUpgrader() httpstream.ResponseUpgrader { + return NewResponseUpgraderWithPings(0) +} + +// NewResponseUpgraderWithPings returns a new httpstream.ResponseUpgrader that +// is capable of upgrading HTTP responses using SPDY/3.1 via the spdystream +// package. +// +// If pingPeriod is non-zero, for each incoming connection a background +// goroutine will send periodic Ping frames to the server. Use this to keep +// idle connections through certain load balancers alive longer. +func NewResponseUpgraderWithPings(pingPeriod time.Duration) httpstream.ResponseUpgrader { + return responseUpgrader{pingPeriod: pingPeriod} +} + +// UpgradeResponse upgrades an HTTP response to one that supports multiplexed +// streams. newStreamHandler will be called synchronously whenever the +// other end of the upgraded connection creates a new stream. 
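+//
+// A minimal server-side sketch (hypothetical handler; error handling elided):
+//
+//	upgrader := NewResponseUpgrader()
+//	handler := func(w http.ResponseWriter, req *http.Request) {
+//		conn := upgrader.UpgradeResponse(w, req, httpstream.NoOpNewStreamHandler)
+//		if conn == nil {
+//			return // UpgradeResponse already wrote an HTTP error
+//		}
+//		defer conn.Close()
+//	}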
+func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection { + connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection)) + upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade)) + if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) { + errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header) + http.Error(w, errorMsg, http.StatusBadRequest) + return nil + } + + hijacker, ok := w.(http.Hijacker) + if !ok { + errorMsg := "unable to upgrade: unable to hijack response" + http.Error(w, errorMsg, http.StatusInternalServerError) + return nil + } + + w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade) + w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31) + w.WriteHeader(http.StatusSwitchingProtocols) + + conn, bufrw, err := hijacker.Hijack() + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err)) + return nil + } + + connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader} + spdyConn, err := NewServerConnectionWithPings(connWithBuf, newStreamHandler, u.pingPeriod) + if err != nil { + runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err)) + return nil + } + + return spdyConn +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go new file mode 100644 index 0000000000..e5196d1ee8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/dial.go @@ -0,0 +1,122 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/url" + + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/third_party/forked/golang/netutil" + "k8s.io/klog/v2" +) + +// DialURL will dial the specified URL using the underlying dialer held by the passed +// RoundTripper. The primary use of this method is to support proxying upgradable connections. +// For this reason this method will prefer to negotiate http/1.1 if the URL scheme is https. 
+// If you wish to ensure ALPN negotiates http2 then set NextProtos=[]string{"http2"} in the
+// TLSConfig of the http.Transport.
+func DialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (net.Conn, error) {
+	dialAddr := netutil.CanonicalAddr(url)
+
+	dialer, err := utilnet.DialerFor(transport)
+	if err != nil {
+		klog.V(5).Infof("Unable to unwrap transport %T to get dialer: %v", transport, err)
+	}
+
+	switch url.Scheme {
+	case "http":
+		if dialer != nil {
+			return dialer(ctx, "tcp", dialAddr)
+		}
+		var d net.Dialer
+		return d.DialContext(ctx, "tcp", dialAddr)
+	case "https":
+		// Get the tls config from the transport if we recognize it
+		tlsConfig, err := utilnet.TLSClientConfig(transport)
+		if err != nil {
+			klog.V(5).Infof("Unable to unwrap transport %T to get at TLS config: %v", transport, err)
+		}
+
+		if dialer != nil {
+			// We have a dialer; use it to open the connection, then
+			// create a tls client using the connection.
+			netConn, err := dialer(ctx, "tcp", dialAddr)
+			if err != nil {
+				return nil, err
+			}
+			if tlsConfig == nil {
+				// tls.Client requires non-nil config
+				klog.Warning("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify")
+				// tls.Handshake() requires ServerName or InsecureSkipVerify
+				tlsConfig = &tls.Config{
+					InsecureSkipVerify: true,
+				}
+			} else if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
+				// tls.HandshakeContext() requires ServerName or InsecureSkipVerify
+				// infer the ServerName from the hostname we're connecting to.
+				inferredHost := dialAddr
+				if host, _, err := net.SplitHostPort(dialAddr); err == nil {
+					inferredHost = host
+				}
+				// Make a copy to avoid polluting the provided config
+				tlsConfigCopy := tlsConfig.Clone()
+				tlsConfigCopy.ServerName = inferredHost
+				tlsConfig = tlsConfigCopy
+			}
+
+			// Since this method is primarily used within a "Connection: Upgrade" call we assume the caller is
+			// going to write an HTTP/1.1 request to the wire. http2 should not be allowed in the TLSConfig.NextProtos,
+			// so we explicitly set that here. We only do this check if the TLSConfig supports http/1.1.
+			if supportsHTTP11(tlsConfig.NextProtos) {
+				tlsConfig = tlsConfig.Clone()
+				tlsConfig.NextProtos = []string{"http/1.1"}
+			}
+
+			tlsConn := tls.Client(netConn, tlsConfig)
+			if err := tlsConn.HandshakeContext(ctx); err != nil {
+				netConn.Close()
+				return nil, err
+			}
+			return tlsConn, nil
+		} else {
+			// Dial.
+			tlsDialer := tls.Dialer{
+				Config: tlsConfig,
+			}
+			return tlsDialer.DialContext(ctx, "tcp", dialAddr)
+		}
+	default:
+		return nil, fmt.Errorf("unknown scheme: %s", url.Scheme)
+	}
+}
+
+func supportsHTTP11(nextProtos []string) bool {
+	if len(nextProtos) == 0 {
+		return true
+	}
+	for _, proto := range nextProtos {
+		if proto == "http/1.1" {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go
new file mode 100644
index 0000000000..d14ecfad54
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package proxy provides transport and upgrade support for proxies. +package proxy // import "k8s.io/apimachinery/pkg/util/proxy" diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go new file mode 100644 index 0000000000..5a2dd6e14c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/transport.go @@ -0,0 +1,272 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "bytes" + "compress/flate" + "compress/gzip" + "fmt" + "io" + "net/http" + "net/url" + "path" + "strings" + + "golang.org/x/net/html" + "golang.org/x/net/html/atom" + "k8s.io/klog/v2" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/util/sets" +) + +// atomsToAttrs states which attributes of which tags require URL substitution. +// Sources: http://www.w3.org/TR/REC-html40/index/attributes.html +// +// http://www.w3.org/html/wg/drafts/html/master/index.html#attributes-1 +var atomsToAttrs = map[atom.Atom]sets.String{ + atom.A: sets.NewString("href"), + atom.Applet: sets.NewString("codebase"), + atom.Area: sets.NewString("href"), + atom.Audio: sets.NewString("src"), + atom.Base: sets.NewString("href"), + atom.Blockquote: sets.NewString("cite"), + atom.Body: sets.NewString("background"), + atom.Button: sets.NewString("formaction"), + atom.Command: sets.NewString("icon"), + atom.Del: sets.NewString("cite"), + atom.Embed: sets.NewString("src"), + atom.Form: sets.NewString("action"), + atom.Frame: sets.NewString("longdesc", "src"), + atom.Head: sets.NewString("profile"), + atom.Html: sets.NewString("manifest"), + atom.Iframe: sets.NewString("longdesc", "src"), + atom.Img: sets.NewString("longdesc", "src", "usemap"), + atom.Input: sets.NewString("src", "usemap", "formaction"), + atom.Ins: sets.NewString("cite"), + atom.Link: sets.NewString("href"), + atom.Object: sets.NewString("classid", "codebase", "data", "usemap"), + atom.Q: sets.NewString("cite"), + atom.Script: sets.NewString("src"), + atom.Source: sets.NewString("src"), + atom.Video: sets.NewString("poster", "src"), + + // TODO: css URLs hidden in style elements. +} + +// Transport is a transport for text/html content that replaces URLs in html +// content with the prefix of the proxy server +type Transport struct { + Scheme string + Host string + PathPrepend string + + http.RoundTripper +} + +// RoundTrip implements the http.RoundTripper interface +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + // Add reverse proxy headers. 
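+	// For a request proxied at, say, /api/v1/namespaces/ns/services/svc/proxy/
+	// the backend would observe headers like the following (illustrative,
+	// hypothetical values):
+	//
+	//	X-Forwarded-Uri:   /api/v1/namespaces/ns/services/svc/proxy/index.html
+	//	X-Forwarded-Host:  apiserver.example.com
+	//	X-Forwarded-Proto: https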
+ forwardedURI := path.Join(t.PathPrepend, req.URL.EscapedPath()) + if strings.HasSuffix(req.URL.Path, "/") { + forwardedURI = forwardedURI + "/" + } + req.Header.Set("X-Forwarded-Uri", forwardedURI) + if len(t.Host) > 0 { + req.Header.Set("X-Forwarded-Host", t.Host) + } + if len(t.Scheme) > 0 { + req.Header.Set("X-Forwarded-Proto", t.Scheme) + } + + rt := t.RoundTripper + if rt == nil { + rt = http.DefaultTransport + } + resp, err := rt.RoundTrip(req) + + if err != nil { + return nil, errors.NewServiceUnavailable(fmt.Sprintf("error trying to reach service: %v", err)) + } + + if redirect := resp.Header.Get("Location"); redirect != "" { + targetURL, err := url.Parse(redirect) + if err != nil { + return nil, errors.NewInternalError(fmt.Errorf("error trying to parse Location header: %v", err)) + } + resp.Header.Set("Location", t.rewriteURL(targetURL, req.URL, req.Host)) + return resp, nil + } + + cType := resp.Header.Get("Content-Type") + cType = strings.TrimSpace(strings.SplitN(cType, ";", 2)[0]) + if cType != "text/html" { + // Do nothing, simply pass through + return resp, nil + } + + return t.rewriteResponse(req, resp) +} + +var _ = net.RoundTripperWrapper(&Transport{}) + +func (rt *Transport) WrappedRoundTripper() http.RoundTripper { + return rt.RoundTripper +} + +// rewriteURL rewrites a single URL to go through the proxy, if the URL refers +// to the same host as sourceURL, which is the page on which the target URL +// occurred, or if the URL matches the sourceRequestHost. +func (t *Transport) rewriteURL(url *url.URL, sourceURL *url.URL, sourceRequestHost string) string { + // Example: + // When API server processes a proxy request to a service (e.g. /api/v1/namespace/foo/service/bar/proxy/), + // the sourceURL.Host (i.e. req.URL.Host) is the endpoint IP address of the service. The + // sourceRequestHost (i.e. req.Host) is the Host header that specifies the host on which the + // URL is sought, which can be different from sourceURL.Host. For example, if user sends the + // request through "kubectl proxy" locally (i.e. localhost:8001/api/v1/namespace/foo/service/bar/proxy/), + // sourceRequestHost is "localhost:8001". + // + // If the service's response URL contains non-empty host, and url.Host is equal to either sourceURL.Host + // or sourceRequestHost, we should not consider the returned URL to be a completely different host. + // It's the API server's responsibility to rewrite a same-host-and-absolute-path URL and append the + // necessary URL prefix (i.e. /api/v1/namespace/foo/service/bar/proxy/). + isDifferentHost := url.Host != "" && url.Host != sourceURL.Host && url.Host != sourceRequestHost + isRelative := !strings.HasPrefix(url.Path, "/") + if isDifferentHost || isRelative { + return url.String() + } + + // Do not rewrite scheme and host if the Transport has empty scheme and host + // when targetURL already contains the sourceRequestHost + if !(url.Host == sourceRequestHost && t.Scheme == "" && t.Host == "") { + url.Scheme = t.Scheme + url.Host = t.Host + } + + origPath := url.Path + // Do not rewrite URL if the sourceURL already contains the necessary prefix. + if strings.HasPrefix(url.Path, t.PathPrepend) { + return url.String() + } + url.Path = path.Join(t.PathPrepend, url.Path) + if strings.HasSuffix(origPath, "/") { + // Add back the trailing slash, which was stripped by path.Join(). + url.Path += "/" + } + + return url.String() +} + +// rewriteHTML scans the HTML for tags with url-valued attributes, and updates +// those values with the urlRewriter function. 
The updated HTML is output to the
+// writer.
+func rewriteHTML(reader io.Reader, writer io.Writer, urlRewriter func(*url.URL) string) error {
+	// Note: This assumes the content is UTF-8.
+	tokenizer := html.NewTokenizer(reader)
+
+	var err error
+	for err == nil {
+		tokenType := tokenizer.Next()
+		switch tokenType {
+		case html.ErrorToken:
+			err = tokenizer.Err()
+		case html.StartTagToken, html.SelfClosingTagToken:
+			token := tokenizer.Token()
+			if urlAttrs, ok := atomsToAttrs[token.DataAtom]; ok {
+				for i, attr := range token.Attr {
+					if urlAttrs.Has(attr.Key) {
+						url, err := url.Parse(attr.Val)
+						if err != nil {
+							// Do not rewrite the URL if it isn't valid. We intentionally do
+							// not return an error here: one unparsable URL should not make
+							// the rest of the body impossible to process.
+							continue
+						}
+						token.Attr[i].Val = urlRewriter(url)
+					}
+				}
+			}
+			_, err = writer.Write([]byte(token.String()))
+		default:
+			_, err = writer.Write(tokenizer.Raw())
+		}
+	}
+	if err != io.EOF {
+		return err
+	}
+	return nil
+}
+
+// rewriteResponse modifies an HTML response by updating absolute links referring
+// to the original host to instead refer to the proxy transport.
+func (t *Transport) rewriteResponse(req *http.Request, resp *http.Response) (*http.Response, error) {
+	origBody := resp.Body
+	defer origBody.Close()
+
+	newContent := &bytes.Buffer{}
+	var reader io.Reader = origBody
+	var writer io.Writer = newContent
+	encoding := resp.Header.Get("Content-Encoding")
+	switch encoding {
+	case "gzip":
+		var err error
+		reader, err = gzip.NewReader(reader)
+		if err != nil {
+			return nil, fmt.Errorf("error making gzip reader: %v", err)
+		}
+		gzw := gzip.NewWriter(writer)
+		defer gzw.Close()
+		writer = gzw
+	case "deflate":
+		var err error
+		reader = flate.NewReader(reader)
+		flw, err := flate.NewWriter(writer, flate.BestCompression)
+		if err != nil {
+			return nil, fmt.Errorf("error making flate writer: %v", err)
+		}
+		defer func() {
+			flw.Close()
+			flw.Flush()
+		}()
+		writer = flw
+	case "":
+		// This is fine
+	default:
+		// Some encoding we don't understand-- don't try to parse this
+		klog.Errorf("Proxy encountered encoding %v for text/html; can't understand this so not fixing links.", encoding)
+		return resp, nil
+	}
+
+	urlRewriter := func(targetUrl *url.URL) string {
+		return t.rewriteURL(targetUrl, req.URL, req.Host)
+	}
+	err := rewriteHTML(reader, writer, urlRewriter)
+	if err != nil {
+		klog.Errorf("Failed to rewrite URLs: %v", err)
+		return resp, err
+	}
+
+	resp.Body = io.NopCloser(newContent)
+	// Update header node with new content-length
+	// TODO: Remove any hash/signature headers here?
+	resp.Header.Del("Content-Length")
+	resp.ContentLength = int64(newContent.Len())
+
+	return resp, err
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go
new file mode 100644
index 0000000000..8c30a366de
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/proxy/upgradeaware.go
@@ -0,0 +1,558 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proxy + +import ( + "bufio" + "bytes" + "fmt" + "io" + "log" + "net" + "net/http" + "net/http/httputil" + "net/url" + "os" + "strings" + "time" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + + "github.com/mxk/go-flowrate/flowrate" + + "k8s.io/klog/v2" +) + +// UpgradeRequestRoundTripper provides an additional method to decorate a request +// with any authentication or other protocol level information prior to performing +// an upgrade on the server. Any response will be handled by the intercepting +// proxy. +type UpgradeRequestRoundTripper interface { + http.RoundTripper + // WrapRequest takes a valid HTTP request and returns a suitably altered version + // of request with any HTTP level values required to complete the request half of + // an upgrade on the server. It does not get a chance to see the response and + // should bypass any request side logic that expects to see the response. + WrapRequest(*http.Request) (*http.Request, error) +} + +// UpgradeAwareHandler is a handler for proxy requests that may require an upgrade +type UpgradeAwareHandler struct { + // UpgradeRequired will reject non-upgrade connections if true. + UpgradeRequired bool + // Location is the location of the upstream proxy. It is used as the location to Dial on the upstream server + // for upgrade requests unless UseRequestLocationOnUpgrade is true. + Location *url.URL + // AppendLocationPath determines if the original path of the Location should be appended to the upstream proxy request path + AppendLocationPath bool + // Transport provides an optional round tripper to use to proxy. If nil, the default proxy transport is used + Transport http.RoundTripper + // UpgradeTransport, if specified, will be used as the backend transport when upgrade requests are provided. + // This allows clients to disable HTTP/2. + UpgradeTransport UpgradeRequestRoundTripper + // WrapTransport indicates whether the provided Transport should be wrapped with default proxy transport behavior (URL rewriting, X-Forwarded-* header setting) + WrapTransport bool + // UseRequestLocation will use the incoming request URL when talking to the backend server. + UseRequestLocation bool + // UseLocationHost overrides the HTTP host header in requests to the backend server to use the Host from Location. + // This will override the req.Host field of a request, while UseRequestLocation will override the req.URL field + // of a request. The req.URL.Host specifies the server to connect to, while the req.Host field + // specifies the Host header value to send in the HTTP request. If this is false, the incoming req.Host header will + // just be forwarded to the backend server. + UseLocationHost bool + // FlushInterval controls how often the standard HTTP proxy will flush content from the upstream. + FlushInterval time.Duration + // MaxBytesPerSec controls the maximum rate for an upstream connection. No rate is imposed if the value is zero. + MaxBytesPerSec int64 + // Responder is passed errors that occur while setting up proxying. 
+	Responder ErrorResponder
+	// RejectForwardingRedirects, if true, rejects redirect responses from the backend
+	// instead of forwarding them to the client.
+	RejectForwardingRedirects bool
+}
+
+const defaultFlushInterval = 200 * time.Millisecond
+
+// ErrorResponder abstracts error reporting to the proxy handler to remove the need to hardcode a particular
+// error format.
+type ErrorResponder interface {
+	Error(w http.ResponseWriter, req *http.Request, err error)
+}
+
+// SimpleErrorResponder is the legacy implementation of ErrorResponder for callers that only
+// service a single request/response per proxy.
+type SimpleErrorResponder interface {
+	Error(err error)
+}
+
+func NewErrorResponder(r SimpleErrorResponder) ErrorResponder {
+	return simpleResponder{r}
+}
+
+type simpleResponder struct {
+	responder SimpleErrorResponder
+}
+
+func (r simpleResponder) Error(w http.ResponseWriter, req *http.Request, err error) {
+	r.responder.Error(err)
+}
+
+// upgradeRequestRoundTripper implements proxy.UpgradeRequestRoundTripper.
+type upgradeRequestRoundTripper struct {
+	http.RoundTripper
+	upgrader http.RoundTripper
+}
+
+var (
+	_ UpgradeRequestRoundTripper  = &upgradeRequestRoundTripper{}
+	_ utilnet.RoundTripperWrapper = &upgradeRequestRoundTripper{}
+)
+
+// WrappedRoundTripper returns the round tripper that a caller would use.
+func (rt *upgradeRequestRoundTripper) WrappedRoundTripper() http.RoundTripper {
+	return rt.RoundTripper
+}
+
+// WrapRequest calls the nested upgrader and then copies the returned request
+// fields onto the passed request.
+func (rt *upgradeRequestRoundTripper) WrapRequest(req *http.Request) (*http.Request, error) {
+	resp, err := rt.upgrader.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	return resp.Request, nil
+}
+
+// onewayRoundTripper captures the provided request - which is assumed to have
+// been modified by other round trippers - and then returns a fake response.
+type onewayRoundTripper struct{}
+
+// RoundTrip returns a simple 200 OK response that captures the provided request.
+func (onewayRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+	return &http.Response{
+		Status:     "200 OK",
+		StatusCode: http.StatusOK,
+		Body:       io.NopCloser(&bytes.Buffer{}),
+		Request:    req,
+	}, nil
+}
+
+// MirrorRequest is a round tripper that can be called to get back the calling request as
+// the core round tripper in a chain.
+var MirrorRequest http.RoundTripper = onewayRoundTripper{}
+
+// NewUpgradeRequestRoundTripper takes two round trippers - one for the underlying TCP connection, and
+// one that is able to write headers to an HTTP request. The request rt is used to set the request headers
+// and that is written to the underlying connection rt.
+func NewUpgradeRequestRoundTripper(connection, request http.RoundTripper) UpgradeRequestRoundTripper {
+	return &upgradeRequestRoundTripper{
+		RoundTripper: connection,
+		upgrader:     request,
+	}
+}
+
+// normalizeLocation returns the result of parsing the full URL, with scheme set to http if missing
+func normalizeLocation(location *url.URL) *url.URL {
+	normalized, _ := url.Parse(location.String())
+	if len(normalized.Scheme) == 0 {
+		normalized.Scheme = "http"
+	}
+	return normalized
+}
+
+// NewUpgradeAwareHandler creates a new proxy handler with a default flush interval. Responder is required for returning
+// errors to the caller.
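+//
+// Illustrative wiring (hypothetical backend URL and responder):
+//
+//	backend, _ := url.Parse("http://10.0.0.1:8080")
+//	handler := NewUpgradeAwareHandler(backend, nil, false, false, responder)
+//	// handler can now be registered with an http.ServeMux like any http.Handler.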
+func NewUpgradeAwareHandler(location *url.URL, transport http.RoundTripper, wrapTransport, upgradeRequired bool, responder ErrorResponder) *UpgradeAwareHandler {
+	return &UpgradeAwareHandler{
+		Location:        normalizeLocation(location),
+		Transport:       transport,
+		WrapTransport:   wrapTransport,
+		UpgradeRequired: upgradeRequired,
+		FlushInterval:   defaultFlushInterval,
+		Responder:       responder,
+	}
+}
+
+func proxyRedirectsforRootPath(path string, w http.ResponseWriter, req *http.Request) bool {
+	redirect := false
+	method := req.Method
+
+	// From pkg/genericapiserver/endpoints/handlers/proxy.go#ServeHTTP:
+	// Redirect requests with an empty path to a location that ends with a '/'
+	// This is essentially a hack for https://issue.k8s.io/4958.
+	// Note: Keep this code after tryUpgrade to not break that flow.
+	if len(path) == 0 && (method == http.MethodGet || method == http.MethodHead) {
+		var queryPart string
+		if len(req.URL.RawQuery) > 0 {
+			queryPart = "?" + req.URL.RawQuery
+		}
+		w.Header().Set("Location", req.URL.Path+"/"+queryPart)
+		w.WriteHeader(http.StatusMovedPermanently)
+		redirect = true
+	}
+	return redirect
+}
+
+// ServeHTTP handles the proxy request
+func (h *UpgradeAwareHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if h.tryUpgrade(w, req) {
+		return
+	}
+	if h.UpgradeRequired {
+		h.Responder.Error(w, req, errors.NewBadRequest("Upgrade request required"))
+		return
+	}
+
+	loc := *h.Location
+	loc.RawQuery = req.URL.RawQuery
+
+	// If the original request URL ended in '/', append a '/' at the end
+	// of the proxy URL
+	if !strings.HasSuffix(loc.Path, "/") && strings.HasSuffix(req.URL.Path, "/") {
+		loc.Path += "/"
+	}
+
+	proxyRedirect := proxyRedirectsforRootPath(loc.Path, w, req)
+	if proxyRedirect {
+		return
+	}
+
+	if h.Transport == nil || h.WrapTransport {
+		h.Transport = h.defaultProxyTransport(req.URL, h.Transport)
+	}
+
+	// WithContext creates a shallow clone of the request with the same context.
+	newReq := req.WithContext(req.Context())
+	newReq.Header = utilnet.CloneHeader(req.Header)
+	if !h.UseRequestLocation {
+		newReq.URL = &loc
+	}
+	if h.UseLocationHost {
+		// exchanging req.Host with the backend location is necessary for backends that act on the HTTP host header (e.g.
+		// because req.Host has preference over req.URL.Host in filling this header field
+		newReq.Host = h.Location.Host
+	}
+
+	// create the target location to use for the reverse proxy
+	reverseProxyLocation := &url.URL{Scheme: h.Location.Scheme, Host: h.Location.Host}
+	if h.AppendLocationPath {
+		reverseProxyLocation.Path = h.Location.Path
+	}
+
+	proxy := httputil.NewSingleHostReverseProxy(reverseProxyLocation)
+	proxy.Transport = h.Transport
+	proxy.FlushInterval = h.FlushInterval
+	proxy.ErrorLog = log.New(noSuppressPanicError{}, "", log.LstdFlags)
+	if h.RejectForwardingRedirects {
+		oldModifyResponse := proxy.ModifyResponse
+		proxy.ModifyResponse = func(response *http.Response) error {
+			code := response.StatusCode
+			if code >= 300 && code <= 399 && len(response.Header.Get("Location")) > 0 {
+				// close the original response
+				response.Body.Close()
+				msg := "the backend attempted to redirect this request, which is not permitted"
+				// replace the response with a 502; use http.StatusBadGateway for the status
+				// line as well, so it stays consistent with the replaced StatusCode
+				*response = http.Response{
+					StatusCode:    http.StatusBadGateway,
+					Status:        fmt.Sprintf("%d %s", http.StatusBadGateway, http.StatusText(http.StatusBadGateway)),
+					Body:          io.NopCloser(strings.NewReader(msg)),
+					ContentLength: int64(len(msg)),
+				}
+			} else {
+				if oldModifyResponse != nil {
+					if err := oldModifyResponse(response); err != nil {
+						return err
+					}
+				}
+			}
+			return nil
+		}
+	}
+	if h.Responder != nil {
+		// If an optional error interceptor/responder was provided, wire it in.
+		// A custom responder can be used to provide unified error reporting,
+		// or to support retry mechanisms by not sending non-fatal errors to the clients.
+		proxy.ErrorHandler = h.Responder.Error
+	}
+	proxy.ServeHTTP(w, newReq)
+}
+
+type noSuppressPanicError struct{}
+
+func (noSuppressPanicError) Write(p []byte) (n int, err error) {
+	// skip the "suppressing panic for copyResponse error in test; copy error" message
+	// that otherwise ends up as noise in CI tests on each kube-apiserver termination
+	// and is easily mistaken for a fatal error.
+	if strings.Contains(string(p), "suppressing panic") {
+		return len(p), nil
+	}
+	return os.Stderr.Write(p)
+}
+
+// tryUpgrade returns true if the request was handled.
+func (h *UpgradeAwareHandler) tryUpgrade(w http.ResponseWriter, req *http.Request) bool {
+	if !httpstream.IsUpgradeRequest(req) {
+		klog.V(6).Infof("Request was not an upgrade")
+		return false
+	}
+
+	var (
+		backendConn net.Conn
+		rawResponse []byte
+		err         error
+	)
+
+	location := *h.Location
+	if h.UseRequestLocation {
+		location = *req.URL
+		location.Scheme = h.Location.Scheme
+		location.Host = h.Location.Host
+		if h.AppendLocationPath {
+			location.Path = singleJoiningSlash(h.Location.Path, location.Path)
+		}
+	}
+
+	clone := utilnet.CloneRequest(req)
+	// Only append X-Forwarded-For in the upgrade path, since httputil.NewSingleHostReverseProxy
+	// handles this in the non-upgrade path.
+ utilnet.AppendForwardedForHeader(clone) + klog.V(6).Infof("Connecting to backend proxy (direct dial) %s\n Headers: %v", &location, clone.Header) + if h.UseLocationHost { + clone.Host = h.Location.Host + } + clone.URL = &location + klog.V(6).Infof("UpgradeAwareProxy: dialing for SPDY upgrade with headers: %v", clone.Header) + backendConn, err = h.DialForUpgrade(clone) + if err != nil { + klog.V(6).Infof("Proxy connection error: %v", err) + h.Responder.Error(w, req, err) + return true + } + defer backendConn.Close() + + // determine the http response code from the backend by reading from rawResponse+backendConn + backendHTTPResponse, headerBytes, err := getResponse(io.MultiReader(bytes.NewReader(rawResponse), backendConn)) + if err != nil { + klog.V(6).Infof("Proxy connection error: %v", err) + h.Responder.Error(w, req, err) + return true + } + if len(headerBytes) > len(rawResponse) { + // we read beyond the bytes stored in rawResponse, update rawResponse to the full set of bytes read from the backend + rawResponse = headerBytes + } + + // If the backend did not upgrade the request, return an error to the client. If the response was + // an error, the error is forwarded directly after the connection is hijacked. Otherwise, just + // return a generic error here. + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols && backendHTTPResponse.StatusCode < 400 { + err := fmt.Errorf("invalid upgrade response: status code %d", backendHTTPResponse.StatusCode) + klog.Errorf("Proxy upgrade error: %v", err) + h.Responder.Error(w, req, err) + return true + } + + // Once the connection is hijacked, the ErrorResponder will no longer work, so + // hijacking should be the last step in the upgrade. + requestHijacker, ok := w.(http.Hijacker) + if !ok { + klog.Errorf("Unable to hijack response writer: %T", w) + h.Responder.Error(w, req, fmt.Errorf("request connection cannot be hijacked: %T", w)) + return true + } + requestHijackedConn, _, err := requestHijacker.Hijack() + if err != nil { + klog.Errorf("Unable to hijack response: %v", err) + h.Responder.Error(w, req, fmt.Errorf("error hijacking connection: %v", err)) + return true + } + defer requestHijackedConn.Close() + + if backendHTTPResponse.StatusCode != http.StatusSwitchingProtocols { + // If the backend did not upgrade the request, echo the response from the backend to the client and return, closing the connection. + klog.V(6).Infof("Proxy upgrade error, status code %d", backendHTTPResponse.StatusCode) + // set read/write deadlines + deadline := time.Now().Add(10 * time.Second) + backendConn.SetReadDeadline(deadline) + requestHijackedConn.SetWriteDeadline(deadline) + // write the response to the client + err := backendHTTPResponse.Write(requestHijackedConn) + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + klog.Errorf("Error proxying data from backend to client: %v", err) + } + // Indicate we handled the request + return true + } + + // Forward raw response bytes back to client. + if len(rawResponse) > 0 { + klog.V(6).Infof("Writing %d bytes to hijacked connection", len(rawResponse)) + if _, err = requestHijackedConn.Write(rawResponse); err != nil { + utilruntime.HandleError(fmt.Errorf("Error proxying response from backend to client: %v", err)) + } + } + + // Proxy the connection. This is bidirectional, so we need a goroutine + // to copy in each direction. 
Once one side of the connection exits, we
+	// exit the function which performs cleanup and in the process closes
+	// the other half of the connection in the defer.
+	writerComplete := make(chan struct{})
+	readerComplete := make(chan struct{})
+
+	go func() {
+		var writer io.WriteCloser
+		if h.MaxBytesPerSec > 0 {
+			writer = flowrate.NewWriter(backendConn, h.MaxBytesPerSec)
+		} else {
+			writer = backendConn
+		}
+		_, err := io.Copy(writer, requestHijackedConn)
+		if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+			klog.Errorf("Error proxying data from client to backend: %v", err)
+		}
+		close(writerComplete)
+	}()
+
+	go func() {
+		var reader io.ReadCloser
+		if h.MaxBytesPerSec > 0 {
+			reader = flowrate.NewReader(backendConn, h.MaxBytesPerSec)
+		} else {
+			reader = backendConn
+		}
+		_, err := io.Copy(requestHijackedConn, reader)
+		if err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+			klog.Errorf("Error proxying data from backend to client: %v", err)
+		}
+		close(readerComplete)
+	}()
+
+	// Wait for one half of the connection to exit. Once it does, the defer will
+	// clean up the other half of the connection.
+	select {
+	case <-writerComplete:
+	case <-readerComplete:
+	}
+	klog.V(6).Infof("Disconnecting from backend proxy %s\n Headers: %v", &location, clone.Header)
+
+	return true
+}
+
+// FIXME: Taken from net/http/httputil/reverseproxy.go as singleJoiningSlash is not exported to be re-used.
+// See-also: https://github.com/golang/go/issues/44290
+func singleJoiningSlash(a, b string) string {
+	aslash := strings.HasSuffix(a, "/")
+	bslash := strings.HasPrefix(b, "/")
+	switch {
+	case aslash && bslash:
+		return a + b[1:]
+	case !aslash && !bslash:
+		return a + "/" + b
+	}
+	return a + b
+}
+
+func (h *UpgradeAwareHandler) DialForUpgrade(req *http.Request) (net.Conn, error) {
+	if h.UpgradeTransport == nil {
+		return dial(req, h.Transport)
+	}
+	updatedReq, err := h.UpgradeTransport.WrapRequest(req)
+	if err != nil {
+		return nil, err
+	}
+	return dial(updatedReq, h.UpgradeTransport)
+}
+
+// getResponse reads an http response from the given reader, and returns the response,
+// the bytes read from the reader, and any error encountered
+func getResponse(r io.Reader) (*http.Response, []byte, error) {
+	rawResponse := bytes.NewBuffer(make([]byte, 0, 256))
+	// Save the bytes read while reading the response headers into the rawResponse buffer
+	resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(r, rawResponse)), nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	// return the http response and the raw bytes consumed from the reader in the process
+	return resp, rawResponse.Bytes(), nil
+}
+
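The TeeReader trick in getResponse is worth calling out: the response is parsed while the exact bytes the parser consumed are retained, so they can later be replayed verbatim to the hijacked client connection. A standalone sketch of that pattern (note that the bufio.Reader may buffer past the parsed headers, which is exactly why tryUpgrade compares the captured bytes against what it already had):

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	raw := "HTTP/1.1 101 Switching Protocols\r\nUpgrade: SPDY/3.1\r\nConnection: Upgrade\r\n\r\n"
	var captured bytes.Buffer
	// TeeReader copies every byte the parser reads into captured.
	resp, err := http.ReadResponse(bufio.NewReader(io.TeeReader(strings.NewReader(raw), &captured)), nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode)    // 101
	fmt.Println(captured.Len() > 0) // true: the consumed header bytes were retained
}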
+// dial dials the backend at req.URL and writes req to it.
+func dial(req *http.Request, transport http.RoundTripper) (net.Conn, error) {
+	conn, err := DialURL(req.Context(), req.URL, transport)
+	if err != nil {
+		return nil, fmt.Errorf("error dialing backend: %v", err)
+	}
+
+	if err = req.Write(conn); err != nil {
+		conn.Close()
+		return nil, fmt.Errorf("error sending request: %v", err)
+	}
+
+	return conn, err
+}
+
+func (h *UpgradeAwareHandler) defaultProxyTransport(url *url.URL, internalTransport http.RoundTripper) http.RoundTripper {
+	scheme := url.Scheme
+	host := url.Host
+	suffix := h.Location.Path
+	if strings.HasSuffix(url.Path, "/") && !strings.HasSuffix(suffix, "/") {
+		suffix += "/"
+	}
+	pathPrepend := strings.TrimSuffix(url.Path, suffix)
+	rewritingTransport := &Transport{
+		Scheme:       scheme,
+		Host:         host,
+		PathPrepend:  pathPrepend,
+		RoundTripper: internalTransport,
+	}
+	return &corsRemovingTransport{
+		RoundTripper: rewritingTransport,
+	}
+}
+
+// corsRemovingTransport is a wrapper for an internal transport. It removes CORS headers
+// from the internal response.
+// Implements pkg/util/net.RoundTripperWrapper
+type corsRemovingTransport struct {
+	http.RoundTripper
+}
+
+var _ = utilnet.RoundTripperWrapper(&corsRemovingTransport{})
+
+func (rt *corsRemovingTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+	resp, err := rt.RoundTripper.RoundTrip(req)
+	if err != nil {
+		return nil, err
+	}
+	removeCORSHeaders(resp)
+	return resp, nil
+}
+
+func (rt *corsRemovingTransport) WrappedRoundTripper() http.RoundTripper {
+	return rt.RoundTripper
+}
+
+// removeCORSHeaders strips CORS headers sent from the backend.
+// This should be called on all responses before returning.
+func removeCORSHeaders(resp *http.Response) {
+	resp.Header.Del("Access-Control-Allow-Credentials")
+	resp.Header.Del("Access-Control-Allow-Headers")
+	resp.Header.Del("Access-Control-Allow-Methods")
+	resp.Header.Del("Access-Control-Allow-Origin")
+}
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
new file mode 100644
index 0000000000..bd26f427e3
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
@@ -0,0 +1,28 @@
+package netutil
+
+import (
+	"net/url"
+	"strings"
+)
+
+// FROM: http://golang.org/src/net/http/client.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+	"http":   "80",
+	"https":  "443",
+	"socks5": "1080",
+}
+
+// FROM: http://golang.org/src/net/http/transport.go
+// CanonicalAddr returns url.Host but always with a ":port" suffix
+func CanonicalAddr(url *url.URL) string {
+	addr := url.Host
+	if !hasPort(addr) {
+		return addr + ":" + portMap[url.Scheme]
+	}
+	return addr
+}
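A quick usage sketch for CanonicalAddr, showing the port-defaulting behavior (import path as vendored above; the URLs are placeholders):

package main

import (
	"fmt"
	"net/url"

	"k8s.io/apimachinery/third_party/forked/golang/netutil"
)

func main() {
	u1, _ := url.Parse("https://example.com/healthz")
	u2, _ := url.Parse("http://example.com:8080")
	fmt.Println(netutil.CanonicalAddr(u1)) // example.com:443 (port inferred from the scheme)
	fmt.Println(netutil.CanonicalAddr(u2)) // example.com:8080 (explicit port preserved)
}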
diff --git a/vendor/k8s.io/client-go/scale/client.go b/vendor/k8s.io/client-go/scale/client.go
new file mode 100644
index 0000000000..1306b37d9c
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/client.go
@@ -0,0 +1,238 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scale
+
+import (
+	"context"
+	"fmt"
+
+	autoscaling "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/client-go/dynamic"
+	restclient "k8s.io/client-go/rest"
+)
+
+var scaleConverter = NewScaleConverter()
+var codecs = serializer.NewCodecFactory(scaleConverter.Scheme())
+var parameterScheme = runtime.NewScheme()
+var dynamicParameterCodec = runtime.NewParameterCodec(parameterScheme)
+
+var versionV1 = schema.GroupVersion{Version: "v1"}
+
+func init() {
+	metav1.AddToGroupVersion(parameterScheme, versionV1)
+}
+
+// scaleClient is an implementation of ScalesGetter
+// which makes use of a RESTMapper and a generic REST
+// client to support a discoverable resource.
+// It behaves somewhat similarly to the dynamic ClientPool,
+// but is more specifically scoped to Scale.
+type scaleClient struct {
+	mapper PreferredResourceMapper
+
+	apiPathResolverFunc dynamic.APIPathResolverFunc
+	scaleKindResolver   ScaleKindResolver
+	clientBase          restclient.Interface
+}
+
+// NewForConfig creates a new ScalesGetter which resolves kinds
+// to resources using the given RESTMapper, and API paths using
+// the given dynamic.APIPathResolverFunc.
+func NewForConfig(cfg *restclient.Config, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) (ScalesGetter, error) {
+	// so that the RESTClientFor doesn't complain
+	cfg.GroupVersion = &schema.GroupVersion{}
+
+	cfg.NegotiatedSerializer = codecs.WithoutConversion()
+	if len(cfg.UserAgent) == 0 {
+		cfg.UserAgent = restclient.DefaultKubernetesUserAgent()
+	}
+
+	client, err := restclient.RESTClientFor(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	return New(client, mapper, resolver, scaleKindResolver), nil
+}
+
+// New creates a new ScalesGetter using the given client to make requests.
+// The GroupVersion on the client is ignored.
+func New(baseClient restclient.Interface, mapper PreferredResourceMapper, resolver dynamic.APIPathResolverFunc, scaleKindResolver ScaleKindResolver) ScalesGetter {
+	return &scaleClient{
+		mapper: mapper,
+
+		apiPathResolverFunc: resolver,
+		scaleKindResolver:   scaleKindResolver,
+		clientBase:          baseClient,
+	}
+}
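To see how NewForConfig is typically wired up, here is a hedged sketch built from standard client-go discovery helpers (restmapper.GetAPIGroupResources, restmapper.NewDiscoveryRESTMapper, scale.NewDiscoveryScaleKindResolver, and dynamic.LegacyAPIPathResolverFunc are existing client-go APIs, but the wiring shown is illustrative rather than taken from this diff):

package example

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/restmapper"
	"k8s.io/client-go/scale"
	"k8s.io/client-go/tools/clientcmd"
)

func newScalesGetter(kubeconfig string) (scale.ScalesGetter, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
	if err != nil {
		return nil, err
	}
	groupResources, err := restmapper.GetAPIGroupResources(dc)
	if err != nil {
		return nil, err
	}
	mapper := restmapper.NewDiscoveryRESTMapper(groupResources)
	// The path resolver picks /api vs /apis; the kind resolver discovers which
	// group/version of Scale each resource's /scale endpoint speaks.
	return scale.NewForConfig(cfg, mapper, dynamic.LegacyAPIPathResolverFunc, scale.NewDiscoveryScaleKindResolver(dc))
}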
+// apiPathFor returns the absolute api path for the given GroupVersion
+func (c *scaleClient) apiPathFor(groupVer schema.GroupVersion) string {
+	// we need to set the API path based on GroupVersion (defaulting to the legacy path if none is set)
+	// TODO: we "cheat" here since the API path really only depends on group ATM, but this should
+	// *probably* take GroupVersionResource and not GroupVersionKind.
+	apiPath := c.apiPathResolverFunc(groupVer.WithKind(""))
+	if apiPath == "" {
+		apiPath = "/api"
+	}
+
+	return restclient.DefaultVersionedAPIPath(apiPath, groupVer)
+}
+
+// pathAndVersionFor returns the appropriate base path and the associated full GroupVersionResource
+// for the given GroupResource
+func (c *scaleClient) pathAndVersionFor(resource schema.GroupResource) (string, schema.GroupVersionResource, error) {
+	gvr, err := c.mapper.ResourceFor(resource.WithVersion(""))
+	if err != nil {
+		return "", gvr, fmt.Errorf("unable to get full preferred group-version-resource for %s: %v", resource.String(), err)
+	}
+
+	groupVer := gvr.GroupVersion()
+
+	return c.apiPathFor(groupVer), gvr, nil
+}
+
+// namespacedScaleClient is a ScaleInterface for fetching
+// Scales in a given namespace.
+type namespacedScaleClient struct {
+	client    *scaleClient
+	namespace string
+}
+
+// convertToScale converts the response body to autoscaling/v1.Scale
+func convertToScale(result *restclient.Result) (*autoscaling.Scale, error) {
+	scaleBytes, err := result.Raw()
+	if err != nil {
+		return nil, err
+	}
+	decoder := scaleConverter.codecs.UniversalDecoder(scaleConverter.ScaleVersions()...)
+	rawScaleObj, err := runtime.Decode(decoder, scaleBytes)
+	if err != nil {
+		return nil, err
+	}
+
+	// convert whatever this is to autoscaling/v1.Scale
+	scaleObj, err := scaleConverter.ConvertToVersion(rawScaleObj, autoscaling.SchemeGroupVersion)
+	if err != nil {
+		return nil, fmt.Errorf("received an object from a /scale endpoint which was not convertible to autoscaling Scale: %v", err)
+	}
+
+	return scaleObj.(*autoscaling.Scale), nil
+}
+
+func (c *scaleClient) Scales(namespace string) ScaleInterface {
+	return &namespacedScaleClient{
+		client:    c,
+		namespace: namespace,
+	}
+}
+
+func (c *namespacedScaleClient) Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscaling.Scale, error) {
+	// Currently, a /scale endpoint can return different scale types.
+	// Until we have support for the alternative API representations proposal,
+	// we need to deal with accepting different API versions.
+	// In practice, this is autoscaling/v1.Scale and extensions/v1beta1.Scale
+
+	path, gvr, err := c.client.pathAndVersionFor(resource)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err)
+	}
+
+	result := c.client.clientBase.Get().
+		AbsPath(path).
+		NamespaceIfScoped(c.namespace, c.namespace != "").
+		Resource(gvr.Resource).
+		Name(name).
+		SubResource("scale").
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+
+	return convertToScale(&result)
+}
+
+func (c *namespacedScaleClient) Update(ctx context.Context, resource schema.GroupResource, scale *autoscaling.Scale, opts metav1.UpdateOptions) (*autoscaling.Scale, error) {
+	path, gvr, err := c.client.pathAndVersionFor(resource)
+	if err != nil {
+		return nil, fmt.Errorf("unable to get client for %s: %v", resource.String(), err)
+	}
+
+	// Currently, a /scale endpoint can receive and return different scale types.
+	// Until we have support for the alternative API representations proposal,
+	// we need to deal with sending and accepting different API versions.
+
+	// figure out what scale we actually need here
+	desiredGVK, err := c.client.scaleKindResolver.ScaleForResource(gvr)
+	if err != nil {
+		return nil, fmt.Errorf("could not find proper group-version for scale subresource of %s: %v", gvr.String(), err)
+	}
+
+	// convert this to whatever this endpoint wants
+	scaleUpdate, err := scaleConverter.ConvertToVersion(scale, desiredGVK.GroupVersion())
+	if err != nil {
+		return nil, fmt.Errorf("could not convert scale update to external Scale: %v", err)
+	}
+	encoder := scaleConverter.codecs.LegacyCodec(desiredGVK.GroupVersion())
+	scaleUpdateBytes, err := runtime.Encode(encoder, scaleUpdate)
+	if err != nil {
+		return nil, fmt.Errorf("could not encode scale update to external Scale: %v", err)
+	}
+
+	result := c.client.clientBase.Put().
+		AbsPath(path).
+		NamespaceIfScoped(c.namespace, c.namespace != "").
+		Resource(gvr.Resource).
+		Name(scale.Name).
+		SubResource("scale").
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Body(scaleUpdateBytes).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		// propagate "raw" error from the API
+		// this allows callers to interpret underlying Reason field
+		// for example: errors.IsConflict(err)
+		return nil, err
+	}
+
+	return convertToScale(&result)
+}
+
+func (c *namespacedScaleClient) Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*autoscaling.Scale, error) {
+	groupVersion := gvr.GroupVersion()
+	result := c.client.clientBase.Patch(pt).
+		AbsPath(c.client.apiPathFor(groupVersion)).
+		NamespaceIfScoped(c.namespace, c.namespace != "").
+		Resource(gvr.Resource).
+		Name(name).
+		SubResource("scale").
+		SpecificallyVersionedParams(&opts, dynamicParameterCodec, versionV1).
+		Body(data).
+		Do(ctx)
+	if err := result.Error(); err != nil {
+		return nil, err
+	}
+
+	return convertToScale(&result)
+}
diff --git a/vendor/k8s.io/client-go/scale/doc.go b/vendor/k8s.io/client-go/scale/doc.go
new file mode 100644
index 0000000000..b6fa3f5f20
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package scale provides a polymorphic scale client capable of fetching
+// and updating Scale for any resource which implements the `scale` subresource,
+// as long as that subresource operates on a version of scale convertible to
+// autoscaling.Scale.
+package scale // import "k8s.io/client-go/scale"
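A short usage sketch for the polymorphic client described above (the namespace "default" and the name "my-app" are placeholders): whatever scale version the resource's /scale endpoint speaks, callers always read and write autoscaling/v1.

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/scale"
)

func scaleUp(ctx context.Context, scales scale.ScalesGetter) error {
	deployments := schema.GroupResource{Group: "apps", Resource: "deployments"}
	s, err := scales.Scales("default").Get(ctx, deployments, "my-app", metav1.GetOptions{})
	if err != nil {
		return err
	}
	s.Spec.Replicas++ // always autoscaling/v1, regardless of what the endpoint speaks
	_, err = scales.Scales("default").Update(ctx, deployments, s, metav1.UpdateOptions{})
	return err
}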
diff --git a/vendor/k8s.io/client-go/scale/interfaces.go b/vendor/k8s.io/client-go/scale/interfaces.go
new file mode 100644
index 0000000000..a7bb3e6cb5
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/interfaces.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scale
+
+import (
+	"context"
+
+	autoscalingapi "k8s.io/api/autoscaling/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+)
+
+// ScalesGetter can produce a ScaleInterface
+type ScalesGetter interface {
+	// Scales produces a ScaleInterface for a particular namespace.
+	// Set namespace to the empty string for non-namespaced resources.
+	Scales(namespace string) ScaleInterface
+}
+
+// ScaleInterface can fetch and update scales for
+// resources in a particular namespace which implement
+// the scale subresource.
+type ScaleInterface interface {
+	// Get fetches the scale of the given scalable resource.
+	Get(ctx context.Context, resource schema.GroupResource, name string, opts metav1.GetOptions) (*autoscalingapi.Scale, error)
+
+	// Update updates the scale of the given scalable resource.
+	Update(ctx context.Context, resource schema.GroupResource, scale *autoscalingapi.Scale, opts metav1.UpdateOptions) (*autoscalingapi.Scale, error)
+
+	// Patch patches the scale of the given scalable resource.
+	Patch(ctx context.Context, gvr schema.GroupVersionResource, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions) (*autoscalingapi.Scale, error)
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go
new file mode 100644
index 0000000000..16f29e2afe
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsint/doc.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package appsint contains the necessary scaffolding of the
+// internal version of apps as required by conversion logic.
+// It doesn't have any of its own types -- it's just necessary to
+// get the expected behavior out of runtime.Scheme.ConvertToVersion
+// and associated methods.
+package appsint
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsint/register.go b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go
new file mode 100644
index 0000000000..d3a76b518b
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsint/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsint
+
+import (
+	appsv1beta2 "k8s.io/api/apps/v1beta2"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	scalescheme "k8s.io/client-go/scale/scheme"
+)
+
+// GroupName is the group name used in this package
+const GroupName = appsv1beta2.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&scalescheme.Scale{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go
new file mode 100644
index 0000000000..f271c82592
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/conversion.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package appsv1beta1 + +import ( + "fmt" + + v1beta1 "k8s.io/api/apps/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go new file mode 100644 index 0000000000..830619b449 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta1 + +package appsv1beta1 // import "k8s.io/client-go/scale/scheme/appsv1beta1" diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go new file mode 100644 index 0000000000..f11fcbd009 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsv1beta1
+
+import (
+	appsapiv1beta1 "k8s.io/api/apps/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = appsapiv1beta1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &appsapiv1beta1.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register()
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..d56861ead5
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta1/zz_generated.conversion.go
@@ -0,0 +1,134 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package appsv1beta1
+
+import (
+	v1beta1 "k8s.io/api/apps/v1beta1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. 
+func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function. +func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go new file mode 100644 index 0000000000..35d15c30d4 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/conversion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package appsv1beta2 + +import ( + "fmt" + + v1beta2 "k8s.io/api/apps/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). 
However, these fields are readonly, so any non-nil value should work.
+	if in.TargetSelector != "" {
+		labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector)
+		if err != nil {
+			out.Selector = nil
+			return fmt.Errorf("failed to parse target selector: %v", err)
+		}
+		out.Selector = labelSelector
+	} else if in.Selector != nil {
+		out.Selector = new(metav1.LabelSelector)
+		selector := make(map[string]string)
+		for key, val := range in.Selector {
+			selector[key] = val
+		}
+		out.Selector.MatchLabels = selector
+	} else {
+		out.Selector = nil
+	}
+
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go
new file mode 100644
index 0000000000..c21a56d569
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme
+// +k8s:conversion-gen-external-types=k8s.io/api/apps/v1beta2
+
+package appsv1beta2 // import "k8s.io/client-go/scale/scheme/appsv1beta2"
diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go
new file mode 100644
index 0000000000..5e8a5d2006
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/register.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package appsv1beta2
+
+import (
+	appsapiv1beta2 "k8s.io/api/apps/v1beta2"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = appsapiv1beta2.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &appsapiv1beta2.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+ localSchemeBuilder.Register() +} diff --git a/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go new file mode 100644 index 0000000000..09c17420e0 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/appsv1beta2/zz_generated.conversion.go @@ -0,0 +1,134 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package appsv1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + scheme "k8s.io/client-go/scale/scheme" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta2.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_Scale_To_scheme_Scale(a.(*v1beta2.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta2.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1beta2_Scale(a.(*scheme.Scale), b.(*v1beta2.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta2.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta2.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta2.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta2.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta2.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta2.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta2.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta2.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := 
Convert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta2_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta2_Scale_To_scheme_Scale(in *v1beta2.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta2_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta2_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta2_Scale(in *scheme.Scale, out *v1beta2.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta2_Scale(in, out, s) +} + +func autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. +func Convert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in *v1beta2.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1beta2_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec is an autogenerated conversion function. +func Convert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in *scheme.ScaleSpec, out *v1beta2.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1beta2_ScaleSpec(in, out, s) +} + +func autoConvert_v1beta2_ScaleStatus_To_scheme_ScaleStatus(in *v1beta2.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + // WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1beta2_ScaleStatus(in *scheme.ScaleStatus, out *v1beta2.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go new file mode 100644 index 0000000000..36ef82b92d --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/conversion.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscalingv1 + +import ( + "fmt" + + v1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = "" + if in.Selector != nil { + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.Selector = selector.String() + } + + return nil +} + +func Convert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + if in.Selector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.Selector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go new file mode 100644 index 0000000000..03684dd90d --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/autoscaling/v1 + +package autoscalingv1 // import "k8s.io/client-go/scale/scheme/autoscalingv1" diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go new file mode 100644 index 0000000000..4339376c93 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package autoscalingv1
+
+import (
+	autoscalingapiv1 "k8s.io/api/autoscaling/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = autoscalingapiv1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &autoscalingapiv1.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register()
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go
new file mode 100644
index 0000000000..09e73584eb
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/autoscalingv1/zz_generated.conversion.go
@@ -0,0 +1,133 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package autoscalingv1
+
+import (
+	v1 "k8s.io/api/autoscaling/v1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Scale_To_scheme_Scale(a.(*v1.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1_Scale(a.(*scheme.Scale), b.(*v1.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1_Scale_To_scheme_Scale(in *v1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1_Scale(in *scheme.Scale, out *v1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1_Scale(in, out, s) +} + +func autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. 
+func Convert_v1_ScaleSpec_To_scheme_ScaleSpec(in *v1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + return autoConvert_v1_ScaleSpec_To_scheme_ScaleSpec(in, out, s) +} + +func autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_scheme_ScaleSpec_To_v1_ScaleSpec is an autogenerated conversion function. +func Convert_scheme_ScaleSpec_To_v1_ScaleSpec(in *scheme.ScaleSpec, out *v1.ScaleSpec, s conversion.Scope) error { + return autoConvert_scheme_ScaleSpec_To_v1_ScaleSpec(in, out, s) +} + +func autoConvert_v1_ScaleStatus_To_scheme_ScaleStatus(in *v1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector) + return nil +} + +func autoConvert_scheme_ScaleStatus_To_v1_ScaleStatus(in *scheme.ScaleStatus, out *v1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + // WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs string) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/doc.go b/vendor/k8s.io/client-go/scale/scheme/doc.go new file mode 100644 index 0000000000..0203d6d5a2 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +// Package scheme contains a runtime.Scheme to be used for serializing +// and deserializing different versions of Scale, and for converting +// in between them. +package scheme diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go new file mode 100644 index 0000000000..9aaac60861 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package extensionsint contains the necessary scaffolding of the +// internal version of extensions as required by conversion logic. +// It doesn't have any of its own types -- it's just necessary to +// get the expected behavior out of runtime.Scheme.ConvertToVersion +// and associated methods. 
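+//
+// A hedged sketch of how this scaffolding is typically used (aliases follow
+// k8s.io/client-go/scale/util.go; the scheme value is assumed):
+//
+//	s := runtime.NewScheme()
+//	utilruntime.Must(scalescheme.AddToScheme(s)) // internal "hub" Scale type
+//	utilruntime.Must(AddToScheme(s))             // this internal extensions group
+//	// s.ConvertToVersion can now route extensions Scales through the hub.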
+package extensionsint
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go
new file mode 100644
index 0000000000..570a8a54ab
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsint/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package extensionsint
+
+import (
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	scalescheme "k8s.io/client-go/scale/scheme"
+)
+
+// GroupName is the group name used in this package
+const GroupName = extensionsv1beta1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&scalescheme.Scale{},
+	)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go
new file mode 100644
index 0000000000..821eb33d7d
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/conversion.go
@@ -0,0 +1,73 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package extensionsv1beta1 + +import ( + "fmt" + + v1beta1 "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + scheme "k8s.io/client-go/scale/scheme" +) + +func Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.Selector = nil + out.TargetSelector = "" + if in.Selector != nil { + if in.Selector.MatchExpressions == nil || len(in.Selector.MatchExpressions) == 0 { + out.Selector = in.Selector.MatchLabels + } + + selector, err := metav1.LabelSelectorAsSelector(in.Selector) + if err != nil { + return fmt.Errorf("invalid label selector: %v", err) + } + out.TargetSelector = selector.String() + } + + return nil +} + +func Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + + // Normally when 2 fields map to the same internal value we favor the old field, since + // old clients can't be expected to know about new fields but clients that know about the + // new field can be expected to know about the old field (though that's not quite true, due + // to kubectl apply). However, these fields are readonly, so any non-nil value should work. + if in.TargetSelector != "" { + labelSelector, err := metav1.ParseToLabelSelector(in.TargetSelector) + if err != nil { + out.Selector = nil + return fmt.Errorf("failed to parse target selector: %v", err) + } + out.Selector = labelSelector + } else if in.Selector != nil { + out.Selector = new(metav1.LabelSelector) + selector := make(map[string]string) + for key, val := range in.Selector { + selector[key] = val + } + out.Selector.MatchLabels = selector + } else { + out.Selector = nil + } + + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go new file mode 100644 index 0000000000..1e719884f0 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/vendor/k8s.io/client-go/scale/scheme +// +k8s:conversion-gen-external-types=k8s.io/api/extensions/v1beta1 + +package extensionsv1beta1 // import "k8s.io/client-go/scale/scheme/extensionsv1beta1" diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go new file mode 100644 index 0000000000..248a007127 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package extensionsv1beta1
+
+import (
+	extensionsapiv1beta1 "k8s.io/api/extensions/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = extensionsapiv1beta1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	localSchemeBuilder = &extensionsapiv1beta1.SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+func init() {
+	// We only register manually written functions here. The registration of the
+	// generated functions takes place in the generated files. The separation
+	// makes the code compile even when the generated files are missing.
+	localSchemeBuilder.Register()
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go
new file mode 100644
index 0000000000..46b29f171f
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/extensionsv1beta1/zz_generated.conversion.go
@@ -0,0 +1,134 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by conversion-gen. DO NOT EDIT.
+
+package extensionsv1beta1
+
+import (
+	v1beta1 "k8s.io/api/extensions/v1beta1"
+	conversion "k8s.io/apimachinery/pkg/conversion"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+	scheme "k8s.io/client-go/scale/scheme"
+)
+
+func init() {
+	localSchemeBuilder.Register(RegisterConversions)
+}
+
+// RegisterConversions adds conversion functions to the given scheme.
+// Public to allow building arbitrary schemes.
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1beta1.Scale)(nil), (*scheme.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_Scale_To_scheme_Scale(a.(*v1beta1.Scale), b.(*scheme.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.Scale)(nil), (*v1beta1.Scale)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_Scale_To_v1beta1_Scale(a.(*scheme.Scale), b.(*v1beta1.Scale), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1beta1.ScaleSpec)(nil), (*scheme.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(a.(*v1beta1.ScaleSpec), b.(*scheme.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*scheme.ScaleSpec)(nil), (*v1beta1.ScaleSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(a.(*scheme.ScaleSpec), b.(*v1beta1.ScaleSpec), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*scheme.ScaleStatus)(nil), (*v1beta1.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(a.(*scheme.ScaleStatus), b.(*v1beta1.ScaleStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*v1beta1.ScaleStatus)(nil), (*scheme.ScaleStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(a.(*v1beta1.ScaleStatus), b.(*scheme.ScaleStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1beta1_Scale_To_scheme_Scale is an autogenerated conversion function. +func Convert_v1beta1_Scale_To_scheme_Scale(in *v1beta1.Scale, out *scheme.Scale, s conversion.Scope) error { + return autoConvert_v1beta1_Scale_To_scheme_Scale(in, out, s) +} + +func autoConvert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_scheme_Scale_To_v1beta1_Scale is an autogenerated conversion function. +func Convert_scheme_Scale_To_v1beta1_Scale(in *scheme.Scale, out *v1beta1.Scale, s conversion.Scope) error { + return autoConvert_scheme_Scale_To_v1beta1_Scale(in, out, s) +} + +func autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error { + out.Replicas = in.Replicas + return nil +} + +// Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec is an autogenerated conversion function. 
+func Convert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in *v1beta1.ScaleSpec, out *scheme.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_v1beta1_ScaleSpec_To_scheme_ScaleSpec(in, out, s)
+}
+
+func autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	return nil
+}
+
+// Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec is an autogenerated conversion function.
+func Convert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in *scheme.ScaleSpec, out *v1beta1.ScaleSpec, s conversion.Scope) error {
+	return autoConvert_scheme_ScaleSpec_To_v1beta1_ScaleSpec(in, out, s)
+}
+
+func autoConvert_v1beta1_ScaleStatus_To_scheme_ScaleStatus(in *v1beta1.ScaleStatus, out *scheme.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (map[string]string vs *k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector)
+	// WARNING: in.TargetSelector requires manual conversion: does not exist in peer-type
+	return nil
+}
+
+func autoConvert_scheme_ScaleStatus_To_v1beta1_ScaleStatus(in *scheme.ScaleStatus, out *v1beta1.ScaleStatus, s conversion.Scope) error {
+	out.Replicas = in.Replicas
+	// WARNING: in.Selector requires manual conversion: inconvertible types (*k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector vs map[string]string)
+	return nil
+}
diff --git a/vendor/k8s.io/client-go/scale/scheme/register.go b/vendor/k8s.io/client-go/scale/scheme/register.go
new file mode 100644
index 0000000000..4339e6173b
--- /dev/null
+++ b/vendor/k8s.io/client-go/scale/scheme/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheme
+
+import (
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name used in this package
+const GroupName = autoscalingv1.GroupName
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
+
+// Kind takes an unqualified kind and returns a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+	return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// SchemeBuilder points to a list of functions added to Scheme.
+	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+	// AddToScheme applies all the stored functions to the scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Scale{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/scale/scheme/types.go b/vendor/k8s.io/client-go/scale/scheme/types.go new file mode 100644 index 0000000000..5c5d0a6f23 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/types.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// This file contains our own "internal" version of scale that we use for conversions, +// since we can't use the main Kubernetes internal versions. + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector +} diff --git a/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go new file mode 100644 index 0000000000..6ee3c2071d --- /dev/null +++ b/vendor/k8s.io/client-go/scale/scheme/zz_generated.deepcopy.go @@ -0,0 +1,92 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/scale/util.go b/vendor/k8s.io/client-go/scale/util.go new file mode 100644 index 0000000000..2f43a7a795 --- /dev/null +++ b/vendor/k8s.io/client-go/scale/util.go @@ -0,0 +1,197 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package scale + +import ( + "fmt" + "strings" + "sync" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/discovery" + scalescheme "k8s.io/client-go/scale/scheme" + scaleappsint "k8s.io/client-go/scale/scheme/appsint" + scaleappsv1beta1 "k8s.io/client-go/scale/scheme/appsv1beta1" + scaleappsv1beta2 "k8s.io/client-go/scale/scheme/appsv1beta2" + scaleautoscaling "k8s.io/client-go/scale/scheme/autoscalingv1" + scaleextint "k8s.io/client-go/scale/scheme/extensionsint" + scaleext "k8s.io/client-go/scale/scheme/extensionsv1beta1" +) + +// PreferredResourceMapper determines the preferred version of a resource to scale +type PreferredResourceMapper interface { + // ResourceFor takes a partial resource and returns the preferred resource. + ResourceFor(resource schema.GroupVersionResource) (preferredResource schema.GroupVersionResource, err error) +} + +// Ensure a RESTMapper satisfies the PreferredResourceMapper interface +var _ PreferredResourceMapper = meta.RESTMapper(nil) + +// ScaleKindResolver knows about the relationship between +// resources and the GroupVersionKind of their scale subresources. +type ScaleKindResolver interface { + // ScaleForResource returns the GroupVersionKind of the + // scale subresource for the given GroupVersionResource. + ScaleForResource(resource schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) +} + +// discoveryScaleResolver is a ScaleKindResolver that uses +// a DiscoveryInterface to associate resources with their +// scale-kinds +type discoveryScaleResolver struct { + discoveryClient discovery.ServerResourcesInterface +} + +func (r *discoveryScaleResolver) ScaleForResource(inputRes schema.GroupVersionResource) (scaleVersion schema.GroupVersionKind, err error) { + groupVerResources, err := r.discoveryClient.ServerResourcesForGroupVersion(inputRes.GroupVersion().String()) + if err != nil { + return schema.GroupVersionKind{}, fmt.Errorf("unable to fetch discovery information for %s: %v", inputRes.String(), err) + } + + for _, resource := range groupVerResources.APIResources { + resourceParts := strings.SplitN(resource.Name, "/", 2) + if len(resourceParts) != 2 || resourceParts[0] != inputRes.Resource || resourceParts[1] != "scale" { + // skip non-scale resources, or scales for resources that we're not looking for + continue + } + + scaleGV := inputRes.GroupVersion() + if resource.Group != "" && resource.Version != "" { + scaleGV = schema.GroupVersion{ + Group: resource.Group, + Version: resource.Version, + } + } + + return scaleGV.WithKind(resource.Kind), nil + } + + return schema.GroupVersionKind{}, fmt.Errorf("could not find scale subresource for %s in discovery information", inputRes.String()) +} + +// cachedScaleKindResolver is a ScaleKindResolver that caches results +// from another ScaleKindResolver, re-fetching on cache misses. 
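+//
+// Illustrative use (discoveryClient is an assumed caller-provided value, not
+// part of this file; the resolver is constructed via
+// NewDiscoveryScaleKindResolver below):
+//
+//	resolver := NewDiscoveryScaleKindResolver(discoveryClient)
+//	gvk, err := resolver.ScaleForResource(schema.GroupVersionResource{
+//		Group: "apps", Version: "v1", Resource: "deployments",
+//	})
+//	// repeat lookups for the same resource are served from the cache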
+type cachedScaleKindResolver struct {
+	base ScaleKindResolver
+
+	cache map[schema.GroupVersionResource]schema.GroupVersionKind
+	mu    sync.RWMutex
+}
+
+func (r *cachedScaleKindResolver) ScaleForResource(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) {
+	r.mu.RLock()
+	gvk, isCached := r.cache[resource]
+	r.mu.RUnlock()
+	if isCached {
+		return gvk, nil
+	}
+
+	// we could have multiple fetches of the same resources, but that's probably
+	// better than limiting to only one reader at once (mu.Mutex),
+	// or blocking checks for other resources while we fetch
+	// (mu.Lock before fetch).
+	gvk, err := r.base.ScaleForResource(resource)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+
+	r.mu.Lock()
+	defer r.mu.Unlock()
+	r.cache[resource] = gvk
+
+	return gvk, nil
+}
+
+// NewDiscoveryScaleKindResolver creates a new ScaleKindResolver which uses information from the given
+// discovery client to resolve the correct Scale GroupVersionKind for different resources.
+func NewDiscoveryScaleKindResolver(client discovery.ServerResourcesInterface) ScaleKindResolver {
+	base := &discoveryScaleResolver{
+		discoveryClient: client,
+	}
+
+	return &cachedScaleKindResolver{
+		base:  base,
+		cache: make(map[schema.GroupVersionResource]schema.GroupVersionKind),
+	}
+}
+
+// ScaleConverter knows how to convert between external scale versions.
+type ScaleConverter struct {
+	scheme            *runtime.Scheme
+	codecs            serializer.CodecFactory
+	internalVersioner runtime.GroupVersioner
+}
+
+// NewScaleConverter creates a new ScaleConverter for converting between
+// Scales in autoscaling/v1 and extensions/v1beta1.
+func NewScaleConverter() *ScaleConverter {
+	scheme := runtime.NewScheme()
+	utilruntime.Must(scaleautoscaling.AddToScheme(scheme))
+	utilruntime.Must(scalescheme.AddToScheme(scheme))
+	utilruntime.Must(scaleext.AddToScheme(scheme))
+	utilruntime.Must(scaleextint.AddToScheme(scheme))
+	utilruntime.Must(scaleappsint.AddToScheme(scheme))
+	utilruntime.Must(scaleappsv1beta1.AddToScheme(scheme))
+	utilruntime.Must(scaleappsv1beta2.AddToScheme(scheme))
+
+	return &ScaleConverter{
+		scheme: scheme,
+		codecs: serializer.NewCodecFactory(scheme),
+		internalVersioner: runtime.NewMultiGroupVersioner(
+			scalescheme.SchemeGroupVersion,
+			schema.GroupKind{Group: scaleext.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleautoscaling.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleappsv1beta1.GroupName, Kind: "Scale"},
+			schema.GroupKind{Group: scaleappsv1beta2.GroupName, Kind: "Scale"},
+		),
+	}
+}
+
+// Scheme returns the scheme used by this scale converter.
+func (c *ScaleConverter) Scheme() *runtime.Scheme {
+	return c.scheme
+}
+
+// Codecs returns the codec factory used by this scale converter.
+func (c *ScaleConverter) Codecs() serializer.CodecFactory {
+	return c.codecs
+}
+
+// ScaleVersions returns the group-versions of Scale known to this converter.
+func (c *ScaleConverter) ScaleVersions() []schema.GroupVersion {
+	return []schema.GroupVersion{
+		scaleautoscaling.SchemeGroupVersion,
+		scalescheme.SchemeGroupVersion,
+		scaleext.SchemeGroupVersion,
+		scaleextint.SchemeGroupVersion,
+		scaleappsint.SchemeGroupVersion,
+		scaleappsv1beta1.SchemeGroupVersion,
+		scaleappsv1beta2.SchemeGroupVersion,
+	}
+}
+
+// ConvertToVersion converts the given *external* input object to the given *external* output group-version.
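+//
+// For example (a sketch; extScale is an assumed *extensions/v1beta1 Scale
+// value, not defined in this file):
+//
+//	c := NewScaleConverter()
+//	out, err := c.ConvertToVersion(extScale, scaleautoscaling.SchemeGroupVersion)
+//	// out is an autoscaling/v1 Scale, converted via the internal hub type.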
+func (c *ScaleConverter) ConvertToVersion(in runtime.Object, outVersion schema.GroupVersion) (runtime.Object, error) { + scaleInt, err := c.scheme.ConvertToVersion(in, c.internalVersioner) + if err != nil { + return nil, err + } + + return c.scheme.ConvertToVersion(scaleInt, outVersion) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/doc.go b/vendor/k8s.io/client-go/tools/remotecommand/doc.go new file mode 100644 index 0000000000..ac06a9cd37 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package remotecommand adds support for executing commands in containers, +// with support for separate stdin, stdout, and stderr streams, as well as +// TTY. +package remotecommand // import "k8s.io/client-go/tools/remotecommand" diff --git a/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go new file mode 100644 index 0000000000..e60dd7cdc7 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/errorstream.go @@ -0,0 +1,54 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + + "k8s.io/apimachinery/pkg/util/runtime" +) + +// errorStreamDecoder interprets the data on the error channel and creates a go error object from it. +type errorStreamDecoder interface { + decode(message []byte) error +} + +// watchErrorStream watches the errorStream for remote command error data, +// decodes it with the given errorStreamDecoder, sends the decoded error (or nil if the remote +// command exited successfully) to the returned error channel, and closes it. +// This function returns immediately. 
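+//
+// A minimal caller-side sketch (errStream is assumed; errorDecoderV2 is
+// defined in v2.go):
+//
+//	errCh := watchErrorStream(errStream, &errorDecoderV2{})
+//	if err := <-errCh; err != nil {
+//		// the remote command failed, or the error stream could not be read
+//	}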
+func watchErrorStream(errorStream io.Reader, d errorStreamDecoder) chan error {
+	errorChan := make(chan error)
+
+	go func() {
+		defer runtime.HandleCrash()
+
+		message, err := io.ReadAll(errorStream)
+		switch {
+		case err != nil && err != io.EOF:
+			errorChan <- fmt.Errorf("error reading from error stream: %s", err)
+		case len(message) > 0:
+			errorChan <- d.decode(message)
+		default:
+			errorChan <- nil
+		}
+		close(errorChan)
+	}()
+
+	return errorChan
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/fallback.go b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go
new file mode 100644
index 0000000000..4846cdb550
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/fallback.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"context"
+)
+
+var _ Executor = &fallbackExecutor{}
+
+type fallbackExecutor struct {
+	primary        Executor
+	secondary      Executor
+	shouldFallback func(error) bool
+}
+
+// NewFallbackExecutor creates an Executor that first attempts to use the
+// WebSocketExecutor, falling back to the legacy SPDYExecutor if the initial
+// websocket "StreamWithContext" call fails.
+func NewFallbackExecutor(primary, secondary Executor, shouldFallback func(error) bool) (Executor, error) {
+	return &fallbackExecutor{
+		primary:        primary,
+		secondary:      secondary,
+		shouldFallback: shouldFallback,
+	}, nil
+}
+
+// Stream is deprecated. Please use "StreamWithContext".
+func (f *fallbackExecutor) Stream(options StreamOptions) error {
+	return f.StreamWithContext(context.Background(), options)
+}
+
+// StreamWithContext initially attempts to call "StreamWithContext" using the
+// primary executor, falling back to calling the secondary executor if the
+// initial primary call to upgrade to a websocket connection fails.
+func (f *fallbackExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
+	err := f.primary.StreamWithContext(ctx, options)
+	if f.shouldFallback(err) {
+		return f.secondary.StreamWithContext(ctx, options)
+	}
+	return err
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/reader.go b/vendor/k8s.io/client-go/tools/remotecommand/reader.go
new file mode 100644
index 0000000000..d1f1be34c9
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/reader.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "io" +) + +// readerWrapper delegates to an io.Reader so that only the io.Reader interface is implemented, +// to keep io.Copy from doing things we don't want when copying from the reader to the data stream. +// +// If the Stdin io.Reader provided to remotecommand implements a WriteTo function (like bytes.Buffer does[1]), +// io.Copy calls that method[2] to attempt to write the entire buffer to the stream in one call. +// That results in an oversized call to spdystream.Stream#Write [3], +// which results in a single oversized data frame[4] that is too large. +// +// [1] https://golang.org/pkg/bytes/#Buffer.WriteTo +// [2] https://golang.org/pkg/io/#Copy +// [3] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/stream.go#L66-L73 +// [4] https://github.com/kubernetes/kubernetes/blob/90295640ef87db9daa0144c5617afe889e7992b2/vendor/github.com/docker/spdystream/spdy/write.go#L302-L304 +type readerWrapper struct { + reader io.Reader +} + +func (r readerWrapper) Read(p []byte) (int, error) { + return r.reader.Read(p) +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go new file mode 100644 index 0000000000..1ae67729be --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -0,0 +1,58 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "context" + "io" + "net/http" + + "k8s.io/apimachinery/pkg/util/httpstream" +) + +// StreamOptions holds information pertaining to the current streaming session: +// input/output streams, if the client is requesting a TTY, and a terminal size queue to +// support terminal resizing. +type StreamOptions struct { + Stdin io.Reader + Stdout io.Writer + Stderr io.Writer + Tty bool + TerminalSizeQueue TerminalSizeQueue +} + +// Executor is an interface for transporting shell-style streams. +type Executor interface { + // Deprecated: use StreamWithContext instead to avoid possible resource leaks. + // See https://github.com/kubernetes/kubernetes/pull/103177 for details. + Stream(options StreamOptions) error + + // StreamWithContext initiates the transport of the standard shell streams. It will + // transport any non-nil stream to a remote system, and return an error if a problem + // occurs. If tty is set, the stderr stream is not used (raw TTY manages stdout and + // stderr over the stdout stream). + // The context controls the entire lifetime of stream execution. 
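+	//
+	// Caller-side sketch (exec is an assumed Executor value, not part of
+	// this file):
+	//
+	//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+	//	defer cancel()
+	//	err := exec.StreamWithContext(ctx, StreamOptions{Stdout: os.Stdout, Stderr: os.Stderr})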
+	StreamWithContext(ctx context.Context, options StreamOptions) error
+}
+
+type streamCreator interface {
+	CreateStream(headers http.Header) (httpstream.Stream, error)
+}
+
+type streamProtocolHandler interface {
+	stream(conn streamCreator) error
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/resize.go b/vendor/k8s.io/client-go/tools/remotecommand/resize.go
new file mode 100644
index 0000000000..c838f21ba6
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/resize.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+// TerminalSize and TerminalSizeQueue were part of k8s.io/kubernetes/pkg/util/term
+// and were moved in order to decouple the client from other term dependencies.
+
+// TerminalSize represents the width and height of a terminal.
+type TerminalSize struct {
+	Width  uint16
+	Height uint16
+}
+
+// TerminalSizeQueue is capable of returning terminal resize events as they occur.
+type TerminalSizeQueue interface {
+	// Next returns the new terminal size after the terminal has been resized. It returns nil when
+	// monitoring has been stopped.
+	Next() *TerminalSize
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/spdy.go b/vendor/k8s.io/client-go/tools/remotecommand/spdy.go
new file mode 100644
index 0000000000..c2bfcf8a65
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/spdy.go
@@ -0,0 +1,171 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"k8s.io/apimachinery/pkg/util/httpstream"
+	"k8s.io/apimachinery/pkg/util/remotecommand"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/client-go/transport/spdy"
+	"k8s.io/klog/v2"
+)
+
+// spdyStreamExecutor handles transporting standard shell streams over an httpstream connection.
+type spdyStreamExecutor struct {
+	upgrader  spdy.Upgrader
+	transport http.RoundTripper
+
+	method          string
+	url             *url.URL
+	protocols       []string
+	rejectRedirects bool // if true, receiving redirect from upstream is an error
+}
+
+// NewSPDYExecutor connects to the provided server and upgrades the connection to
+// multiplexed bidirectional streams.
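+//
+// Typical use (an illustrative sketch; config and execURL are assumed to come
+// from the caller, e.g. a pod exec subresource URL):
+//
+//	exec, err := NewSPDYExecutor(config, "POST", execURL)
+//	if err != nil {
+//		return err
+//	}
+//	err = exec.StreamWithContext(ctx, StreamOptions{Stdin: os.Stdin, Stdout: os.Stdout, Tty: true})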
+func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorRejectRedirects returns an Executor that will upgrade the future +// connection to a SPDY bi-directional streaming connection when calling "Stream" (deprecated) +// or "StreamWithContext" (preferred). Additionally, if the upstream server returns a redirect +// during the attempted upgrade in these "Stream" calls, an error is returned. +func NewSPDYExecutorRejectRedirects(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + executor, err := NewSPDYExecutorForTransports(transport, upgrader, method, url) + if err != nil { + return nil, err + } + spdyExecutor := executor.(*spdyStreamExecutor) + spdyExecutor.rejectRedirects = true + return spdyExecutor, nil +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { + return NewSPDYExecutorForProtocols( + transport, upgrader, method, url, + remotecommand.StreamProtocolV5Name, + remotecommand.StreamProtocolV4Name, + remotecommand.StreamProtocolV3Name, + remotecommand.StreamProtocolV2Name, + remotecommand.StreamProtocolV1Name, + ) +} + +// NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to +// multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { + return &spdyStreamExecutor{ + upgrader: upgrader, + transport: transport, + method: method, + url: url, + protocols: protocols, + }, nil +} + +// Stream opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects. +func (e *spdyStreamExecutor) Stream(options StreamOptions) error { + return e.StreamWithContext(context.Background(), options) +} + +// newConnectionAndStream creates a new SPDY connection and a stream protocol handler upon it. 
+func (e *spdyStreamExecutor) newConnectionAndStream(ctx context.Context, options StreamOptions) (httpstream.Connection, streamProtocolHandler, error) { + req, err := http.NewRequestWithContext(ctx, e.method, e.url.String(), nil) + if err != nil { + return nil, nil, fmt.Errorf("error creating request: %v", err) + } + + client := http.Client{Transport: e.transport} + if e.rejectRedirects { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + return fmt.Errorf("redirect not allowed") + } + } + conn, protocol, err := spdy.Negotiate( + e.upgrader, + &client, + req, + e.protocols..., + ) + if err != nil { + return nil, nil, err + } + + var streamer streamProtocolHandler + + switch protocol { + case remotecommand.StreamProtocolV5Name: + streamer = newStreamProtocolV5(options) + case remotecommand.StreamProtocolV4Name: + streamer = newStreamProtocolV4(options) + case remotecommand.StreamProtocolV3Name: + streamer = newStreamProtocolV3(options) + case remotecommand.StreamProtocolV2Name: + streamer = newStreamProtocolV2(options) + case "": + klog.V(4).Infof("The server did not negotiate a streaming protocol version. Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + return conn, streamer, nil +} + +// StreamWithContext opens a protocol streamer to the server and streams until a client closes +// the connection or the server disconnects or the context is done. +func (e *spdyStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error { + conn, streamer, err := e.newConnectionAndStream(ctx, options) + if err != nil { + return err + } + defer conn.Close() + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + errorChan <- streamer.stream(conn) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v1.go b/vendor/k8s.io/client-go/tools/remotecommand/v1.go new file mode 100644 index 0000000000..efa9a6c990 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v1.go @@ -0,0 +1,159 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "fmt" + "io" + "net/http" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/klog/v2" +) + +// streamProtocolV1 implements the first version of the streaming exec & attach +// protocol. This version has some bugs, such as not being able to detect when +// non-interactive stdin data has ended. See https://issues.k8s.io/13394 and +// https://issues.k8s.io/13395 for more details. 
+type streamProtocolV1 struct { + StreamOptions + + errorStream httpstream.Stream + remoteStdin httpstream.Stream + remoteStdout httpstream.Stream + remoteStderr httpstream.Stream +} + +var _ streamProtocolHandler = &streamProtocolV1{} + +func newStreamProtocolV1(options StreamOptions) streamProtocolHandler { + return &streamProtocolV1{ + StreamOptions: options, + } +} + +func (p *streamProtocolV1) stream(conn streamCreator) error { + doneChan := make(chan struct{}, 2) + errorChan := make(chan error) + + cp := func(s string, dst io.Writer, src io.Reader) { + klog.V(6).Infof("Copying %s", s) + defer klog.V(6).Infof("Done copying %s", s) + if _, err := io.Copy(dst, src); err != nil && err != io.EOF { + klog.Errorf("Error copying %s: %v", s, err) + } + if s == v1.StreamTypeStdout || s == v1.StreamTypeStderr { + doneChan <- struct{}{} + } + } + + // set up all the streams first + var err error + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeError) + p.errorStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.errorStream.Reset() + + // Create all the streams first, then start the copy goroutines. The server doesn't start its copy + // goroutines until it's received all of the streams. If the client creates the stdin stream and + // immediately begins copying stdin data to the server, it's possible to overwhelm and wedge the + // spdy frame handler in the server so that it is full of unprocessed frames. The frames aren't + // getting processed because the server hasn't started its copying, and it won't do that until it + // gets all the streams. By creating all the streams first, we ensure that the server is ready to + // process data before the client starts sending any. See https://issues.k8s.io/16373 for more info. + if p.Stdin != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdin) + p.remoteStdin, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdin.Reset() + } + + if p.Stdout != nil { + headers.Set(v1.StreamType, v1.StreamTypeStdout) + p.remoteStdout, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStdout.Reset() + } + + if p.Stderr != nil && !p.Tty { + headers.Set(v1.StreamType, v1.StreamTypeStderr) + p.remoteStderr, err = conn.CreateStream(headers) + if err != nil { + return err + } + defer p.remoteStderr.Reset() + } + + // now that all the streams have been created, proceed with reading & copying + + // always read from errorStream + go func() { + message, err := io.ReadAll(p.errorStream) + if err != nil && err != io.EOF { + errorChan <- fmt.Errorf("Error reading from error stream: %s", err) + return + } + if len(message) > 0 { + errorChan <- fmt.Errorf("Error executing remote command: %s", message) + return + } + }() + + if p.Stdin != nil { + // TODO this goroutine will never exit cleanly (the io.Copy never unblocks) + // because stdin is not closed until the process exits. If we try to call + // stdin.Close(), it returns no error but doesn't unblock the copy. It will + // exit when the process exits, instead. 
+ go cp(v1.StreamTypeStdin, p.remoteStdin, readerWrapper{p.Stdin})
+ }
+
+ waitCount := 0
+ completedStreams := 0
+
+ if p.Stdout != nil {
+ waitCount++
+ go cp(v1.StreamTypeStdout, p.Stdout, p.remoteStdout)
+ }
+
+ if p.Stderr != nil && !p.Tty {
+ waitCount++
+ go cp(v1.StreamTypeStderr, p.Stderr, p.remoteStderr)
+ }
+
+Loop:
+ for {
+ select {
+ case <-doneChan:
+ completedStreams++
+ if completedStreams == waitCount {
+ break Loop
+ }
+ case err := <-errorChan:
+ return err
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v2.go b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
new file mode 100644
index 0000000000..d54612f4c2
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v2.go
@@ -0,0 +1,199 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "sync"
+
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// streamProtocolV2 implements version 2 of the streaming protocol for attach
+// and exec. The original streaming protocol was metav1. As a result, this
+// version is referred to as version 2, even though it is the first actual
+// numbered version.
+type streamProtocolV2 struct {
+ StreamOptions
+
+ errorStream io.Reader
+ remoteStdin io.ReadWriteCloser
+ remoteStdout io.Reader
+ remoteStderr io.Reader
+}
+
+var _ streamProtocolHandler = &streamProtocolV2{}
+
+func newStreamProtocolV2(options StreamOptions) streamProtocolHandler {
+ return &streamProtocolV2{
+ StreamOptions: options,
+ }
+}
+
+func (p *streamProtocolV2) createStreams(conn streamCreator) error {
+ var err error
+ headers := http.Header{}
+
+ // set up error stream
+ headers.Set(v1.StreamType, v1.StreamTypeError)
+ p.errorStream, err = conn.CreateStream(headers)
+ if err != nil {
+ return err
+ }
+
+ // set up stdin stream
+ if p.Stdin != nil {
+ headers.Set(v1.StreamType, v1.StreamTypeStdin)
+ p.remoteStdin, err = conn.CreateStream(headers)
+ if err != nil {
+ return err
+ }
+ }
+
+ // set up stdout stream
+ if p.Stdout != nil {
+ headers.Set(v1.StreamType, v1.StreamTypeStdout)
+ p.remoteStdout, err = conn.CreateStream(headers)
+ if err != nil {
+ return err
+ }
+ }
+
+ // set up stderr stream
+ if p.Stderr != nil && !p.Tty {
+ headers.Set(v1.StreamType, v1.StreamTypeStderr)
+ p.remoteStderr, err = conn.CreateStream(headers)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (p *streamProtocolV2) copyStdin() {
+ if p.Stdin != nil {
+ var once sync.Once
+
+ // copy from client's stdin to container's stdin
+ go func() {
+ defer runtime.HandleCrash()
+
+ // if p.stdin is noninteractive, e.g. `echo abc | kubectl exec -i -- cat`, make sure
+ // we close remoteStdin as soon as the copy from p.stdin to remoteStdin finishes. Otherwise
+ // the executed command will remain running.
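+ // For instance (hypothetical caller, not vendored code): with
+ // StreamOptions{Stdin: strings.NewReader("abc"), Stdout: os.Stdout},
+ // the copy below finishes after three bytes, once.Do closes remoteStdin,
+ // and the remote `cat` observes EOF and exits instead of hanging.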
+ defer once.Do(func() { p.remoteStdin.Close() })
+
+ if _, err := io.Copy(p.remoteStdin, readerWrapper{p.Stdin}); err != nil {
+ runtime.HandleError(err)
+ }
+ }()
+
+ // read from remoteStdin until the stream is closed. this is essential to
+ // be able to exit interactive sessions cleanly and not leak goroutines or
+ // hang the client's terminal.
+ //
+ // TODO we aren't using go-dockerclient any more; revisit this to determine if it's still
+ // required by engine-api.
+ //
+ // go-dockerclient's current hijack implementation
+ // (https://github.com/fsouza/go-dockerclient/blob/89f3d56d93788dfe85f864a44f85d9738fca0670/client.go#L564)
+ // waits for all three streams (stdin/stdout/stderr) to finish copying
+ // before returning. When hijack finishes copying stdout/stderr, it calls
+ // Close() on its side of remoteStdin, which allows this copy to complete.
+ // When that happens, we must Close() on our side of remoteStdin, to
+ // allow the copy in hijack to complete, and hijack to return.
+ go func() {
+ defer runtime.HandleCrash()
+ defer once.Do(func() { p.remoteStdin.Close() })
+
+ // this "copy" doesn't actually read anything - it's just here to wait for
+ // the server to close remoteStdin.
+ if _, err := io.Copy(io.Discard, p.remoteStdin); err != nil {
+ runtime.HandleError(err)
+ }
+ }()
+ }
+}
+
+func (p *streamProtocolV2) copyStdout(wg *sync.WaitGroup) {
+ if p.Stdout == nil {
+ return
+ }
+
+ wg.Add(1)
+ go func() {
+ defer runtime.HandleCrash()
+ defer wg.Done()
+ // Make sure remaining data in the queue can be consumed: a blocked
+ // queue may lead to a deadlock in conn.server.
+ // issue: https://github.com/kubernetes/kubernetes/issues/96339
+ defer io.Copy(io.Discard, p.remoteStdout)
+
+ if _, err := io.Copy(p.Stdout, p.remoteStdout); err != nil {
+ runtime.HandleError(err)
+ }
+ }()
+}
+
+func (p *streamProtocolV2) copyStderr(wg *sync.WaitGroup) {
+ if p.Stderr == nil || p.Tty {
+ return
+ }
+
+ wg.Add(1)
+ go func() {
+ defer runtime.HandleCrash()
+ defer wg.Done()
+ defer io.Copy(io.Discard, p.remoteStderr)
+
+ if _, err := io.Copy(p.Stderr, p.remoteStderr); err != nil {
+ runtime.HandleError(err)
+ }
+ }()
+}
+
+func (p *streamProtocolV2) stream(conn streamCreator) error {
+ if err := p.createStreams(conn); err != nil {
+ return err
+ }
+
+ // now that all the streams have been created, proceed with reading & copying
+
+ errorChan := watchErrorStream(p.errorStream, &errorDecoderV2{})
+
+ p.copyStdin()
+
+ var wg sync.WaitGroup
+ p.copyStdout(&wg)
+ p.copyStderr(&wg)
+
+ // we're waiting for stdout/stderr to finish copying
+ wg.Wait()
+
+ // waits for errorStream to finish reading with an error or nil
+ return <-errorChan
+}
+
+// errorDecoderV2 interprets the error channel data as plain text.
+type errorDecoderV2 struct{}
+
+func (d *errorDecoderV2) decode(message []byte) error {
+ return fmt.Errorf("error executing remote command: %s", message)
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v3.go b/vendor/k8s.io/client-go/tools/remotecommand/v3.go
new file mode 100644
index 0000000000..846dd24a5e
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v3.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package remotecommand + +import ( + "encoding/json" + "io" + "net/http" + "sync" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/runtime" +) + +// streamProtocolV3 implements version 3 of the streaming protocol for attach +// and exec. This version adds support for resizing the container's terminal. +type streamProtocolV3 struct { + *streamProtocolV2 + + resizeStream io.Writer +} + +var _ streamProtocolHandler = &streamProtocolV3{} + +func newStreamProtocolV3(options StreamOptions) streamProtocolHandler { + return &streamProtocolV3{ + streamProtocolV2: newStreamProtocolV2(options).(*streamProtocolV2), + } +} + +func (p *streamProtocolV3) createStreams(conn streamCreator) error { + // set up the streams from v2 + if err := p.streamProtocolV2.createStreams(conn); err != nil { + return err + } + + // set up resize stream + if p.Tty { + headers := http.Header{} + headers.Set(v1.StreamType, v1.StreamTypeResize) + var err error + p.resizeStream, err = conn.CreateStream(headers) + if err != nil { + return err + } + } + + return nil +} + +func (p *streamProtocolV3) handleResizes() { + if p.resizeStream == nil || p.TerminalSizeQueue == nil { + return + } + go func() { + defer runtime.HandleCrash() + + encoder := json.NewEncoder(p.resizeStream) + for { + size := p.TerminalSizeQueue.Next() + if size == nil { + return + } + if err := encoder.Encode(&size); err != nil { + runtime.HandleError(err) + } + } + }() +} + +func (p *streamProtocolV3) stream(conn streamCreator) error { + if err := p.createStreams(conn); err != nil { + return err + } + + // now that all the streams have been created, proceed with reading & copying + + errorChan := watchErrorStream(p.errorStream, &errorDecoderV3{}) + + p.handleResizes() + + p.copyStdin() + + var wg sync.WaitGroup + p.copyStdout(&wg) + p.copyStderr(&wg) + + // we're waiting for stdout/stderr to finish copying + wg.Wait() + + // waits for errorStream to finish reading with an error or nil + return <-errorChan +} + +type errorDecoderV3 struct { + errorDecoderV2 +} diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v4.go b/vendor/k8s.io/client-go/tools/remotecommand/v4.go new file mode 100644 index 0000000000..69ca934a0d --- /dev/null +++ b/vendor/k8s.io/client-go/tools/remotecommand/v4.go @@ -0,0 +1,119 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package remotecommand
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strconv"
+ "sync"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/remotecommand"
+ "k8s.io/client-go/util/exec"
+)
+
+// streamProtocolV4 implements version 4 of the streaming protocol for attach
+// and exec. This version adds support for exit codes on the error stream through
+// the use of metav1.Status instead of plain text messages.
+type streamProtocolV4 struct {
+ *streamProtocolV3
+}
+
+var _ streamProtocolHandler = &streamProtocolV4{}
+
+func newStreamProtocolV4(options StreamOptions) streamProtocolHandler {
+ return &streamProtocolV4{
+ streamProtocolV3: newStreamProtocolV3(options).(*streamProtocolV3),
+ }
+}
+
+func (p *streamProtocolV4) createStreams(conn streamCreator) error {
+ return p.streamProtocolV3.createStreams(conn)
+}
+
+func (p *streamProtocolV4) handleResizes() {
+ p.streamProtocolV3.handleResizes()
+}
+
+func (p *streamProtocolV4) stream(conn streamCreator) error {
+ if err := p.createStreams(conn); err != nil {
+ return err
+ }
+
+ // now that all the streams have been created, proceed with reading & copying
+
+ errorChan := watchErrorStream(p.errorStream, &errorDecoderV4{})
+
+ p.handleResizes()
+
+ p.copyStdin()
+
+ var wg sync.WaitGroup
+ p.copyStdout(&wg)
+ p.copyStderr(&wg)
+
+ // we're waiting for stdout/stderr to finish copying
+ wg.Wait()
+
+ // waits for errorStream to finish reading with an error or nil
+ return <-errorChan
+}
+
+// errorDecoderV4 interprets the json-marshaled metav1.Status on the error channel
+// and creates an exec.ExitError from it.
+type errorDecoderV4 struct{}
+
+func (d *errorDecoderV4) decode(message []byte) error {
+ status := metav1.Status{}
+ err := json.Unmarshal(message, &status)
+ if err != nil {
+ return fmt.Errorf("error stream protocol error: %v in %q", err, string(message))
+ }
+ switch status.Status {
+ case metav1.StatusSuccess:
+ return nil
+ case metav1.StatusFailure:
+ if status.Reason == remotecommand.NonZeroExitCodeReason {
+ if status.Details == nil {
+ return errors.New("error stream protocol error: details must be set")
+ }
+ for i := range status.Details.Causes {
+ c := &status.Details.Causes[i]
+ if c.Type != remotecommand.ExitCodeCauseType {
+ continue
+ }
+
+ rc, err := strconv.ParseUint(c.Message, 10, 8)
+ if err != nil {
+ return fmt.Errorf("error stream protocol error: invalid exit code value %q", c.Message)
+ }
+ return exec.CodeExitError{
+ Err: fmt.Errorf("command terminated with exit code %d", rc),
+ Code: int(rc),
+ }
+ }
+
+ return fmt.Errorf("error stream protocol error: no %s cause given", remotecommand.ExitCodeCauseType)
+ }
+ default:
+ return errors.New("error stream protocol error: unknown error")
+ }
+
+ // status.Message is not a format string, so use errors.New rather than
+ // fmt.Errorf to avoid misinterpreting any '%' verbs it may contain.
+ return errors.New(status.Message)
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/v5.go b/vendor/k8s.io/client-go/tools/remotecommand/v5.go
new file mode 100644
index 0000000000..4da7bfb139
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/v5.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+// streamProtocolV5 adds support for V5 of the remote command subprotocol.
+// For the streamProtocolHandler, this version is the same as V4.
+type streamProtocolV5 struct {
+ *streamProtocolV4
+}
+
+var _ streamProtocolHandler = &streamProtocolV5{}
+
+func newStreamProtocolV5(options StreamOptions) streamProtocolHandler {
+ return &streamProtocolV5{
+ streamProtocolV4: newStreamProtocolV4(options).(*streamProtocolV4),
+ }
+}
+
+func (p *streamProtocolV5) stream(conn streamCreator) error {
+ return p.streamProtocolV4.stream(conn)
+}
diff --git a/vendor/k8s.io/client-go/tools/remotecommand/websocket.go b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go
new file mode 100644
index 0000000000..49ef4717cd
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/remotecommand/websocket.go
@@ -0,0 +1,519 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package remotecommand
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ gwebsocket "github.com/gorilla/websocket"
+
+ v1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/apimachinery/pkg/util/remotecommand"
+ restclient "k8s.io/client-go/rest"
+ "k8s.io/client-go/transport/websocket"
+ "k8s.io/klog/v2"
+)
+
+// writeDeadline defines the time that a write to the websocket connection
+// must complete by, otherwise an i/o timeout occurs. The writeDeadline
+// has nothing to do with a response from the other websocket connection
+// endpoint; only that the message was successfully processed by the
+// local websocket connection. The typical write deadline within the websocket
+// library is one second.
+const writeDeadline = 2 * time.Second
+
+var (
+ _ Executor = &wsStreamExecutor{}
+ _ streamCreator = &wsStreamCreator{}
+ _ httpstream.Stream = &stream{}
+
+ streamType2streamID = map[string]byte{
+ v1.StreamTypeStdin: remotecommand.StreamStdIn,
+ v1.StreamTypeStdout: remotecommand.StreamStdOut,
+ v1.StreamTypeStderr: remotecommand.StreamStdErr,
+ v1.StreamTypeError: remotecommand.StreamErr,
+ v1.StreamTypeResize: remotecommand.StreamResize,
+ }
+)
+
+const (
+ // pingPeriod defines how often a heartbeat "ping" message is sent.
+ pingPeriod = 5 * time.Second
+ // pingReadDeadline defines the time waiting for a response heartbeat
+ // "pong" message before a timeout error occurs for websocket reading.
+ // This duration must always be greater than the "pingPeriod". By defining
+ // this deadline in terms of the ping period, we are essentially saying
+ // we can drop "X-1" (e.g. 3-1=2) pings before firing the timeout.
+ pingReadDeadline = (pingPeriod * 3) + (1 * time.Second)
+)
+
+// wsStreamExecutor handles transporting standard shell streams over an httpstream connection.
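+// A minimal construction sketch (hypothetical caller; cfg is an assumed
+// *rest.Config and url the pod subresource URL string):
+//
+//	executor, err := NewWebSocketExecutor(cfg, "GET", url)
+//	if err != nil {
+//		return err
+//	}
+//	err = executor.StreamWithContext(ctx, StreamOptions{Stdout: os.Stdout, Stderr: os.Stderr})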
+type wsStreamExecutor struct {
+ transport http.RoundTripper
+ upgrader websocket.ConnectionHolder
+ method string
+ url string
+ // requested protocols in priority order (e.g. v5.channel.k8s.io before v4.channel.k8s.io).
+ protocols []string
+ // selected protocol from the handshake process; could be empty string if handshake fails.
+ negotiated string
+ // period defines how often a "ping" heartbeat message is sent to the other endpoint.
+ heartbeatPeriod time.Duration
+ // deadline defines the amount of time before "pong" response must be received.
+ heartbeatDeadline time.Duration
+}
+
+func NewWebSocketExecutor(config *restclient.Config, method, url string) (Executor, error) {
+ // Only supports V5 protocol for correct version skew functionality.
+ // Previous api servers will proxy upgrade requests to legacy websocket
+ // servers on container runtimes which support V1-V4. These legacy
+ // websocket servers will not handle the new CLOSE signal.
+ return NewWebSocketExecutorForProtocols(config, method, url, remotecommand.StreamProtocolV5Name)
+}
+
+// NewWebSocketExecutorForProtocols allows executing commands via a WebSocket connection.
+func NewWebSocketExecutorForProtocols(config *restclient.Config, method, url string, protocols ...string) (Executor, error) {
+ transport, upgrader, err := websocket.RoundTripperFor(config)
+ if err != nil {
+ return nil, fmt.Errorf("error creating websocket transports: %v", err)
+ }
+ return &wsStreamExecutor{
+ transport: transport,
+ upgrader: upgrader,
+ method: method,
+ url: url,
+ protocols: protocols,
+ heartbeatPeriod: pingPeriod,
+ heartbeatDeadline: pingReadDeadline,
+ }, nil
+}
+
+// Deprecated: use StreamWithContext instead to avoid possible resource leaks.
+// See https://github.com/kubernetes/kubernetes/pull/103177 for details.
+func (e *wsStreamExecutor) Stream(options StreamOptions) error {
+ return e.StreamWithContext(context.Background(), options)
+}
+
+// StreamWithContext upgrades an HTTPRequest to a WebSocket connection, and starts the various
+// goroutines to implement the necessary streams over the connection. The "options" parameter
+// defines which streams are requested. Returns an error if one occurred. This method is NOT
+// safe to run concurrently with the same executor (because of the state stored in the upgrader).
+func (e *wsStreamExecutor) StreamWithContext(ctx context.Context, options StreamOptions) error {
+ req, err := http.NewRequestWithContext(ctx, e.method, e.url, nil)
+ if err != nil {
+ return err
+ }
+ conn, err := websocket.Negotiate(e.transport, e.upgrader, req, e.protocols...)
+ if err != nil {
+ return err
+ }
+ if conn == nil {
+ panic(fmt.Errorf("websocket connection is nil"))
+ }
+ defer conn.Close()
+ e.negotiated = conn.Subprotocol()
+ klog.V(4).Infof("The subprotocol is %s", e.negotiated)
+
+ var streamer streamProtocolHandler
+ switch e.negotiated {
+ case remotecommand.StreamProtocolV5Name:
+ streamer = newStreamProtocolV5(options)
+ case remotecommand.StreamProtocolV4Name:
+ streamer = newStreamProtocolV4(options)
+ case remotecommand.StreamProtocolV3Name:
+ streamer = newStreamProtocolV3(options)
+ case remotecommand.StreamProtocolV2Name:
+ streamer = newStreamProtocolV2(options)
+ case "":
+ klog.V(4).Infof("The server did not negotiate a streaming protocol version.
Falling back to %s", remotecommand.StreamProtocolV1Name) + fallthrough + case remotecommand.StreamProtocolV1Name: + streamer = newStreamProtocolV1(options) + } + + panicChan := make(chan any, 1) + errorChan := make(chan error, 1) + go func() { + defer func() { + if p := recover(); p != nil { + panicChan <- p + } + }() + creator := newWSStreamCreator(conn) + go creator.readDemuxLoop( + e.upgrader.DataBufferSize(), + e.heartbeatPeriod, + e.heartbeatDeadline, + ) + errorChan <- streamer.stream(creator) + }() + + select { + case p := <-panicChan: + panic(p) + case err := <-errorChan: + return err + case <-ctx.Done(): + return ctx.Err() + } +} + +type wsStreamCreator struct { + conn *gwebsocket.Conn + // Protects writing to websocket connection; reading is lock-free + connWriteLock sync.Mutex + // map of stream id to stream; multiple streams read/write the connection + streams map[byte]*stream + streamsMu sync.Mutex + // setStreamErr holds the error to return to anyone calling setStreams. + // this is populated in closeAllStreamReaders + setStreamErr error +} + +func newWSStreamCreator(conn *gwebsocket.Conn) *wsStreamCreator { + return &wsStreamCreator{ + conn: conn, + streams: map[byte]*stream{}, + } +} + +func (c *wsStreamCreator) getStream(id byte) *stream { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + return c.streams[id] +} + +func (c *wsStreamCreator) setStream(id byte, s *stream) error { + c.streamsMu.Lock() + defer c.streamsMu.Unlock() + if c.setStreamErr != nil { + return c.setStreamErr + } + c.streams[id] = s + return nil +} + +// CreateStream uses id from passed headers to create a stream over "c.conn" connection. +// Returns a Stream structure or nil and an error if one occurred. +func (c *wsStreamCreator) CreateStream(headers http.Header) (httpstream.Stream, error) { + streamType := headers.Get(v1.StreamType) + id, ok := streamType2streamID[streamType] + if !ok { + return nil, fmt.Errorf("unknown stream type: %s", streamType) + } + if s := c.getStream(id); s != nil { + return nil, fmt.Errorf("duplicate stream for type %s", streamType) + } + reader, writer := io.Pipe() + s := &stream{ + headers: headers, + readPipe: reader, + writePipe: writer, + conn: c.conn, + connWriteLock: &c.connWriteLock, + id: id, + } + if err := c.setStream(id, s); err != nil { + _ = s.writePipe.Close() + _ = s.readPipe.Close() + return nil, err + } + return s, nil +} + +// readDemuxLoop is the lock-free reading processor for this endpoint of the websocket +// connection. This loop reads the connection, and demultiplexes the data +// into one of the individual stream pipes (by checking the stream id). This +// loop can *not* be run concurrently, because there can only be one websocket +// connection reader at a time (a read mutex would provide no benefit). +func (c *wsStreamCreator) readDemuxLoop(bufferSize int, period time.Duration, deadline time.Duration) { + // Initialize and start the ping/pong heartbeat. + h := newHeartbeat(c.conn, period, deadline) + // Set initial timeout for websocket connection reading. + if err := c.conn.SetReadDeadline(time.Now().Add(deadline)); err != nil { + klog.Errorf("Websocket initial setting read deadline failed %v", err) + return + } + go h.start() + // Buffer size must correspond to the same size allocated + // for the read buffer during websocket client creation. A + // difference can cause incomplete connection reads. + readBuffer := make([]byte, bufferSize) + for { + // NextReader() only returns data messages (BinaryMessage or Text + // Message). 
Even though this call will never return control frames
+ // such as ping, pong, or close, this call is necessary for these
+ // message types to be processed. There can only be one reader
+ // at a time, so this reader loop must *not* be run concurrently;
+ // there is no lock for reading. Calling "NextReader()" before the
+ // current reader has been processed will close the current reader.
+ // If the heartbeat read deadline times out, this "NextReader()" will
+ // return an i/o error, and error handling will clean up.
+ messageType, r, err := c.conn.NextReader()
+ if err != nil {
+ websocketErr, ok := err.(*gwebsocket.CloseError)
+ if ok && websocketErr.Code == gwebsocket.CloseNormalClosure {
+ err = nil // readers will get io.EOF as it's a normal closure
+ } else {
+ err = fmt.Errorf("next reader: %w", err)
+ }
+ c.closeAllStreamReaders(err)
+ return
+ }
+ // All remote command protocols send/receive only binary data messages.
+ if messageType != gwebsocket.BinaryMessage {
+ c.closeAllStreamReaders(fmt.Errorf("unexpected message type: %d", messageType))
+ return
+ }
+ // It's ok to read just a single byte because the underlying library wraps the actual
+ // connection with a buffered reader anyway.
+ _, err = io.ReadFull(r, readBuffer[:1])
+ if err != nil {
+ c.closeAllStreamReaders(fmt.Errorf("read stream id: %w", err))
+ return
+ }
+ streamID := readBuffer[0]
+ s := c.getStream(streamID)
+ if s == nil {
+ klog.Errorf("Unknown stream id %d, discarding message", streamID)
+ continue
+ }
+ for {
+ nr, errRead := r.Read(readBuffer)
+ if nr > 0 {
+ // Write the data to the stream's pipe. This can block.
+ _, errWrite := s.writePipe.Write(readBuffer[:nr])
+ if errWrite != nil {
+ // Pipe must have been closed by the stream user.
+ // Nothing to do, discard the message.
+ break
+ }
+ }
+ if errRead != nil {
+ if errRead == io.EOF {
+ break
+ }
+ // Wrap errRead (not the stale outer err, which is nil at this
+ // point) so the actual read failure reaches the stream readers.
+ c.closeAllStreamReaders(fmt.Errorf("read message: %w", errRead))
+ return
+ }
+ }
+ }
+}
+
+// closeAllStreamReaders closes readers in all streams.
+// This unblocks all stream.Read() calls, and keeps any future streams from being created.
+func (c *wsStreamCreator) closeAllStreamReaders(err error) {
+ c.streamsMu.Lock()
+ defer c.streamsMu.Unlock()
+ for _, s := range c.streams {
+ // Closing writePipe unblocks all readPipe.Read() callers and prevents any future writes.
+ _ = s.writePipe.CloseWithError(err)
+ }
+ // ensure callers to setStreams receive an error after this point
+ if err != nil {
+ c.setStreamErr = err
+ } else {
+ c.setStreamErr = fmt.Errorf("closed all streams")
+ }
+}
+
+type stream struct {
+ headers http.Header
+ readPipe *io.PipeReader
+ writePipe *io.PipeWriter
+ // conn is used for writing directly into the connection.
+ // Is nil after Close() / Reset() to prevent future writes.
+ conn *gwebsocket.Conn
+ // connWriteLock protects conn against concurrent write operations. There must be a single writer and a single reader only.
+ // The mutex is shared across all streams because the underlying connection is shared.
+ connWriteLock *sync.Mutex
+ id byte
+}
+
+func (s *stream) Read(p []byte) (n int, err error) {
+ return s.readPipe.Read(p)
+}
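+
+// On the wire, every data message is one stream-id byte followed by payload.
+// Illustrative example (not vendored code): writing "hi" on the stdout stream
+// (id 1 per streamType2streamID) produces the binary websocket message
+// {0x01, 'h', 'i'}; readDemuxLoop on the receiving side strips the leading
+// byte and routes the rest to the matching pipe.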
+
+// Write writes directly to the underlying WebSocket connection.
+func (s *stream) Write(p []byte) (n int, err error) {
+ klog.V(4).Infof("Write() on stream %d", s.id)
+ defer klog.V(4).Infof("Write() done on stream %d", s.id)
+ s.connWriteLock.Lock()
+ defer s.connWriteLock.Unlock()
+ if s.conn == nil {
+ return 0, fmt.Errorf("write on closed stream %d", s.id)
+ }
+ err = s.conn.SetWriteDeadline(time.Now().Add(writeDeadline))
+ if err != nil {
+ klog.V(7).Infof("Websocket setting write deadline failed %v", err)
+ return 0, err
+ }
+ // Message writer buffers the message data, so we don't need to do that ourselves.
+ // Just write id and the data as two separate writes to avoid allocating an intermediate buffer.
+ w, err := s.conn.NextWriter(gwebsocket.BinaryMessage)
+ if err != nil {
+ return 0, err
+ }
+ defer func() {
+ if w != nil {
+ w.Close()
+ }
+ }()
+ _, err = w.Write([]byte{s.id})
+ if err != nil {
+ return 0, err
+ }
+ n, err = w.Write(p)
+ if err != nil {
+ return n, err
+ }
+ err = w.Close()
+ w = nil
+ return n, err
+}
+
+// Close half-closes the stream, indicating this side is finished with the stream.
+func (s *stream) Close() error {
+ klog.V(4).Infof("Close() on stream %d", s.id)
+ defer klog.V(4).Infof("Close() done on stream %d", s.id)
+ s.connWriteLock.Lock()
+ defer s.connWriteLock.Unlock()
+ if s.conn == nil {
+ return fmt.Errorf("Close() on already closed stream %d", s.id)
+ }
+ // Communicate the CLOSE stream signal to the other websocket endpoint.
+ err := s.conn.WriteMessage(gwebsocket.BinaryMessage, []byte{remotecommand.StreamClose, s.id})
+ s.conn = nil
+ return err
+}
+
+func (s *stream) Reset() error {
+ klog.V(4).Infof("Reset() on stream %d", s.id)
+ defer klog.V(4).Infof("Reset() done on stream %d", s.id)
+ s.Close()
+ return s.writePipe.Close()
+}
+
+func (s *stream) Headers() http.Header {
+ return s.headers
+}
+
+func (s *stream) Identifier() uint32 {
+ return uint32(s.id)
+}
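+
+// With the package defaults (pingPeriod = 5s, pingReadDeadline = 3*5s + 1s = 16s),
+// the connection tolerates two consecutive lost "pong" responses; after a third
+// miss, NextReader() in readDemuxLoop trips the read deadline and the
+// connection is torn down.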
+ klog.V(8).Infof("Pong message received (%s)--resetting read deadline", msg) + err := h.conn.SetReadDeadline(time.Now().Add(deadline)) + if err != nil { + klog.Errorf("Websocket setting read deadline failed %v", err) + return err + } + if len(msg) > 0 { + h.pongMessage = []byte(msg) + } + return nil + }) + // Set up handler to cleanup timers when this endpoint receives "Close" message. + closeHandler := h.conn.CloseHandler() + h.conn.SetCloseHandler(func(code int, text string) error { + close(h.closer) + return closeHandler(code, text) + }) + return h +} + +// setMessage is optional data sent with "ping" heartbeat. According to the websocket RFC +// this data sent with "ping" message should be returned in "pong" message. +func (h *heartbeat) setMessage(msg string) { + h.message = []byte(msg) +} + +// start the heartbeat by setting up necesssary handlers and looping by sending "ping" +// message every "period" until the "closer" channel is closed. +func (h *heartbeat) start() { + // Loop to continually send "ping" message through websocket connection every "period". + t := time.NewTicker(h.period) + defer t.Stop() + for { + select { + case <-h.closer: + klog.V(8).Infof("closed channel--returning") + return + case <-t.C: + // "WriteControl" does not need to be protected by a mutex. According to + // gorilla/websockets library docs: "The Close and WriteControl methods can + // be called concurrently with all other methods." + if err := h.conn.WriteControl(gwebsocket.PingMessage, h.message, time.Now().Add(writeDeadline)); err == nil { + klog.V(8).Infof("Websocket Ping succeeeded") + } else { + klog.Errorf("Websocket Ping failed: %v", err) + if errors.Is(err, gwebsocket.ErrCloseSent) { + // we continue because c.conn.CloseChan will manage closing the connection already + continue + } else if e, ok := err.(net.Error); ok && e.Timeout() { + // Continue, in case this is a transient failure. + // c.conn.CloseChan above will tell us when the connection is + // actually closed. + // If Temporary function hadn't been deprecated, we would have used it. + // But most of temporary errors are timeout errors anyway. + continue + } + return + } + } + } +} diff --git a/vendor/k8s.io/client-go/tools/watch/informerwatcher.go b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go new file mode 100644 index 0000000000..5e6aad5cf1 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/informerwatcher.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" +) + +func newEventProcessor(out chan<- watch.Event) *eventProcessor { + return &eventProcessor{ + out: out, + cond: sync.NewCond(&sync.Mutex{}), + done: make(chan struct{}), + } +} + +// eventProcessor buffers events and writes them to an out chan when a reader +// is waiting. 
+// eventProcessor buffers events and writes them to an out chan when a reader
+// is waiting. Because of the requirement to buffer events, it synchronizes
+// input with a condition, and synchronizes output with a channel. It needs to
+// be able to yield while both waiting on an input condition and while blocked
+// on writing to the output channel.
+type eventProcessor struct {
+ out chan<- watch.Event
+
+ cond *sync.Cond
+ buff []watch.Event
+
+ done chan struct{}
+}
+
+func (e *eventProcessor) run() {
+ for {
+ batch := e.takeBatch()
+ e.writeBatch(batch)
+ if e.stopped() {
+ return
+ }
+ }
+}
+
+func (e *eventProcessor) takeBatch() []watch.Event {
+ e.cond.L.Lock()
+ defer e.cond.L.Unlock()
+
+ for len(e.buff) == 0 && !e.stopped() {
+ e.cond.Wait()
+ }
+
+ batch := e.buff
+ e.buff = nil
+ return batch
+}
+
+func (e *eventProcessor) writeBatch(events []watch.Event) {
+ for _, event := range events {
+ select {
+ case e.out <- event:
+ case <-e.done:
+ return
+ }
+ }
+}
+
+func (e *eventProcessor) push(event watch.Event) {
+ e.cond.L.Lock()
+ defer e.cond.L.Unlock()
+ defer e.cond.Signal()
+ e.buff = append(e.buff, event)
+}
+
+func (e *eventProcessor) stopped() bool {
+ select {
+ case <-e.done:
+ return true
+ default:
+ return false
+ }
+}
+
+func (e *eventProcessor) stop() {
+ close(e.done)
+ e.cond.Signal()
+}
+
+// NewIndexerInformerWatcher will create an IndexerInformer and wrap it into watch.Interface
+// so you can use it anywhere you'd have used a regular Watcher returned from the Watch method.
+// It also returns a channel you can use to wait for the informers to fully shut down.
+func NewIndexerInformerWatcher(lw cache.ListerWatcher, objType runtime.Object) (cache.Indexer, cache.Controller, watch.Interface, <-chan struct{}) {
+ ch := make(chan watch.Event)
+ w := watch.NewProxyWatcher(ch)
+ e := newEventProcessor(ch)
+
+ indexer, informer := cache.NewIndexerInformer(lw, objType, 0, cache.ResourceEventHandlerFuncs{
+ AddFunc: func(obj interface{}) {
+ e.push(watch.Event{
+ Type: watch.Added,
+ Object: obj.(runtime.Object),
+ })
+ },
+ UpdateFunc: func(old, new interface{}) {
+ e.push(watch.Event{
+ Type: watch.Modified,
+ Object: new.(runtime.Object),
+ })
+ },
+ DeleteFunc: func(obj interface{}) {
+ staleObj, stale := obj.(cache.DeletedFinalStateUnknown)
+ if stale {
+ // We have no means of passing the additional information down using
+ // the watch API based on watch.Event, but the caller can filter such
+ // objects by checking if metadata.deletionTimestamp is set.
+ obj = staleObj.Obj
+ }
+
+ e.push(watch.Event{
+ Type: watch.Deleted,
+ Object: obj.(runtime.Object),
+ })
+ },
+ }, cache.Indexers{})
+
+ go e.run()
+
+ doneCh := make(chan struct{})
+ go func() {
+ defer close(doneCh)
+ defer e.stop()
+ informer.Run(w.StopChan())
+ }()
+
+ return indexer, informer, w, doneCh
+}
diff --git a/vendor/k8s.io/client-go/tools/watch/retrywatcher.go b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go
new file mode 100644
index 0000000000..d81dc43570
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/watch/retrywatcher.go
@@ -0,0 +1,295 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package watch
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "time"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/util/dump"
+ "k8s.io/apimachinery/pkg/util/net"
+ "k8s.io/apimachinery/pkg/util/wait"
+ "k8s.io/apimachinery/pkg/watch"
+ "k8s.io/client-go/tools/cache"
+ "k8s.io/klog/v2"
+)
+
+// resourceVersionGetter is an interface used to get resource version from events.
+// We can't reuse an interface from meta because that would create a cyclic dependency,
+// and we need just this one method.
+type resourceVersionGetter interface {
+ GetResourceVersion() string
+}
+
+// RetryWatcher will make sure that in case the underlying watcher is closed (e.g. due to API timeout or etcd timeout)
+// it will get restarted from the last point without the consumer even knowing about it.
+// RetryWatcher does that by inspecting events and keeping track of resourceVersion.
+// Especially useful when using watch.UntilWithoutRetry where premature termination is causing issues and flakes.
+// Please note that this is not resilient to etcd cache not having the resource version anymore - you would need to
+// use Informers for that.
+type RetryWatcher struct {
+ lastResourceVersion string
+ watcherClient cache.Watcher
+ resultChan chan watch.Event
+ stopChan chan struct{}
+ doneChan chan struct{}
+ minRestartDelay time.Duration
+}
+
+// NewRetryWatcher creates a new RetryWatcher.
+// It will make sure that watches get restarted in case of recoverable errors.
+// The initialResourceVersion will be given to the watch method when first called.
+func NewRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher) (*RetryWatcher, error) {
+ return newRetryWatcher(initialResourceVersion, watcherClient, 1*time.Second)
+}
+
+func newRetryWatcher(initialResourceVersion string, watcherClient cache.Watcher, minRestartDelay time.Duration) (*RetryWatcher, error) {
+ switch initialResourceVersion {
+ case "", "0":
+ // TODO: revisit this if we ever get WATCH v2 where it means start "now"
+ // without doing the synthetic list of objects at the beginning (see #74022)
+ return nil, fmt.Errorf("initial RV %q is not supported due to issues with underlying WATCH", initialResourceVersion)
+ default:
+ break
+ }
+
+ rw := &RetryWatcher{
+ lastResourceVersion: initialResourceVersion,
+ watcherClient: watcherClient,
+ stopChan: make(chan struct{}),
+ doneChan: make(chan struct{}),
+ resultChan: make(chan watch.Event, 0),
+ minRestartDelay: minRestartDelay,
+ }
+
+ go rw.receive()
+ return rw, nil
+}
+
+func (rw *RetryWatcher) send(event watch.Event) bool {
+ // Writing to an unbuffered channel is a blocking operation,
+ // and we need to check if stop wasn't requested while doing so.
+ select {
+ case rw.resultChan <- event:
+ return true
+ case <-rw.stopChan:
+ return false
+ }
+}
+
+// doReceive returns true when it is done, false otherwise.
+// If it is not done, the second return value holds the time to wait before calling it again.
+func (rw *RetryWatcher) doReceive() (bool, time.Duration) {
+ watcher, err := rw.watcherClient.Watch(metav1.ListOptions{
+ ResourceVersion: rw.lastResourceVersion,
+ AllowWatchBookmarks: true,
+ })
+ // We are very unlikely to hit EOF here since we are just establishing the call,
+ // but it may happen that the apiserver is just shutting down (e.g.
being restarted) + // This is consistent with how it is handled for informers + switch err { + case nil: + break + + case io.EOF: + // watch closed normally + return false, 0 + + case io.ErrUnexpectedEOF: + klog.V(1).InfoS("Watch closed with unexpected EOF", "err", err) + return false, 0 + + default: + msg := "Watch failed" + if net.IsProbableEOF(err) || net.IsTimeout(err) { + klog.V(5).InfoS(msg, "err", err) + // Retry + return false, 0 + } + + klog.ErrorS(err, msg) + // Retry + return false, 0 + } + + if watcher == nil { + klog.ErrorS(nil, "Watch returned nil watcher") + // Retry + return false, 0 + } + + ch := watcher.ResultChan() + defer watcher.Stop() + + for { + select { + case <-rw.stopChan: + klog.V(4).InfoS("Stopping RetryWatcher.") + return true, 0 + case event, ok := <-ch: + if !ok { + klog.V(4).InfoS("Failed to get event! Re-creating the watcher.", "resourceVersion", rw.lastResourceVersion) + return false, 0 + } + + // We need to inspect the event and get ResourceVersion out of it + switch event.Type { + case watch.Added, watch.Modified, watch.Deleted, watch.Bookmark: + metaObject, ok := event.Object.(resourceVersionGetter) + if !ok { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(errors.New("retryWatcher: doesn't support resourceVersion")).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + resourceVersion := metaObject.GetResourceVersion() + if resourceVersion == "" { + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher: object %#v doesn't support resourceVersion", event.Object)).ErrStatus, + }) + // We have to abort here because this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + + // All is fine; send the non-bookmark events and update resource version. + if event.Type != watch.Bookmark { + ok = rw.send(event) + if !ok { + return true, 0 + } + } + rw.lastResourceVersion = resourceVersion + + continue + + case watch.Error: + // This round trip allows us to handle unstructured status + errObject := apierrors.FromObject(event.Object) + statusErr, ok := errObject.(*apierrors.StatusError) + if !ok { + klog.Error(fmt.Sprintf("Received an error which is not *metav1.Status but %s", dump.Pretty(event.Object))) + // Retry unknown errors + return false, 0 + } + + status := statusErr.ErrStatus + + statusDelay := time.Duration(0) + if status.Details != nil { + statusDelay = time.Duration(status.Details.RetryAfterSeconds) * time.Second + } + + switch status.Code { + case http.StatusGone: + // Never retry RV too old errors + _ = rw.send(event) + return true, 0 + + case http.StatusGatewayTimeout, http.StatusInternalServerError: + // Retry + return false, statusDelay + + default: + // We retry by default. RetryWatcher is meant to proceed unless it is certain + // that it can't. If we are not certain, we proceed with retry and leave it + // up to the user to timeout if needed. + + // Log here so we have a record of hitting the unexpected error + // and we can whitelist some error codes if we missed any that are expected. 
+ klog.V(5).Info(fmt.Sprintf("Retrying after unexpected error: %s", dump.Pretty(event.Object))) + + // Retry + return false, statusDelay + } + + default: + klog.Errorf("Failed to recognize Event type %q", event.Type) + _ = rw.send(watch.Event{ + Type: watch.Error, + Object: &apierrors.NewInternalError(fmt.Errorf("retryWatcher failed to recognize Event type %q", event.Type)).ErrStatus, + }) + // We are unable to restart the watch and have to stop the loop or this might cause lastResourceVersion inconsistency by skipping a potential RV with valid data! + return true, 0 + } + } + } +} + +// receive reads the result from a watcher, restarting it if necessary. +func (rw *RetryWatcher) receive() { + defer close(rw.doneChan) + defer close(rw.resultChan) + + klog.V(4).Info("Starting RetryWatcher.") + defer klog.V(4).Info("Stopping RetryWatcher.") + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + go func() { + select { + case <-rw.stopChan: + cancel() + return + case <-ctx.Done(): + return + } + }() + + // We use non sliding until so we don't introduce delays on happy path when WATCH call + // timeouts or gets closed and we need to reestablish it while also avoiding hot loops. + wait.NonSlidingUntilWithContext(ctx, func(ctx context.Context) { + done, retryAfter := rw.doReceive() + if done { + cancel() + return + } + + timer := time.NewTimer(retryAfter) + select { + case <-ctx.Done(): + timer.Stop() + return + case <-timer.C: + } + + klog.V(4).Infof("Restarting RetryWatcher at RV=%q", rw.lastResourceVersion) + }, rw.minRestartDelay) +} + +// ResultChan implements Interface. +func (rw *RetryWatcher) ResultChan() <-chan watch.Event { + return rw.resultChan +} + +// Stop implements Interface. +func (rw *RetryWatcher) Stop() { + close(rw.stopChan) +} + +// Done allows the caller to be notified when Retry watcher stops. +func (rw *RetryWatcher) Done() <-chan struct{} { + return rw.doneChan +} diff --git a/vendor/k8s.io/client-go/tools/watch/until.go b/vendor/k8s.io/client-go/tools/watch/until.go new file mode 100644 index 0000000000..a2474556b0 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/watch/until.go @@ -0,0 +1,168 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "context" + "errors" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/cache" + "k8s.io/klog/v2" +) + +// PreconditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition failed or detected an error state. +type PreconditionFunc func(store cache.Store) (bool, error) + +// ConditionFunc returns true if the condition has been reached, false if it has not been reached yet, +// or an error if the condition cannot be checked and should terminate. 
In general, it is better to define
+// level driven conditions over edge driven conditions (pod has ready=true, vs pod modified and ready changed
+// from false to true).
+type ConditionFunc func(event watch.Event) (bool, error)
+
+// ErrWatchClosed is returned when the watch channel is closed before timeout in UntilWithoutRetry.
+var ErrWatchClosed = errors.New("watch closed before UntilWithoutRetry timeout")
+
+// UntilWithoutRetry reads items from the watch until each provided condition succeeds, and then returns the last event
+// encountered. The first condition that returns an error terminates the watch (and the event is also returned).
+// If no event has been received, the returned event will be nil.
+// Conditions are satisfied sequentially so as to provide a useful primitive for higher level composition.
+// Waits until context deadline or until context is canceled.
+//
+// Warning: Unless you have a very specific use case (probably a special Watcher) don't use this function!!!
+// Warning: This will fail e.g. on API timeouts and/or 'too old resource version' error.
+// Warning: You are most probably looking for the functions *Until* or *UntilWithSync* below,
+// Warning: which solve such issues.
+// TODO: Consider making this function private to prevent misuse when the other occurrences in our codebase are gone.
+func UntilWithoutRetry(ctx context.Context, watcher watch.Interface, conditions ...ConditionFunc) (*watch.Event, error) {
+ ch := watcher.ResultChan()
+ defer watcher.Stop()
+ var lastEvent *watch.Event
+ for _, condition := range conditions {
+ // check the next condition against the previous event and short circuit waiting for the next watch
+ if lastEvent != nil {
+ done, err := condition(*lastEvent)
+ if err != nil {
+ return lastEvent, err
+ }
+ if done {
+ continue
+ }
+ }
+ ConditionSucceeded:
+ for {
+ select {
+ case event, ok := <-ch:
+ if !ok {
+ return lastEvent, ErrWatchClosed
+ }
+ lastEvent = &event
+
+ done, err := condition(event)
+ if err != nil {
+ return lastEvent, err
+ }
+ if done {
+ break ConditionSucceeded
+ }
+
+ case <-ctx.Done():
+ return lastEvent, wait.ErrWaitTimeout
+ }
+ }
+ }
+ return lastEvent, nil
+}
+
+// Until wraps the watcherClient's watch function with a RetryWatcher, making sure that the watcher gets restarted in case of errors.
+// The initialResourceVersion will be given to the watch method when first called. It shall not be "" or "0",
+// given the underlying WATCH call issues (#74022).
+// Remaining behaviour is identical to function UntilWithoutRetry. (See above.)
+// Until can deal with API timeouts and lost connections.
+// It guarantees that you see all events, in the order they happened.
+// Due to this guarantee there is no way it can deal with a 'Resource version too old' error. It will fail in this case.
+// (See `UntilWithSync` if you'd prefer to recover from all the errors, including RV too old, by re-listing
+// those items. In normal code you should care about being level driven, so you'd not care about not seeing all the edges.)
+//
+// The most frequent usage for Until would be a test where you want to verify the exact order of events ("edges").
+func Until(ctx context.Context, initialResourceVersion string, watcherClient cache.Watcher, conditions ...ConditionFunc) (*watch.Event, error) {
+ w, err := NewRetryWatcher(initialResourceVersion, watcherClient)
+ if err != nil {
+ return nil, err
+ }
+
+ return UntilWithoutRetry(ctx, w, conditions...)
+}
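+
+// A minimal usage sketch for Until (hypothetical caller; podsWatcher is an
+// assumed cache.Watcher for Pods, rv a valid starting resource version, and
+// corev1 is k8s.io/api/core/v1):
+//
+//	ev, err := Until(ctx, rv, podsWatcher, func(e watch.Event) (bool, error) {
+//		pod, ok := e.Object.(*corev1.Pod)
+//		return ok && pod.Status.Phase == corev1.PodRunning, nil
+//	})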
+
+// UntilWithSync creates an informer from lw, optionally checks the precondition when the store is synced,
+// and watches the output until each provided condition succeeds, in a way that is identical
+// to function UntilWithoutRetry. (See above.)
+// UntilWithSync can deal with all errors like API timeout, lost connections and 'Resource version too old'.
+// It is the only function that can recover from 'Resource version too old'; Until and UntilWithoutRetry will
+// just fail in that case. On the other hand it can't provide you with guarantees as strong as using a simple
+// Watch method with Until. It can skip some intermediate events in case the watch function fails, but it will
+// re-list to recover, and you always get an event, if there has been a change, after recovery.
+// Also, with the current implementation based on DeltaFIFO, the order of the events you receive is guaranteed only for
+// a particular object, not across objects, even if they belong to the same resource.
+// The most frequent usage would be a command that needs to watch the "state of the world" and shouldn't fail, like:
+// waiting for an object to reach a state, "small" controllers, ...
+func UntilWithSync(ctx context.Context, lw cache.ListerWatcher, objType runtime.Object, precondition PreconditionFunc, conditions ...ConditionFunc) (*watch.Event, error) {
+ indexer, informer, watcher, done := NewIndexerInformerWatcher(lw, objType)
+ // We need to wait for the internal informers to fully stop so it's easier to reason about
+ // and it works with non-thread safe clients.
+ defer func() { <-done }()
+ // Proxy watcher can be stopped multiple times, so it's fine to use defer here to cover alternative branches and
+ // let UntilWithoutRetry stop it.
+ defer watcher.Stop()
+
+ if precondition != nil {
+ if !cache.WaitForCacheSync(ctx.Done(), informer.HasSynced) {
+ return nil, fmt.Errorf("UntilWithSync: unable to sync caches: %w", ctx.Err())
+ }
+
+ done, err := precondition(indexer)
+ if err != nil {
+ return nil, err
+ }
+
+ if done {
+ return nil, nil
+ }
+ }
+
+ return UntilWithoutRetry(ctx, watcher, conditions...)
+}
+
+// ContextWithOptionalTimeout wraps context.WithTimeout and handles infinite timeouts expressed as 0 duration.
+func ContextWithOptionalTimeout(parent context.Context, timeout time.Duration) (context.Context, context.CancelFunc) {
+ if timeout < 0 {
+ // This should be handled in validation
+ klog.Errorf("Timeout for context shall not be negative!")
+ timeout = 0
+ }
+
+ if timeout == 0 {
+ return context.WithCancel(parent)
+ }
+
+ return context.WithTimeout(parent, timeout)
+}
diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go
new file mode 100644
index 0000000000..9fddc6c5f2
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package spdy + +import ( + "fmt" + "net/http" + "net/url" + "time" + + "k8s.io/apimachinery/pkg/util/httpstream" + "k8s.io/apimachinery/pkg/util/httpstream/spdy" + restclient "k8s.io/client-go/rest" +) + +// Upgrader validates a response from the server after a SPDY upgrade. +type Upgrader interface { + // NewConnection validates the response and creates a new Connection. + NewConnection(resp *http.Response) (httpstream.Connection, error) +} + +// RoundTripperFor returns a round tripper and upgrader to use with SPDY. +func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) { + tlsConfig, err := restclient.TLSConfigFor(config) + if err != nil { + return nil, nil, err + } + proxy := http.ProxyFromEnvironment + if config.Proxy != nil { + proxy = config.Proxy + } + upgradeRoundTripper, err := spdy.NewRoundTripperWithConfig(spdy.RoundTripperConfig{ + TLS: tlsConfig, + Proxier: proxy, + PingPeriod: time.Second * 5, + UpgradeTransport: nil, + }) + if err != nil { + return nil, nil, err + } + wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper) + if err != nil { + return nil, nil, err + } + return wrapper, upgradeRoundTripper, nil +} + +// dialer implements the httpstream.Dialer interface. +type dialer struct { + client *http.Client + upgrader Upgrader + method string + url *url.URL +} + +var _ httpstream.Dialer = &dialer{} + +// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY. +func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer { + return &dialer{ + client: client, + upgrader: upgrader, + method: method, + url: url, + } +} + +func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) { + req, err := http.NewRequest(d.method, d.url.String(), nil) + if err != nil { + return nil, "", fmt.Errorf("error creating request: %v", err) + } + return Negotiate(d.upgrader, d.client, req, protocols...) +} + +// Negotiate opens a connection to a remote server and attempts to negotiate +// a SPDY connection. Upon success, it returns the connection and the protocol selected by +// the server. The client transport must use the upgradeRoundTripper - see RoundTripperFor. +func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) { + for i := range protocols { + req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i]) + } + resp, err := client.Do(req) + if err != nil { + return nil, "", fmt.Errorf("error sending request: %v", err) + } + defer resp.Body.Close() + conn, err := upgrader.NewConnection(resp) + if err != nil { + return nil, "", err + } + return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil +} diff --git a/vendor/k8s.io/client-go/transport/websocket/roundtripper.go b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go new file mode 100644 index 0000000000..010f916bc7 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/websocket/roundtripper.go @@ -0,0 +1,163 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package websocket + +import ( + "crypto/tls" + "fmt" + "net/http" + "net/url" + + gwebsocket "github.com/gorilla/websocket" + + "k8s.io/apimachinery/pkg/util/httpstream" + utilnet "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/transport" +) + +var ( + _ utilnet.TLSClientConfigHolder = &RoundTripper{} + _ http.RoundTripper = &RoundTripper{} +) + +// ConnectionHolder defines functions for structure providing +// access to the websocket connection. +type ConnectionHolder interface { + DataBufferSize() int + Connection() *gwebsocket.Conn +} + +// RoundTripper knows how to establish a connection to a remote WebSocket endpoint and make it available for use. +// RoundTripper must not be reused. +type RoundTripper struct { + // TLSConfig holds the TLS configuration settings to use when connecting + // to the remote server. + TLSConfig *tls.Config + + // Proxier specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxier func(req *http.Request) (*url.URL, error) + + // Conn holds the WebSocket connection after a round trip. + Conn *gwebsocket.Conn +} + +// Connection returns the stored websocket connection. +func (rt *RoundTripper) Connection() *gwebsocket.Conn { + return rt.Conn +} + +// DataBufferSize returns the size of buffers for the +// websocket connection. +func (rt *RoundTripper) DataBufferSize() int { + return 32 * 1024 +} + +// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder. 
+func (rt *RoundTripper) TLSClientConfig() *tls.Config { + return rt.TLSConfig +} + +// RoundTrip connects to the remote websocket using the headers in the request and the TLS +// configuration from the config +func (rt *RoundTripper) RoundTrip(request *http.Request) (retResp *http.Response, retErr error) { + defer func() { + if request.Body != nil { + err := request.Body.Close() + if retErr == nil { + retErr = err + } + } + }() + + // set the protocol version directly on the dialer from the header + protocolVersions := request.Header[httpstream.HeaderProtocolVersion] + delete(request.Header, httpstream.HeaderProtocolVersion) + + dialer := gwebsocket.Dialer{ + Proxy: rt.Proxier, + TLSClientConfig: rt.TLSConfig, + Subprotocols: protocolVersions, + ReadBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for + WriteBufferSize: rt.DataBufferSize() + 1024, // add space for the protocol byte indicating which channel the data is for + } + switch request.URL.Scheme { + case "https": + request.URL.Scheme = "wss" + case "http": + request.URL.Scheme = "ws" + default: + return nil, fmt.Errorf("unknown url scheme: %s", request.URL.Scheme) + } + wsConn, resp, err := dialer.DialContext(request.Context(), request.URL.String(), request.Header) + if err != nil { + return nil, &httpstream.UpgradeFailureError{Cause: err} + } + + rt.Conn = wsConn + + return resp, nil +} + +// RoundTripperFor transforms the passed rest config into a wrapped roundtripper, as well +// as a pointer to the websocket RoundTripper. The websocket RoundTripper contains the +// websocket connection after RoundTrip() on the wrapper. Returns an error if there is +// a problem creating the round trippers. +func RoundTripperFor(config *restclient.Config) (http.RoundTripper, ConnectionHolder, error) { + transportCfg, err := config.TransportConfig() + if err != nil { + return nil, nil, err + } + tlsConfig, err := transport.TLSConfigFor(transportCfg) + if err != nil { + return nil, nil, err + } + proxy := config.Proxy + if proxy == nil { + proxy = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + + upgradeRoundTripper := &RoundTripper{ + TLSConfig: tlsConfig, + Proxier: proxy, + } + wrapper, err := transport.HTTPWrappersForConfig(transportCfg, upgradeRoundTripper) + if err != nil { + return nil, nil, err + } + return wrapper, upgradeRoundTripper, nil +} + +// Negotiate opens a connection to a remote server and attempts to negotiate +// a WebSocket connection. Upon success, it returns the negotiated connection. +// The round tripper rt must use the WebSocket round tripper wsRt - see RoundTripperFor. +func Negotiate(rt http.RoundTripper, connectionInfo ConnectionHolder, req *http.Request, protocols ...string) (*gwebsocket.Conn, error) { + req.Header[httpstream.HeaderProtocolVersion] = protocols + resp, err := rt.RoundTrip(req) + if err != nil { + return nil, err + } + err = resp.Body.Close() + if err != nil { + connectionInfo.Connection().Close() + return nil, fmt.Errorf("error closing response body: %v", err) + } + return connectionInfo.Connection(), nil +} diff --git a/vendor/k8s.io/client-go/util/exec/exec.go b/vendor/k8s.io/client-go/util/exec/exec.go new file mode 100644 index 0000000000..d170badb60 --- /dev/null +++ b/vendor/k8s.io/client-go/util/exec/exec.go @@ -0,0 +1,52 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
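Usage mirrors the SPDY variant, with one difference worth keeping in mind: the websocket connection is not part of the HTTP response but is captured inside the RoundTripper and read back through the ConnectionHolder. A minimal sketch; the target URL and the v5 subprotocol are illustrative assumptions.

package wsexample

import (
	"net/http"
	"net/url"

	gwebsocket "github.com/gorilla/websocket"

	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/transport/websocket"
)

// dialWS opens a websocket to u using credentials from config. The wrapped
// round tripper applies auth headers; Negotiate drives it and hands back the
// connection stored by RoundTrip.
func dialWS(config *restclient.Config, u *url.URL) (*gwebsocket.Conn, error) {
	rt, holder, err := websocket.RoundTripperFor(config)
	if err != nil {
		return nil, err
	}
	req, err := http.NewRequest(http.MethodGet, u.String(), nil)
	if err != nil {
		return nil, err
	}
	// "v5.channel.k8s.io" is the remotecommand v5 subprotocol, again an
	// assumption for the sketch.
	return websocket.Negotiate(rt, holder, req, "v5.channel.k8s.io")
}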
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). +type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +func (e CodeExitError) Exited() bool { + return true +} + +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go new file mode 100644 index 0000000000..b1aa8c008a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/errors.go @@ -0,0 +1,79 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" +) + +type errors struct { + errors []error +} + +func (e *errors) Errors() []error { + return e.errors +} + +func (e *errors) AppendErrors(err ...error) { + e.errors = append(e.errors, err...) 
+} + +type ValidationError struct { + Path string + Err error +} + +func (e ValidationError) Error() string { + return fmt.Sprintf("ValidationError(%s): %v", e.Path, e.Err) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} + +type MissingRequiredFieldError struct { + Path string + Field string +} + +func (e MissingRequiredFieldError) Error() string { + return fmt.Sprintf("missing required field %q in %s", e.Field, e.Path) +} + +type UnknownFieldError struct { + Path string + Field string +} + +func (e UnknownFieldError) Error() string { + return fmt.Sprintf("unknown field %q in %s", e.Field, e.Path) +} + +type InvalidObjectTypeError struct { + Path string + Type string +} + +func (e InvalidObjectTypeError) Error() string { + return fmt.Sprintf("unknown object type %q in %s", e.Type, e.Path) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go new file mode 100644 index 0000000000..e66342a7f1 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/types.go @@ -0,0 +1,299 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "reflect" + "sort" + + "k8s.io/kube-openapi/pkg/util/proto" +) + +type validationItem interface { + proto.SchemaVisitor + + Errors() []error + Path() *proto.Path +} + +type baseItem struct { + errors errors + path proto.Path +} + +// Errors returns the list of errors found for this item. +func (item *baseItem) Errors() []error { + return item.errors.Errors() +} + +// AddValidationError wraps the given error into a ValidationError and +// attaches it to this item. +func (item *baseItem) AddValidationError(err error) { + item.errors.AppendErrors(ValidationError{Path: item.path.String(), Err: err}) +} + +// AddError adds a regular (non-validation related) error to the list. +func (item *baseItem) AddError(err error) { + item.errors.AppendErrors(err) +} + +// CopyErrors adds a list of errors to this item. This is useful to copy +// errors from subitems. +func (item *baseItem) CopyErrors(errs []error) { + item.errors.AppendErrors(errs...) +} + +// Path returns the path of this item, helps print useful errors. +func (item *baseItem) Path() *proto.Path { + return &item.path +} + +// mapItem represents a map entry in the yaml. 
+type mapItem struct { + baseItem + + Map map[string]interface{} +} + +func (item *mapItem) sortedKeys() []string { + sortedKeys := []string{} + for key := range item.Map { + sortedKeys = append(sortedKeys, key) + } + sort.Strings(sortedKeys) + return sortedKeys +} + +var _ validationItem = &mapItem{} + +func (item *mapItem) VisitPrimitive(schema *proto.Primitive) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "map"}) +} + +func (item *mapItem) VisitArray(schema *proto.Array) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: "map"}) +} + +func (item *mapItem) VisitMap(schema *proto.Map) { + for _, key := range item.sortedKeys() { + subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) + if err != nil { + item.AddError(err) + continue + } + schema.SubType.Accept(subItem) + item.CopyErrors(subItem.Errors()) + } +} + +func (item *mapItem) VisitKind(schema *proto.Kind) { + // Verify each sub-field. + for _, key := range item.sortedKeys() { + if item.Map[key] == nil { + continue + } + subItem, err := itemFactory(item.Path().FieldPath(key), item.Map[key]) + if err != nil { + item.AddError(err) + continue + } + if _, ok := schema.Fields[key]; !ok { + item.AddValidationError(UnknownFieldError{Path: schema.GetPath().String(), Field: key}) + continue + } + schema.Fields[key].Accept(subItem) + item.CopyErrors(subItem.Errors()) + } + + // Verify that all required fields are present. + for _, required := range schema.RequiredFields { + if v, ok := item.Map[required]; !ok || v == nil { + item.AddValidationError(MissingRequiredFieldError{Path: schema.GetPath().String(), Field: required}) + } + } +} + +func (item *mapItem) VisitArbitrary(schema *proto.Arbitrary) { +} + +func (item *mapItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// arrayItem represents a yaml array. +type arrayItem struct { + baseItem + + Array []interface{} +} + +var _ validationItem = &arrayItem{} + +func (item *arrayItem) VisitPrimitive(schema *proto.Primitive) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: "array"}) +} + +func (item *arrayItem) VisitArray(schema *proto.Array) { + for i, v := range item.Array { + path := item.Path().ArrayPath(i) + if v == nil { + item.AddValidationError(InvalidObjectTypeError{Type: "nil", Path: path.String()}) + continue + } + subItem, err := itemFactory(path, v) + if err != nil { + item.AddError(err) + continue + } + schema.SubType.Accept(subItem) + item.CopyErrors(subItem.Errors()) + } +} + +func (item *arrayItem) VisitMap(schema *proto.Map) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitKind(schema *proto.Kind) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: "array"}) +} + +func (item *arrayItem) VisitArbitrary(schema *proto.Arbitrary) { +} + +func (item *arrayItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// primitiveItem represents a yaml value. 
+type primitiveItem struct { + baseItem + + Value interface{} + Kind string +} + +var _ validationItem = &primitiveItem{} + +func (item *primitiveItem) VisitPrimitive(schema *proto.Primitive) { + // Some types of primitives can match more than one (a number + // can be a string, but not the other way around). Return from + // the switch if we have a valid possible type conversion + // NOTE(apelisse): This logic is blindly copied from the + // existing swagger logic, and I'm not sure I agree with it. + switch schema.Type { + case proto.Boolean: + switch item.Kind { + case proto.Boolean: + return + } + case proto.Integer: + switch item.Kind { + case proto.Integer, proto.Number: + return + } + case proto.Number: + switch item.Kind { + case proto.Integer, proto.Number: + return + } + case proto.String: + return + } + // TODO(wrong): this misses "null" + + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: schema.Type, Actual: item.Kind}) +} + +func (item *primitiveItem) VisitArray(schema *proto.Array) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "array", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitMap(schema *proto.Map) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitKind(schema *proto.Kind) { + item.AddValidationError(InvalidTypeError{Path: schema.GetPath().String(), Expected: "map", Actual: item.Kind}) +} + +func (item *primitiveItem) VisitArbitrary(schema *proto.Arbitrary) { +} + +func (item *primitiveItem) VisitReference(schema proto.Reference) { + // passthrough + schema.SubSchema().Accept(item) +} + +// itemFactory creates the relevant item type/visitor based on the current yaml type. +func itemFactory(path proto.Path, v interface{}) (validationItem, error) { + // We need to special case for no-type fields in yaml (e.g. empty item in list) + if v == nil { + return nil, InvalidObjectTypeError{Type: "nil", Path: path.String()} + } + kind := reflect.TypeOf(v).Kind() + switch kind { + case reflect.Bool: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Boolean, + }, nil + case reflect.Int, + reflect.Int8, + reflect.Int16, + reflect.Int32, + reflect.Int64, + reflect.Uint, + reflect.Uint8, + reflect.Uint16, + reflect.Uint32, + reflect.Uint64: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Integer, + }, nil + case reflect.Float32, + reflect.Float64: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.Number, + }, nil + case reflect.String: + return &primitiveItem{ + baseItem: baseItem{path: path}, + Value: v, + Kind: proto.String, + }, nil + case reflect.Array, + reflect.Slice: + return &arrayItem{ + baseItem: baseItem{path: path}, + Array: v.([]interface{}), + }, nil + case reflect.Map: + return &mapItem{ + baseItem: baseItem{path: path}, + Map: v.(map[string]interface{}), + }, nil + } + return nil, InvalidObjectTypeError{Type: kind.String(), Path: path.String()} +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go new file mode 100644 index 0000000000..35310f637a --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/validation/validation.go @@ -0,0 +1,30 @@ +/* +Copyright 2017 The Kubernetes Authors. 
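The visitor machinery above is driven with untyped values of exactly the kinds itemFactory accepts (map[string]interface{}, []interface{}, primitives); the ValidateModel entry point in validation.go below ties it together. A sketch under the assumption that the caller has already resolved a proto.Schema from an OpenAPI document:

package validateexample

import (
	"sigs.k8s.io/yaml"

	"k8s.io/kube-openapi/pkg/util/proto"
	"k8s.io/kube-openapi/pkg/util/proto/validation"
)

// validateYAML decodes manifest YAML into untyped Go values (sigs.k8s.io/yaml
// goes through JSON, so maps come out as map[string]interface{}) and walks
// them against the schema. The returned slice holds one error per violation.
func validateYAML(manifest []byte, schema proto.Schema, name string) ([]error, error) {
	var obj interface{}
	if err := yaml.Unmarshal(manifest, &obj); err != nil {
		return nil, err
	}
	return validation.ValidateModel(obj, schema, name), nil
}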
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "k8s.io/kube-openapi/pkg/util/proto" +) + +func ValidateModel(obj interface{}, schema proto.Schema, name string) []error { + rootValidation, err := itemFactory(proto.NewPath(name), obj) + if err != nil { + return []error{err} + } + schema.Accept(rootValidation) + return rootValidation.Errors() +} diff --git a/vendor/k8s.io/kubectl/LICENSE b/vendor/k8s.io/kubectl/LICENSE new file mode 100644 index 0000000000..8dada3edaf --- /dev/null +++ b/vendor/k8s.io/kubectl/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/customcolumn.go b/vendor/k8s.io/kubectl/pkg/cmd/get/customcolumn.go new file mode 100644 index 0000000000..38024cfa5d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/get/customcolumn.go @@ -0,0 +1,262 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package get + +import ( + "bufio" + "bytes" + "fmt" + "io" + "reflect" + "regexp" + "strings" + + "github.com/liggitt/tabwriter" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/util/jsonpath" +) + +var jsonRegexp = regexp.MustCompile(`^\{\.?([^{}]+)\}$|^\.?([^{}]+)$`) + +// RelaxedJSONPathExpression attempts to be flexible with JSONPath expressions, it accepts: +// - metadata.name (no leading '.' or curly braces '{...}' +// - {metadata.name} (no leading '.') +// - .metadata.name (no curly braces '{...}') +// - {.metadata.name} (complete expression) +// +// And transforms them all into a valid jsonpath expression: +// +// {.metadata.name} +func RelaxedJSONPathExpression(pathExpression string) (string, error) { + if len(pathExpression) == 0 { + return pathExpression, nil + } + submatches := jsonRegexp.FindStringSubmatch(pathExpression) + if submatches == nil { + return "", fmt.Errorf("unexpected path string, expected a 'name1.name2' or '.name1.name2' or '{name1.name2}' or '{.name1.name2}'") + } + if len(submatches) != 3 { + return "", fmt.Errorf("unexpected submatch list: %v", submatches) + } + var fieldSpec string + if len(submatches[1]) != 0 { + fieldSpec = submatches[1] + } else { + fieldSpec = submatches[2] + } + return fmt.Sprintf("{.%s}", fieldSpec), nil +} + +// NewCustomColumnsPrinterFromSpec creates a custom columns printer from a comma separated list of

<header>:<json-path-expr> pairs.
+// e.g. NAME:metadata.name,API_VERSION:apiVersion creates a printer that prints:
+//
+// NAME API_VERSION
+// foo bar
+func NewCustomColumnsPrinterFromSpec(spec string, decoder runtime.Decoder, noHeaders bool) (*CustomColumnsPrinter, error) {
+ if len(spec) == 0 {
+ return nil, fmt.Errorf("custom-columns format specified but no custom columns given")
+ }
+ parts := strings.Split(spec, ",")
+ columns := make([]Column, len(parts))
+ for ix := range parts {
+ colSpec := strings.SplitN(parts[ix], ":", 2)
+ if len(colSpec) != 2 {
+ return nil, fmt.Errorf("unexpected custom-columns spec: %s, expected <header>:<json-path-expr>", parts[ix])
    :", parts[ix]) + } + spec, err := RelaxedJSONPathExpression(colSpec[1]) + if err != nil { + return nil, err + } + columns[ix] = Column{Header: colSpec[0], FieldSpec: spec} + } + return &CustomColumnsPrinter{Columns: columns, Decoder: decoder, NoHeaders: noHeaders}, nil +} + +func splitOnWhitespace(line string) []string { + lineScanner := bufio.NewScanner(bytes.NewBufferString(line)) + lineScanner.Split(bufio.ScanWords) + result := []string{} + for lineScanner.Scan() { + result = append(result, lineScanner.Text()) + } + return result +} + +// NewCustomColumnsPrinterFromTemplate creates a custom columns printer from a template stream. The template is expected +// to consist of two lines, whitespace separated. The first line is the header line, the second line is the jsonpath field spec +// For example, the template below: +// NAME API_VERSION +// {metadata.name} {apiVersion} +func NewCustomColumnsPrinterFromTemplate(templateReader io.Reader, decoder runtime.Decoder) (*CustomColumnsPrinter, error) { + scanner := bufio.NewScanner(templateReader) + if !scanner.Scan() { + return nil, fmt.Errorf("invalid template, missing header line. Expected format is one line of space separated headers, one line of space separated column specs.") + } + headers := splitOnWhitespace(scanner.Text()) + + if !scanner.Scan() { + return nil, fmt.Errorf("invalid template, missing spec line. Expected format is one line of space separated headers, one line of space separated column specs.") + } + specs := splitOnWhitespace(scanner.Text()) + + if len(headers) != len(specs) { + return nil, fmt.Errorf("number of headers (%d) and field specifications (%d) don't match", len(headers), len(specs)) + } + + columns := make([]Column, len(headers)) + for ix := range headers { + spec, err := RelaxedJSONPathExpression(specs[ix]) + if err != nil { + return nil, err + } + columns[ix] = Column{ + Header: headers[ix], + FieldSpec: spec, + } + } + return &CustomColumnsPrinter{Columns: columns, Decoder: decoder, NoHeaders: false}, nil +} + +// Column represents a user specified column +type Column struct { + // The header to print above the column, general style is ALL_CAPS + Header string + // The pointer to the field in the object to print in JSONPath form + // e.g. {.ObjectMeta.Name}, see pkg/util/jsonpath for more details. + FieldSpec string +} + +// CustomColumnPrinter is a printer that knows how to print arbitrary columns +// of data from templates specified in the `Columns` array +type CustomColumnsPrinter struct { + Columns []Column + Decoder runtime.Decoder + NoHeaders bool + // lastType records type of resource printed last so that we don't repeat + // header while printing same type of resources. + lastType reflect.Type +} + +func (s *CustomColumnsPrinter) PrintObj(obj runtime.Object, out io.Writer) error { + // we use reflect.Indirect here in order to obtain the actual value from a pointer. + // we need an actual value in order to retrieve the package path for an object. + // using reflect.Indirect indiscriminately is valid here, as all runtime.Objects are supposed to be pointers. 
+ if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(obj)).Type().PkgPath()) { + return fmt.Errorf(printers.InternalObjectPrinterErr) + } + + if _, found := out.(*tabwriter.Writer); !found { + w := printers.GetNewTabWriter(out) + out = w + defer w.Flush() + } + + t := reflect.TypeOf(obj) + if !s.NoHeaders && t != s.lastType { + headers := make([]string, len(s.Columns)) + for ix := range s.Columns { + headers[ix] = s.Columns[ix].Header + } + fmt.Fprintln(out, strings.Join(headers, "\t")) + s.lastType = t + } + parsers := make([]*jsonpath.JSONPath, len(s.Columns)) + for ix := range s.Columns { + parsers[ix] = jsonpath.New(fmt.Sprintf("column%d", ix)).AllowMissingKeys(true) + if err := parsers[ix].Parse(s.Columns[ix].FieldSpec); err != nil { + return err + } + } + + if meta.IsListType(obj) { + objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + for ix := range objs { + if err := s.printOneObject(objs[ix], parsers, out); err != nil { + return err + } + } + } else { + if err := s.printOneObject(obj, parsers, out); err != nil { + return err + } + } + return nil +} + +func (s *CustomColumnsPrinter) printOneObject(obj runtime.Object, parsers []*jsonpath.JSONPath, out io.Writer) error { + columns := make([]string, len(parsers)) + switch u := obj.(type) { + case *metav1.WatchEvent: + if printers.InternalObjectPreventer.IsForbidden(reflect.Indirect(reflect.ValueOf(u.Object.Object)).Type().PkgPath()) { + return fmt.Errorf(printers.InternalObjectPrinterErr) + } + unstructuredObject, err := runtime.DefaultUnstructuredConverter.ToUnstructured(u.Object.Object) + if err != nil { + return err + } + obj = &unstructured.Unstructured{ + Object: map[string]interface{}{ + "type": u.Type, + "object": unstructuredObject, + }, + } + + case *runtime.Unknown: + if len(u.Raw) > 0 { + var err error + if obj, err = runtime.Decode(s.Decoder, u.Raw); err != nil { + return fmt.Errorf("can't decode object for printing: %v (%s)", err, u.Raw) + } + } + } + + for ix := range parsers { + parser := parsers[ix] + + var values [][]reflect.Value + var err error + if unstructured, ok := obj.(runtime.Unstructured); ok { + values, err = parser.FindResults(unstructured.UnstructuredContent()) + } else { + values, err = parser.FindResults(reflect.ValueOf(obj).Elem().Interface()) + } + + if err != nil { + return err + } + valueStrings := []string{} + if len(values) == 0 || len(values[0]) == 0 { + valueStrings = append(valueStrings, "") + } + for arrIx := range values { + for valIx := range values[arrIx] { + valueStrings = append(valueStrings, printers.EscapeTerminal(fmt.Sprint(values[arrIx][valIx].Interface()))) + } + } + columns[ix] = strings.Join(valueStrings, ",") + } + fmt.Fprintln(out, strings.Join(columns, "\t")) + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/customcolumn_flags.go b/vendor/k8s.io/kubectl/pkg/cmd/get/customcolumn_flags.go new file mode 100644 index 0000000000..efd7fcf752 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/get/customcolumn_flags.go @@ -0,0 +1,113 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
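To make the spec grammar above concrete: a spec such as NAME:.metadata.name,STATUS:.status.phase becomes two Column entries, each compiled into a JSONPath parser inside PrintObj. A short usage sketch; the decoder construction mirrors customcolumn_flags.go below, and the column choices are illustrative:

package ccexample

import (
	"os"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubectl/pkg/cmd/get"
	"k8s.io/kubectl/pkg/scheme"
)

// printNameAndPhase prints NAME and STATUS columns for any runtime.Object,
// single object or list, using the relaxed JSONPath forms accepted above.
func printNameAndPhase(obj runtime.Object) error {
	decoder := scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...)
	printer, err := get.NewCustomColumnsPrinterFromSpec(
		"NAME:.metadata.name,STATUS:.status.phase", decoder, false /* noHeaders */)
	if err != nil {
		return err
	}
	return printer.PrintObj(obj, os.Stdout)
}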
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package get + +import ( + "fmt" + "os" + "sort" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/kubectl/pkg/scheme" +) + +var columnsFormats = map[string]bool{ + "custom-columns-file": true, + "custom-columns": true, +} + +// CustomColumnsPrintFlags provides default flags necessary for printing +// custom resource columns from an inline-template or file. +type CustomColumnsPrintFlags struct { + NoHeaders bool + TemplateArgument string +} + +func (f *CustomColumnsPrintFlags) AllowedFormats() []string { + formats := make([]string, 0, len(columnsFormats)) + for format := range columnsFormats { + formats = append(formats, format) + } + sort.Strings(formats) + return formats +} + +// ToPrinter receives an templateFormat and returns a printer capable of +// handling custom-column printing. +// Returns false if the specified templateFormat does not match a supported format. +// Supported format types can be found in pkg/printers/printers.go +func (f *CustomColumnsPrintFlags) ToPrinter(templateFormat string) (printers.ResourcePrinter, error) { + if len(templateFormat) == 0 { + return nil, genericclioptions.NoCompatiblePrinterError{} + } + + templateValue := "" + + if len(f.TemplateArgument) == 0 { + for format := range columnsFormats { + format = format + "=" + if strings.HasPrefix(templateFormat, format) { + templateValue = templateFormat[len(format):] + templateFormat = format[:len(format)-1] + break + } + } + } else { + templateValue = f.TemplateArgument + } + + if _, supportedFormat := columnsFormats[templateFormat]; !supportedFormat { + return nil, genericclioptions.NoCompatiblePrinterError{OutputFormat: &templateFormat, AllowedFormats: f.AllowedFormats()} + } + + if len(templateValue) == 0 { + return nil, fmt.Errorf("custom-columns format specified but no custom columns given") + } + + // UniversalDecoder call must specify parameter versions; otherwise it will decode to internal versions. + decoder := scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...) + + if templateFormat == "custom-columns-file" { + file, err := os.Open(templateValue) + if err != nil { + return nil, fmt.Errorf("error reading template %s, %v\n", templateValue, err) + } + defer file.Close() + p, err := NewCustomColumnsPrinterFromTemplate(file, decoder) + return p, err + } + + return NewCustomColumnsPrinterFromSpec(templateValue, decoder, f.NoHeaders) +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to custom-columns printing +func (f *CustomColumnsPrintFlags) AddFlags(c *cobra.Command) {} + +// NewCustomColumnsPrintFlags returns flags associated with +// custom-column printing, with default values set. +// NoHeaders and TemplateArgument should be set by callers. +func NewCustomColumnsPrintFlags() *CustomColumnsPrintFlags { + return &CustomColumnsPrintFlags{ + NoHeaders: false, + TemplateArgument: "", + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/get.go b/vendor/k8s.io/kubectl/pkg/cmd/get/get.go new file mode 100644 index 0000000000..f16586994a --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/get/get.go @@ -0,0 +1,821 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
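The format/value splitting in ToPrinter is easiest to see from the caller's side. Both invocation styles below are supported; the column specs are illustrative:

package ccflagsexample

import "k8s.io/kubectl/pkg/cmd/get"

// toPrinterExample exercises the two ways ToPrinter receives its template.
func toPrinterExample() error {
	flags := get.NewCustomColumnsPrintFlags()

	// Inline form: "-o custom-columns=NAME:.metadata.name" arrives as a single
	// string; ToPrinter peels off the "custom-columns=" prefix itself.
	if _, err := flags.ToPrinter("custom-columns=NAME:.metadata.name"); err != nil {
		return err
	}

	// Pre-split form: the template was captured separately (e.g. by a flag
	// parser), so only the bare format name is passed.
	flags.TemplateArgument = "NAME:.metadata.name,AGE:.metadata.creationTimestamp"
	_, err := flags.ToPrinter("custom-columns")
	return err
}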
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package get + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/url" + "strings" + + "github.com/spf13/cobra" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" + "k8s.io/apimachinery/pkg/runtime" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/cli-runtime/pkg/resource" + kubernetesscheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + watchtools "k8s.io/client-go/tools/watch" + cmdutil "k8s.io/kubectl/pkg/cmd/util" + "k8s.io/kubectl/pkg/rawhttp" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/i18n" + "k8s.io/kubectl/pkg/util/interrupt" + "k8s.io/kubectl/pkg/util/slice" + "k8s.io/kubectl/pkg/util/templates" + utilpointer "k8s.io/utils/pointer" +) + +// GetOptions contains the input to the get command. +type GetOptions struct { + PrintFlags *PrintFlags + ToPrinter func(*meta.RESTMapping, *bool, bool, bool) (printers.ResourcePrinterFunc, error) + IsHumanReadablePrinter bool + + CmdParent string + + resource.FilenameOptions + + Raw string + Watch bool + WatchOnly bool + ChunkSize int64 + + OutputWatchEvents bool + + LabelSelector string + FieldSelector string + AllNamespaces bool + Namespace string + ExplicitNamespace bool + Subresource string + SortBy string + + ServerPrint bool + + NoHeaders bool + IgnoreNotFound bool + + genericiooptions.IOStreams +} + +var ( + getLong = templates.LongDesc(i18n.T(` + Display one or many resources. + + Prints a table of the most important information about the specified resources. + You can filter the list using a label selector and the --selector flag. If the + desired resource type is namespaced you will only see results in your current + namespace unless you pass --all-namespaces. + + By specifying the output as 'template' and providing a Go template as the value + of the --template flag, you can filter the attributes of the fetched resources.`)) + + getExample = templates.Examples(i18n.T(` + # List all pods in ps output format + kubectl get pods + + # List all pods in ps output format with more information (such as node name) + kubectl get pods -o wide + + # List a single replication controller with specified NAME in ps output format + kubectl get replicationcontroller web + + # List deployments in JSON output format, in the "v1" version of the "apps" API group + kubectl get deployments.v1.apps -o json + + # List a single pod in JSON output format + kubectl get -o json pod web-pod-13je7 + + # List a pod identified by type and name specified in "pod.yaml" in JSON output format + kubectl get -f pod.yaml -o json + + # List resources from a directory with kustomization.yaml - e.g. 
dir/kustomization.yaml + kubectl get -k dir/ + + # Return only the phase value of the specified pod + kubectl get -o template pod/web-pod-13je7 --template={{.status.phase}} + + # List resource information in custom columns + kubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0].name,IMAGE:.spec.containers[0].image + + # List all replication controllers and services together in ps output format + kubectl get rc,services + + # List one or more resources by their type and names + kubectl get rc/web service/frontend pods/web-pod-13je7 + + # List the 'status' subresource for a single pod + kubectl get pod web-pod-13je7 --subresource status`)) +) + +const ( + useServerPrintColumns = "server-print" +) + +var supportedSubresources = []string{"status", "scale"} + +// NewGetOptions returns a GetOptions with default chunk size 500. +func NewGetOptions(parent string, streams genericiooptions.IOStreams) *GetOptions { + return &GetOptions{ + PrintFlags: NewGetPrintFlags(), + CmdParent: parent, + + IOStreams: streams, + ChunkSize: cmdutil.DefaultChunkSize, + ServerPrint: true, + } +} + +// NewCmdGet creates a command object for the generic "get" action, which +// retrieves one or more resources from a server. +func NewCmdGet(parent string, f cmdutil.Factory, streams genericiooptions.IOStreams) *cobra.Command { + o := NewGetOptions(parent, streams) + + cmd := &cobra.Command{ + Use: fmt.Sprintf("get [(-o|--output=)%s] (TYPE[.VERSION][.GROUP] [NAME | -l label] | TYPE[.VERSION][.GROUP]/NAME ...) [flags]", strings.Join(o.PrintFlags.AllowedFormats(), "|")), + DisableFlagsInUseLine: true, + Short: i18n.T("Display one or many resources"), + Long: getLong + "\n\n" + cmdutil.SuggestAPIResources(parent), + Example: getExample, + // ValidArgsFunction is set when this function is called so that we have access to the util package + Run: func(cmd *cobra.Command, args []string) { + cmdutil.CheckErr(o.Complete(f, cmd, args)) + cmdutil.CheckErr(o.Validate()) + cmdutil.CheckErr(o.Run(f, args)) + }, + SuggestFor: []string{"list", "ps"}, + } + + o.PrintFlags.AddFlags(cmd) + + cmd.Flags().StringVar(&o.Raw, "raw", o.Raw, "Raw URI to request from the server. Uses the transport specified by the kubeconfig file.") + cmd.Flags().BoolVarP(&o.Watch, "watch", "w", o.Watch, "After listing/getting the requested object, watch for changes.") + cmd.Flags().BoolVar(&o.WatchOnly, "watch-only", o.WatchOnly, "Watch for changes to the requested object(s), without listing/getting first.") + cmd.Flags().BoolVar(&o.OutputWatchEvents, "output-watch-events", o.OutputWatchEvents, "Output watch event objects when --watch or --watch-only is used. Existing objects are output as initial ADDED events.") + cmd.Flags().BoolVar(&o.IgnoreNotFound, "ignore-not-found", o.IgnoreNotFound, "If the requested object does not exist the command will return exit code 0.") + cmd.Flags().StringVar(&o.FieldSelector, "field-selector", o.FieldSelector, "Selector (field query) to filter on, supports '=', '==', and '!='.(e.g. --field-selector key1=value1,key2=value2). The server only supports a limited number of field queries per type.") + cmd.Flags().BoolVarP(&o.AllNamespaces, "all-namespaces", "A", o.AllNamespaces, "If present, list the requested object(s) across all namespaces. 
Namespace in current context is ignored even if specified with --namespace.") + addServerPrintColumnFlags(cmd, o) + cmdutil.AddFilenameOptionFlags(cmd, &o.FilenameOptions, "identifying the resource to get from a server.") + cmdutil.AddChunkSizeFlag(cmd, &o.ChunkSize) + cmdutil.AddLabelSelectorFlagVar(cmd, &o.LabelSelector) + cmdutil.AddSubresourceFlags(cmd, &o.Subresource, "If specified, gets the subresource of the requested object.", supportedSubresources...) + return cmd +} + +// Complete takes the command arguments and factory and infers any remaining options. +func (o *GetOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error { + if len(o.Raw) > 0 { + if len(args) > 0 { + return fmt.Errorf("arguments may not be passed when --raw is specified") + } + return nil + } + + var err error + o.Namespace, o.ExplicitNamespace, err = f.ToRawKubeConfigLoader().Namespace() + if err != nil { + return err + } + if o.AllNamespaces { + o.ExplicitNamespace = false + } + + if o.PrintFlags.HumanReadableFlags.SortBy != nil { + o.SortBy = *o.PrintFlags.HumanReadableFlags.SortBy + } + + o.NoHeaders = cmdutil.GetFlagBool(cmd, "no-headers") + + // TODO (soltysh): currently we don't support custom columns + // with server side print. So in these cases force the old behavior. + outputOption := cmd.Flags().Lookup("output").Value.String() + if strings.Contains(outputOption, "custom-columns") || outputOption == "yaml" || strings.Contains(outputOption, "json") { + o.ServerPrint = false + } + + templateArg := "" + if o.PrintFlags.TemplateFlags != nil && o.PrintFlags.TemplateFlags.TemplateArgument != nil { + templateArg = *o.PrintFlags.TemplateFlags.TemplateArgument + } + + // human readable printers have special conversion rules, so we determine if we're using one. + if (len(*o.PrintFlags.OutputFormat) == 0 && len(templateArg) == 0) || *o.PrintFlags.OutputFormat == "wide" { + o.IsHumanReadablePrinter = true + } + + o.ToPrinter = func(mapping *meta.RESTMapping, outputObjects *bool, withNamespace bool, withKind bool) (printers.ResourcePrinterFunc, error) { + // make a new copy of current flags / opts before mutating + printFlags := o.PrintFlags.Copy() + + if mapping != nil { + printFlags.SetKind(mapping.GroupVersionKind.GroupKind()) + } + if withNamespace { + printFlags.EnsureWithNamespace() + } + if withKind { + printFlags.EnsureWithKind() + } + + printer, err := printFlags.ToPrinter() + if err != nil { + return nil, err + } + printer, err = printers.NewTypeSetter(scheme.Scheme).WrapToPrinter(printer, nil) + if err != nil { + return nil, err + } + + if len(o.SortBy) > 0 { + printer = &SortingPrinter{Delegate: printer, SortField: o.SortBy} + } + if outputObjects != nil { + printer = &skipPrinter{delegate: printer, output: outputObjects} + } + if o.ServerPrint { + printer = &TablePrinter{Delegate: printer} + } + return printer.PrintObj, nil + } + + switch { + case o.Watch || o.WatchOnly: + if len(o.SortBy) > 0 { + fmt.Fprintf(o.IOStreams.ErrOut, "warning: --watch or --watch-only requested, --sort-by will be ignored\n") + } + default: + if len(args) == 0 && cmdutil.IsFilenameSliceEmpty(o.Filenames, o.Kustomize) { + fmt.Fprintf(o.ErrOut, "You must specify the type of resource to get. %s\n\n", cmdutil.SuggestAPIResources(o.CmdParent)) + fullCmdName := cmd.Parent().CommandPath() + usageString := "Required resource not specified." 
+ if len(fullCmdName) > 0 && cmdutil.IsSiblingCommandExists(cmd, "explain") {
+ usageString = fmt.Sprintf("%s\nUse \"%s explain <resource>\" for a detailed description of that resource (e.g. %[2]s explain pods).", usageString, fullCmdName)
+ }
+
+ return cmdutil.UsageErrorf(cmd, usageString)
+ }
+ }
+
+ return nil
+}
+
+// Validate checks the set of flags provided by the user.
+func (o *GetOptions) Validate() error {
+ if len(o.Raw) > 0 {
+ if o.Watch || o.WatchOnly || len(o.LabelSelector) > 0 {
+ return fmt.Errorf("--raw may not be specified with other flags that filter the server request or alter the output")
+ }
+ if o.PrintFlags.OutputFormat != nil && len(*o.PrintFlags.OutputFormat) > 0 {
+ return fmt.Errorf("--raw and --output are mutually exclusive")
+ }
+ if _, err := url.ParseRequestURI(o.Raw); err != nil {
+ return fmt.Errorf("--raw must be a valid URL path: %v", err)
+ }
+ }
+ if o.PrintFlags.HumanReadableFlags.ShowLabels != nil && *o.PrintFlags.HumanReadableFlags.ShowLabels && o.PrintFlags.OutputFormat != nil {
+ outputOption := *o.PrintFlags.OutputFormat
+ if outputOption != "" && outputOption != "wide" {
+ return fmt.Errorf("--show-labels option cannot be used with %s printer", outputOption)
+ }
+ }
+ if o.OutputWatchEvents && !(o.Watch || o.WatchOnly) {
+ return fmt.Errorf("--output-watch-events option can only be used with --watch or --watch-only")
+ }
+ if len(o.Subresource) > 0 && !slice.ContainsString(supportedSubresources, o.Subresource, nil) {
+ return fmt.Errorf("invalid subresource value: %q. Must be one of %v", o.Subresource, supportedSubresources)
+ }
+ return nil
+}
+
+// OriginalPositioner and NopPositioner are required for swap/sort operations of data in table format
+type OriginalPositioner interface {
+ OriginalPosition(int) int
+}
+
+// NopPositioner and OriginalPositioner are required for swap/sort operations of data in table format
+type NopPositioner struct{}
+
+// OriginalPosition returns the original position from a NopPositioner object
+func (t *NopPositioner) OriginalPosition(ix int) int {
+ return ix
+}
+
+// RuntimeSorter holds the required objects to perform sorting of runtime objects
+type RuntimeSorter struct {
+ field string
+ decoder runtime.Decoder
+ objects []runtime.Object
+ positioner OriginalPositioner
+}
+
+// Sort performs the sorting of runtime objects
+func (r *RuntimeSorter) Sort() error {
+ // a list is only considered "sorted" if there are 0 or 1 items in it
+ // AND (if 1 item) the item is not a Table object
+ if len(r.objects) == 0 {
+ return nil
+ }
+ if len(r.objects) == 1 {
+ _, isTable := r.objects[0].(*metav1.Table)
+ if !isTable {
+ return nil
+ }
+ }
+
+ includesTable := false
+ includesRuntimeObjs := false
+
+ for _, obj := range r.objects {
+ switch t := obj.(type) {
+ case *metav1.Table:
+ includesTable = true
+
+ if sorter, err := NewTableSorter(t, r.field); err != nil {
+ return err
+ } else if err := sorter.Sort(); err != nil {
+ return err
+ }
+ default:
+ includesRuntimeObjs = true
+ }
+ }
+
+ // we use a NopPositioner when dealing with Table objects
+ // because the objects themselves are not swapped, but rather
+ // the rows in each object are swapped / sorted.
+ r.positioner = &NopPositioner{}
+
+ if includesRuntimeObjs && includesTable {
+ return fmt.Errorf("sorting is not supported on mixed Table and non-Table object lists")
+ }
+ if includesTable {
+ return nil
+ }
+
+ // if not dealing with a Table response from the server, assume
+ // all objects are runtime.Object as usual, and sort using old method.
+ var err error + if r.positioner, err = SortObjects(r.decoder, r.objects, r.field); err != nil { + return err + } + return nil +} + +// OriginalPosition returns the original position of a runtime object +func (r *RuntimeSorter) OriginalPosition(ix int) int { + if r.positioner == nil { + return 0 + } + return r.positioner.OriginalPosition(ix) +} + +// WithDecoder allows custom decoder to be set for testing +func (r *RuntimeSorter) WithDecoder(decoder runtime.Decoder) *RuntimeSorter { + r.decoder = decoder + return r +} + +// NewRuntimeSorter returns a new instance of RuntimeSorter +func NewRuntimeSorter(objects []runtime.Object, sortBy string) *RuntimeSorter { + parsedField, err := RelaxedJSONPathExpression(sortBy) + if err != nil { + parsedField = sortBy + } + + return &RuntimeSorter{ + field: parsedField, + decoder: kubernetesscheme.Codecs.UniversalDecoder(), + objects: objects, + } +} + +func (o *GetOptions) transformRequests(req *rest.Request) { + if !o.ServerPrint || !o.IsHumanReadablePrinter { + return + } + + req.SetHeader("Accept", strings.Join([]string{ + fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1.SchemeGroupVersion.Version, metav1.GroupName), + fmt.Sprintf("application/json;as=Table;v=%s;g=%s", metav1beta1.SchemeGroupVersion.Version, metav1beta1.GroupName), + "application/json", + }, ",")) + + // if sorting, ensure we receive the full object in order to introspect its fields via jsonpath + if len(o.SortBy) > 0 { + req.Param("includeObject", "Object") + } +} + +// Run performs the get operation. +// TODO: remove the need to pass these arguments, like other commands. +func (o *GetOptions) Run(f cmdutil.Factory, args []string) error { + if len(o.Raw) > 0 { + restClient, err := f.RESTClient() + if err != nil { + return err + } + return rawhttp.RawGet(restClient, o.IOStreams, o.Raw) + } + if o.Watch || o.WatchOnly { + return o.watch(f, args) + } + + chunkSize := o.ChunkSize + if len(o.SortBy) > 0 { + // TODO(juanvallejo): in the future, we could have the client use chunking + // to gather all results, then sort them all at the end to reduce server load. + chunkSize = 0 + } + + r := f.NewBuilder(). + Unstructured(). + NamespaceParam(o.Namespace).DefaultNamespace().AllNamespaces(o.AllNamespaces). + FilenameParam(o.ExplicitNamespace, &o.FilenameOptions). + LabelSelectorParam(o.LabelSelector). + FieldSelectorParam(o.FieldSelector). + Subresource(o.Subresource). + RequestChunksOf(chunkSize). + ResourceTypeOrNameArgs(true, args...). + ContinueOnError(). + Latest(). + Flatten(). + TransformRequests(o.transformRequests). 
+		Do()
+
+	if o.IgnoreNotFound {
+		r.IgnoreErrors(apierrors.IsNotFound)
+	}
+	if err := r.Err(); err != nil {
+		return err
+	}
+
+	if !o.IsHumanReadablePrinter {
+		return o.printGeneric(r)
+	}
+
+	allErrs := []error{}
+	errs := sets.NewString()
+	infos, err := r.Infos()
+	if err != nil {
+		allErrs = append(allErrs, err)
+	}
+	printWithKind := multipleGVKsRequested(infos)
+
+	objs := make([]runtime.Object, len(infos))
+	for ix := range infos {
+		objs[ix] = infos[ix].Object
+	}
+
+	var positioner OriginalPositioner
+	if len(o.SortBy) > 0 {
+		sorter := NewRuntimeSorter(objs, o.SortBy)
+		if err := sorter.Sort(); err != nil {
+			return err
+		}
+		positioner = sorter
+	}
+
+	var printer printers.ResourcePrinter
+	var lastMapping *meta.RESTMapping
+
+	// track if we write any output
+	trackingWriter := &trackingWriterWrapper{Delegate: o.Out}
+	// output an empty line separating output
+	separatorWriter := &separatorWriterWrapper{Delegate: trackingWriter}
+
+	w := printers.GetNewTabWriter(separatorWriter)
+	allResourcesNamespaced := !o.AllNamespaces
+	for ix := range objs {
+		var mapping *meta.RESTMapping
+		var info *resource.Info
+		if positioner != nil {
+			info = infos[positioner.OriginalPosition(ix)]
+			mapping = info.Mapping
+		} else {
+			info = infos[ix]
+			mapping = info.Mapping
+		}
+
+		allResourcesNamespaced = allResourcesNamespaced && info.Namespaced()
+		printWithNamespace := o.AllNamespaces
+
+		if mapping != nil && mapping.Scope.Name() == meta.RESTScopeNameRoot {
+			printWithNamespace = false
+		}
+
+		if shouldGetNewPrinterForMapping(printer, lastMapping, mapping) {
+			w.Flush()
+			w.SetRememberedWidths(nil)
+
+			// add linebreaks between resource groups (if there is more than one)
+			// when it satisfies all following 3 conditions:
+			// 1) it's not the first resource group
+			// 2) it has row header
+			// 3) we've written output since the last time we started a new set of headers
+			if lastMapping != nil && !o.NoHeaders && trackingWriter.Written > 0 {
+				separatorWriter.SetReady(true)
+			}
+
+			printer, err = o.ToPrinter(mapping, nil, printWithNamespace, printWithKind)
+			if err != nil {
+				if !errs.Has(err.Error()) {
+					errs.Insert(err.Error())
+					allErrs = append(allErrs, err)
+				}
+				continue
+			}
+
+			lastMapping = mapping
+		}
+
+		printer.PrintObj(info.Object, w)
+	}
+	w.Flush()
+	if trackingWriter.Written == 0 && !o.IgnoreNotFound && len(allErrs) == 0 {
+		// if we wrote no output, and had no errors, and are not ignoring NotFound, be sure we output something
+		if allResourcesNamespaced {
+			fmt.Fprintf(o.ErrOut, "No resources found in %s namespace.\n", o.Namespace)
+		} else {
+			fmt.Fprintln(o.ErrOut, "No resources found")
+		}
+	}
+	return utilerrors.NewAggregate(allErrs)
+}
+
+type trackingWriterWrapper struct {
+	Delegate io.Writer
+	Written  int
+}
+
+func (t *trackingWriterWrapper) Write(p []byte) (n int, err error) {
+	t.Written += len(p)
+	return t.Delegate.Write(p)
+}
+
+type separatorWriterWrapper struct {
+	Delegate io.Writer
+	Ready    bool
+}
+
+func (s *separatorWriterWrapper) Write(p []byte) (n int, err error) {
+	// If we're about to write non-empty bytes and `s` is ready,
+	// we prepend an empty line to `p` and reset `s.Ready`.
+	if len(p) != 0 && s.Ready {
+		fmt.Fprintln(s.Delegate)
+		s.Ready = false
+	}
+	return s.Delegate.Write(p)
+}
+
+func (s *separatorWriterWrapper) SetReady(state bool) {
+	s.Ready = state
+}
+
+// watch starts a client-side watch of one or more resources.
+// TODO: remove the need for arguments here.
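Before the watch implementation, it is worth pausing on the two writer wrappers just defined: trackingWriterWrapper counts bytes so Run can detect an all-empty result set, while separatorWriterWrapper, once armed via SetReady, emits exactly one blank line before the next non-empty write. A standalone sketch of how they compose (plain Go; the demo types mirror the vendored ones but are hypothetical):

    package main

    import (
        "fmt"
        "io"
        "os"
    )

    // counts bytes written so the caller can tell whether any output was produced
    type tracking struct {
        delegate io.Writer
        written  int
    }

    func (t *tracking) Write(p []byte) (int, error) {
        t.written += len(p)
        return t.delegate.Write(p)
    }

    // once armed, emits a single blank line before the next non-empty write
    type separator struct {
        delegate io.Writer
        ready    bool
    }

    func (s *separator) Write(p []byte) (int, error) {
        if len(p) != 0 && s.ready {
            fmt.Fprintln(s.delegate)
            s.ready = false
        }
        return s.delegate.Write(p)
    }

    func main() {
        t := &tracking{delegate: os.Stdout}
        s := &separator{delegate: t}

        fmt.Fprintln(s, "NAME   AGE")   // first resource group
        s.ready = true                  // armed between groups, like SetReady(true)
        fmt.Fprintln(s, "NAME   READY") // a single blank line precedes this group
        fmt.Printf("bytes written: %d\n", t.written)
    }

Arming the separator only between resource groups is what keeps the command from printing a leading or trailing blank line.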
+func (o *GetOptions) watch(f cmdutil.Factory, args []string) error {
+	r := f.NewBuilder().
+		Unstructured().
+		NamespaceParam(o.Namespace).DefaultNamespace().AllNamespaces(o.AllNamespaces).
+		FilenameParam(o.ExplicitNamespace, &o.FilenameOptions).
+		LabelSelectorParam(o.LabelSelector).
+		FieldSelectorParam(o.FieldSelector).
+		RequestChunksOf(o.ChunkSize).
+		ResourceTypeOrNameArgs(true, args...).
+		SingleResourceType().
+		Latest().
+		TransformRequests(o.transformRequests).
+		Do()
+	if err := r.Err(); err != nil {
+		return err
+	}
+	infos, err := r.Infos()
+	if err != nil {
+		return err
+	}
+	if multipleGVKsRequested(infos) {
+		return i18n.Errorf("watch is only supported on individual resources and resource collections - more than 1 resource was found")
+	}
+
+	info := infos[0]
+	mapping := info.ResourceMapping()
+	outputObjects := utilpointer.BoolPtr(!o.WatchOnly)
+	printer, err := o.ToPrinter(mapping, outputObjects, o.AllNamespaces, false)
+	if err != nil {
+		return err
+	}
+	obj, err := r.Object()
+	if err != nil {
+		return err
+	}
+
+	// watching from resourceVersion 0 starts the watch at ~now and
+	// will return an initial watch event. Starting from ~now, rather
+	// than at the rv of the object, ensures that we start the watch from
+	// inside the watch window, which the rv of the object might not be.
+	rv := "0"
+	isList := meta.IsListType(obj)
+	if isList {
+		// the resourceVersion of list objects is ~now but won't return
+		// an initial watch event
+		rv, err = meta.NewAccessor().ResourceVersion(obj)
+		if err != nil {
+			return err
+		}
+	}
+
+	writer := printers.GetNewTabWriter(o.Out)
+
+	// print the current object
+	var objsToPrint []runtime.Object
+	if isList {
+		objsToPrint, _ = meta.ExtractList(obj)
+	} else {
+		objsToPrint = append(objsToPrint, obj)
+	}
+	for _, objToPrint := range objsToPrint {
+		if o.OutputWatchEvents {
+			objToPrint = &metav1.WatchEvent{Type: string(watch.Added), Object: runtime.RawExtension{Object: objToPrint}}
+		}
+		if err := printer.PrintObj(objToPrint, writer); err != nil {
+			return fmt.Errorf("unable to output the provided object: %v", err)
+		}
+	}
+	writer.Flush()
+	if isList {
+		// we can start outputting objects now, watches started from lists don't emit synthetic added events
+		*outputObjects = true
+	} else {
+		// suppress output, since watches started for individual items emit a synthetic ADDED event first
+		*outputObjects = false
+	}
+
+	// print watched changes
+	w, err := r.Watch(rv)
+	if err != nil {
+		return err
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	intr := interrupt.New(nil, cancel)
+	intr.Run(func() error {
+		_, err := watchtools.UntilWithoutRetry(ctx, w, func(e watch.Event) (bool, error) {
+			objToPrint := e.Object
+			if o.OutputWatchEvents {
+				objToPrint = &metav1.WatchEvent{Type: string(e.Type), Object: runtime.RawExtension{Object: objToPrint}}
+			}
+			if err := printer.PrintObj(objToPrint, writer); err != nil {
+				return false, err
+			}
+			writer.Flush()
+			// after processing at least one event, start outputting objects
+			*outputObjects = true
+			return false, nil
+		})
+		return err
+	})
+	return nil
+}
+
+func (o *GetOptions) printGeneric(r *resource.Result) error {
+	// we flattened the data from the builder, so we have individual items, but now we'd like to either:
+	// 1. if there is more than one item, combine them all into a single list
+	// 2. if there is a single item and that item is a list, leave it as its specific list
+	// 3. if there is a single item and it is not a list, leave it as a single item
+	var errs []error
+	singleItemImplied := false
+	infos, err := r.IntoSingleItemImplied(&singleItemImplied).Infos()
+	if err != nil {
+		if singleItemImplied {
+			return err
+		}
+		errs = append(errs, err)
+	}
+
+	if len(infos) == 0 && o.IgnoreNotFound {
+		return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
+	}
+
+	printer, err := o.ToPrinter(nil, nil, false, false)
+	if err != nil {
+		return err
+	}
+
+	var obj runtime.Object
+	if !singleItemImplied || len(infos) != 1 {
+		// we have zero or multiple items, so coerce all items into a list.
+		// we don't want an *unstructured.Unstructured list yet, as we
+		// may be dealing with non-unstructured objects. Compose all items
+		// into a corev1.List, and then decode using an unstructured scheme.
+		list := corev1.List{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       "List",
+				APIVersion: "v1",
+			},
+			ListMeta: metav1.ListMeta{},
+		}
+		for _, info := range infos {
+			list.Items = append(list.Items, runtime.RawExtension{Object: info.Object})
+		}
+
+		listData, err := json.Marshal(list)
+		if err != nil {
+			return err
+		}
+
+		converted, err := runtime.Decode(unstructured.UnstructuredJSONScheme, listData)
+		if err != nil {
+			return err
+		}
+
+		obj = converted
+	} else {
+		obj = infos[0].Object
+	}
+
+	isList := meta.IsListType(obj)
+	if isList {
+		items, err := meta.ExtractList(obj)
+		if err != nil {
+			return err
+		}
+
+		// take the items and create a new list for display
+		list := &unstructured.UnstructuredList{
+			Object: map[string]interface{}{
+				"kind":       "List",
+				"apiVersion": "v1",
+				"metadata":   map[string]interface{}{},
+			},
+		}
+		if listMeta, err := meta.ListAccessor(obj); err == nil {
+			list.Object["metadata"] = map[string]interface{}{
+				"resourceVersion": listMeta.GetResourceVersion(),
+			}
+		}
+
+		for _, item := range items {
+			list.Items = append(list.Items, *item.(*unstructured.Unstructured))
+		}
+		if err := printer.PrintObj(list, o.Out); err != nil {
+			errs = append(errs, err)
+		}
+		return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
+	}
+
+	if printErr := printer.PrintObj(obj, o.Out); printErr != nil {
+		errs = append(errs, printErr)
+	}
+
+	return utilerrors.Reduce(utilerrors.Flatten(utilerrors.NewAggregate(errs)))
+}
+
+func addServerPrintColumnFlags(cmd *cobra.Command, opt *GetOptions) {
+	cmd.Flags().BoolVar(&opt.ServerPrint, useServerPrintColumns, opt.ServerPrint, "If true, have the server return the appropriate table output. Supports extension APIs and CRDs.")
+}
+
+func shouldGetNewPrinterForMapping(printer printers.ResourcePrinter, lastMapping, mapping *meta.RESTMapping) bool {
+	return printer == nil || lastMapping == nil || mapping == nil || mapping.Resource != lastMapping.Resource
+}
+
+func multipleGVKsRequested(infos []*resource.Info) bool {
+	if len(infos) < 2 {
+		return false
+	}
+	gvk := infos[0].Mapping.GroupVersionKind
+	for _, info := range infos {
+		if info.Mapping.GroupVersionKind != gvk {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/get_flags.go b/vendor/k8s.io/kubectl/pkg/cmd/get/get_flags.go
new file mode 100644
index 0000000000..e5eb98b211
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/get/get_flags.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package get + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/kubectl/pkg/cmd/util" +) + +// PrintFlags composes common printer flag structs +// used in the Get command. +type PrintFlags struct { + JSONYamlPrintFlags *genericclioptions.JSONYamlPrintFlags + NamePrintFlags *genericclioptions.NamePrintFlags + CustomColumnsFlags *CustomColumnsPrintFlags + HumanReadableFlags *HumanPrintFlags + TemplateFlags *genericclioptions.KubeTemplatePrintFlags + + NoHeaders *bool + OutputFormat *string +} + +// SetKind sets the Kind option of humanreadable flags +func (f *PrintFlags) SetKind(kind schema.GroupKind) { + f.HumanReadableFlags.SetKind(kind) +} + +// EnsureWithNamespace ensures that humanreadable flags return +// a printer capable of printing with a "namespace" column. +func (f *PrintFlags) EnsureWithNamespace() error { + return f.HumanReadableFlags.EnsureWithNamespace() +} + +// EnsureWithKind ensures that humanreadable flags return +// a printer capable of including resource kinds. +func (f *PrintFlags) EnsureWithKind() error { + return f.HumanReadableFlags.EnsureWithKind() +} + +// Copy returns a copy of PrintFlags for mutation +func (f *PrintFlags) Copy() PrintFlags { + printFlags := *f + return printFlags +} + +// AllowedFormats is the list of formats in which data can be displayed +func (f *PrintFlags) AllowedFormats() []string { + formats := f.JSONYamlPrintFlags.AllowedFormats() + formats = append(formats, f.NamePrintFlags.AllowedFormats()...) + formats = append(formats, f.TemplateFlags.AllowedFormats()...) + formats = append(formats, f.CustomColumnsFlags.AllowedFormats()...) + formats = append(formats, f.HumanReadableFlags.AllowedFormats()...) + return formats +} + +// ToPrinter attempts to find a composed set of PrintFlags suitable for +// returning a printer based on current flag values. 
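From a caller's perspective the composition above reduces to "set an output format, ask for a printer". A minimal sketch against the upstream genericclioptions.PrintFlags, the generic sibling that this struct builds on (the Pod literal is purely illustrative):

    package main

    import (
        "os"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/cli-runtime/pkg/genericclioptions"
        "k8s.io/client-go/kubernetes/scheme"
    )

    func main() {
        // the generic flag struct that get's PrintFlags builds on
        flags := genericclioptions.NewPrintFlags("").WithTypeSetter(scheme.Scheme)
        format := "yaml"
        flags.OutputFormat = &format

        printer, err := flags.ToPrinter()
        if err != nil {
            panic(err)
        }

        pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}}
        // kind/apiVersion are filled in by the type setter before printing
        _ = printer.PrintObj(pod, os.Stdout)
    }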
+func (f *PrintFlags) ToPrinter() (printers.ResourcePrinter, error) { + outputFormat := "" + if f.OutputFormat != nil { + outputFormat = *f.OutputFormat + } + + noHeaders := false + if f.NoHeaders != nil { + noHeaders = *f.NoHeaders + } + f.HumanReadableFlags.NoHeaders = noHeaders + f.CustomColumnsFlags.NoHeaders = noHeaders + + // for "get.go" we want to support a --template argument given, even when no --output format is provided + if f.TemplateFlags.TemplateArgument != nil && len(*f.TemplateFlags.TemplateArgument) > 0 && len(outputFormat) == 0 { + outputFormat = "go-template" + } + + if p, err := f.TemplateFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { + return p, err + } + + if f.TemplateFlags.TemplateArgument != nil { + f.CustomColumnsFlags.TemplateArgument = *f.TemplateFlags.TemplateArgument + } + + if p, err := f.JSONYamlPrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { + return p, err + } + + if p, err := f.HumanReadableFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { + return p, err + } + + if p, err := f.CustomColumnsFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { + return p, err + } + + if p, err := f.NamePrintFlags.ToPrinter(outputFormat); !genericclioptions.IsNoCompatiblePrinterError(err) { + return p, err + } + + return nil, genericclioptions.NoCompatiblePrinterError{OutputFormat: &outputFormat, AllowedFormats: f.AllowedFormats()} +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to humanreadable and template printing. +func (f *PrintFlags) AddFlags(cmd *cobra.Command) { + f.JSONYamlPrintFlags.AddFlags(cmd) + f.NamePrintFlags.AddFlags(cmd) + f.TemplateFlags.AddFlags(cmd) + f.HumanReadableFlags.AddFlags(cmd) + f.CustomColumnsFlags.AddFlags(cmd) + + if f.OutputFormat != nil { + cmd.Flags().StringVarP(f.OutputFormat, "output", "o", *f.OutputFormat, fmt.Sprintf(`Output format. One of: (%s). See custom columns [https://kubernetes.io/docs/reference/kubectl/#custom-columns], golang template [http://golang.org/pkg/text/template/#pkg-overview] and jsonpath template [https://kubernetes.io/docs/reference/kubectl/jsonpath/].`, strings.Join(f.AllowedFormats(), ", "))) + util.CheckErr(cmd.RegisterFlagCompletionFunc( + "output", + func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var comps []string + for _, format := range f.AllowedFormats() { + if strings.HasPrefix(format, toComplete) { + comps = append(comps, format) + } + } + return comps, cobra.ShellCompDirectiveNoFileComp + }, + )) + } + if f.NoHeaders != nil { + cmd.Flags().BoolVar(f.NoHeaders, "no-headers", *f.NoHeaders, "When using the default or custom-column output format, don't print headers (default print headers).") + } +} + +// NewGetPrintFlags returns flags associated with humanreadable, +// template, and "name" printing, with default values set. 
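The ToPrinter implementation above is a chain of responsibility: each flag group either claims the output format or returns NoCompatiblePrinterError so the next group gets a chance. A toy version of the same dispatch, using real cli-runtime printers but hypothetical stand-in groups jsonOrYAML and nameOnly:

    package main

    import (
        "fmt"
        "os"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/cli-runtime/pkg/genericclioptions"
        "k8s.io/cli-runtime/pkg/printers"
    )

    func jsonOrYAML(format string) (printers.ResourcePrinter, error) {
        switch format {
        case "json":
            return &printers.JSONPrinter{}, nil
        case "yaml":
            return &printers.YAMLPrinter{}, nil
        }
        return nil, genericclioptions.NoCompatiblePrinterError{OutputFormat: &format}
    }

    func nameOnly(format string) (printers.ResourcePrinter, error) {
        if format == "name" {
            return &printers.NamePrinter{}, nil
        }
        return nil, genericclioptions.NoCompatiblePrinterError{OutputFormat: &format}
    }

    func toPrinter(format string) (printers.ResourcePrinter, error) {
        // the first group that does not return NoCompatiblePrinterError wins
        if p, err := jsonOrYAML(format); !genericclioptions.IsNoCompatiblePrinterError(err) {
            return p, err
        }
        if p, err := nameOnly(format); !genericclioptions.IsNoCompatiblePrinterError(err) {
            return p, err
        }
        return nil, fmt.Errorf("unknown format %q", format)
    }

    func main() {
        p, err := toPrinter("json")
        if err != nil {
            panic(err)
        }
        pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
        _ = p.PrintObj(pod, os.Stdout)
    }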
+func NewGetPrintFlags() *PrintFlags {
+	outputFormat := ""
+	noHeaders := false
+
+	return &PrintFlags{
+		OutputFormat: &outputFormat,
+		NoHeaders:    &noHeaders,
+
+		JSONYamlPrintFlags: genericclioptions.NewJSONYamlPrintFlags(),
+		NamePrintFlags:     genericclioptions.NewNamePrintFlags(""),
+		TemplateFlags:      genericclioptions.NewKubeTemplatePrintFlags(),
+
+		HumanReadableFlags: NewHumanPrintFlags(),
+		CustomColumnsFlags: NewCustomColumnsPrintFlags(),
+	}
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/humanreadable_flags.go b/vendor/k8s.io/kubectl/pkg/cmd/get/humanreadable_flags.go
new file mode 100644
index 0000000000..5fcf34d8cc
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/get/humanreadable_flags.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package get
+
+import (
+	"github.com/spf13/cobra"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+// HumanPrintFlags provides default flags necessary for printing.
+// Given the following flag values, a printer can be requested that knows
+// how to handle printing based on these values.
+type HumanPrintFlags struct {
+	ShowKind     *bool
+	ShowLabels   *bool
+	SortBy       *string
+	ColumnLabels *[]string
+
+	// get.go-specific values
+	NoHeaders bool
+
+	Kind          schema.GroupKind
+	WithNamespace bool
+}
+
+// SetKind sets the Kind option
+func (f *HumanPrintFlags) SetKind(kind schema.GroupKind) {
+	f.Kind = kind
+}
+
+// EnsureWithKind sets the "ShowKind" humanreadable option to true.
+func (f *HumanPrintFlags) EnsureWithKind() error {
+	showKind := true
+	f.ShowKind = &showKind
+	return nil
+}
+
+// EnsureWithNamespace sets the "WithNamespace" humanreadable option to true.
+func (f *HumanPrintFlags) EnsureWithNamespace() error {
+	f.WithNamespace = true
+	return nil
+}
+
+// AllowedFormats returns more customized formatting options
+func (f *HumanPrintFlags) AllowedFormats() []string {
+	return []string{"wide"}
+}
+
+// ToPrinter receives an outputFormat and returns a printer capable of
+// handling human-readable output.
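The printer returned below consumes server-rendered metav1.Table objects. A quick demonstration with cli-runtime's table printer and a fabricated table; note how a column with Priority > 0 only appears when Wide is set, which is exactly how -o wide works:

    package main

    import (
        "os"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/cli-runtime/pkg/printers"
    )

    func main() {
        table := &metav1.Table{
            ColumnDefinitions: []metav1.TableColumnDefinition{
                {Name: "Name", Type: "string"},
                {Name: "Age", Type: "string"},
                {Name: "Node", Type: "string", Priority: 1}, // only shown in wide output
            },
            Rows: []metav1.TableRow{
                {Cells: []interface{}{"pod-a", "5m", "node-1"}},
                {Cells: []interface{}{"pod-b", "2h", "node-2"}},
            },
        }

        narrow := printers.NewTablePrinter(printers.PrintOptions{})
        wide := printers.NewTablePrinter(printers.PrintOptions{Wide: true})

        _ = narrow.PrintObj(table, os.Stdout) // NAME, AGE
        _ = wide.PrintObj(table, os.Stdout)   // NAME, AGE, NODE
    }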
+func (f *HumanPrintFlags) ToPrinter(outputFormat string) (printers.ResourcePrinter, error) { + if len(outputFormat) > 0 && outputFormat != "wide" { + return nil, genericclioptions.NoCompatiblePrinterError{Options: f, AllowedFormats: f.AllowedFormats()} + } + + showKind := false + if f.ShowKind != nil { + showKind = *f.ShowKind + } + + showLabels := false + if f.ShowLabels != nil { + showLabels = *f.ShowLabels + } + + columnLabels := []string{} + if f.ColumnLabels != nil { + columnLabels = *f.ColumnLabels + } + + p := printers.NewTablePrinter(printers.PrintOptions{ + Kind: f.Kind, + WithKind: showKind, + NoHeaders: f.NoHeaders, + Wide: outputFormat == "wide", + WithNamespace: f.WithNamespace, + ColumnLabels: columnLabels, + ShowLabels: showLabels, + }) + + // TODO(juanvallejo): handle sorting here + + return p, nil +} + +// AddFlags receives a *cobra.Command reference and binds +// flags related to human-readable printing to it +func (f *HumanPrintFlags) AddFlags(c *cobra.Command) { + if f.ShowLabels != nil { + c.Flags().BoolVar(f.ShowLabels, "show-labels", *f.ShowLabels, "When printing, show all labels as the last column (default hide labels column)") + } + if f.SortBy != nil { + c.Flags().StringVar(f.SortBy, "sort-by", *f.SortBy, "If non-empty, sort list types using this field specification. The field specification is expressed as a JSONPath expression (e.g. '{.metadata.name}'). The field in the API resource specified by this JSONPath expression must be an integer or a string.") + } + if f.ColumnLabels != nil { + c.Flags().StringSliceVarP(f.ColumnLabels, "label-columns", "L", *f.ColumnLabels, "Accepts a comma separated list of labels that are going to be presented as columns. Names are case-sensitive. You can also use multiple flag options like -L label1 -L label2...") + } + if f.ShowKind != nil { + c.Flags().BoolVar(f.ShowKind, "show-kind", *f.ShowKind, "If present, list the resource type for the requested object(s).") + } +} + +// NewHumanPrintFlags returns flags associated with +// human-readable printing, with default values set. +func NewHumanPrintFlags() *HumanPrintFlags { + showLabels := false + sortBy := "" + showKind := false + columnLabels := []string{} + + return &HumanPrintFlags{ + NoHeaders: false, + WithNamespace: false, + ColumnLabels: &columnLabels, + + Kind: schema.GroupKind{}, + ShowLabels: &showLabels, + SortBy: &sortBy, + ShowKind: &showKind, + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/skip_printer.go b/vendor/k8s.io/kubectl/pkg/cmd/get/skip_printer.go new file mode 100644 index 0000000000..95a804315b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/get/skip_printer.go @@ -0,0 +1,48 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package get + +import ( + "io" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/printers" +) + +// skipPrinter allows conditionally suppressing object output via the output field. 
+// table objects are suppressed by setting their Rows to nil (allowing column definitions to propagate to the delegate). +// non-table objects are suppressed by not calling the delegate at all. +type skipPrinter struct { + delegate printers.ResourcePrinter + output *bool +} + +func (p *skipPrinter) PrintObj(obj runtime.Object, writer io.Writer) error { + if *p.output { + return p.delegate.PrintObj(obj, writer) + } + + table, isTable := obj.(*metav1.Table) + if !isTable { + return nil + } + + table = table.DeepCopy() + table.Rows = nil + return p.delegate.PrintObj(table, writer) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go b/vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go new file mode 100644 index 0000000000..9f9e2b8042 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/get/sorter.go @@ -0,0 +1,424 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package get + +import ( + "fmt" + "io" + "reflect" + "sort" + + "k8s.io/klog/v2" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/cli-runtime/pkg/printers" + "k8s.io/client-go/util/jsonpath" + "k8s.io/utils/integer" + + "github.com/fvbommel/sortorder" +) + +// SortingPrinter sorts list types before delegating to another printer. 
+// Non-list types are simply passed through +type SortingPrinter struct { + SortField string + Delegate printers.ResourcePrinter + Decoder runtime.Decoder +} + +func (s *SortingPrinter) PrintObj(obj runtime.Object, out io.Writer) error { + if table, isTable := obj.(*metav1.Table); isTable && len(table.Rows) > 1 { + parsedField, err := RelaxedJSONPathExpression(s.SortField) + if err != nil { + parsedField = s.SortField + } + + if sorter, err := NewTableSorter(table, parsedField); err != nil { + return err + } else if err := sorter.Sort(); err != nil { + return err + } + return s.Delegate.PrintObj(table, out) + } + + if meta.IsListType(obj) { + if err := s.sortObj(obj); err != nil { + return err + } + return s.Delegate.PrintObj(obj, out) + } + + return s.Delegate.PrintObj(obj, out) +} + +func (s *SortingPrinter) sortObj(obj runtime.Object) error { + objs, err := meta.ExtractList(obj) + if err != nil { + return err + } + if len(objs) == 0 { + return nil + } + + sorter, err := SortObjects(s.Decoder, objs, s.SortField) + if err != nil { + return err + } + + switch list := obj.(type) { + case *corev1.List: + outputList := make([]runtime.RawExtension, len(objs)) + for ix := range objs { + outputList[ix] = list.Items[sorter.OriginalPosition(ix)] + } + list.Items = outputList + return nil + } + return meta.SetList(obj, objs) +} + +// SortObjects sorts the runtime.Object based on fieldInput and returns RuntimeSort that implements +// the golang sort interface +func SortObjects(decoder runtime.Decoder, objs []runtime.Object, fieldInput string) (*RuntimeSort, error) { + for ix := range objs { + item := objs[ix] + switch u := item.(type) { + case *runtime.Unknown: + var err error + // decode runtime.Unknown to runtime.Unstructured for sorting. + // we don't actually want the internal versions of known types. + if objs[ix], _, err = decoder.Decode(u.Raw, nil, &unstructured.Unstructured{}); err != nil { + return nil, err + } + } + } + + field, err := RelaxedJSONPathExpression(fieldInput) + if err != nil { + return nil, err + } + + parser := jsonpath.New("sorting").AllowMissingKeys(true) + if err := parser.Parse(field); err != nil { + return nil, err + } + + // We don't do any model validation here, so we traverse all objects to be sorted + // and, if the field is valid to at least one of them, we consider it to be a + // valid field; otherwise error out. + // Note that this requires empty fields to be considered later, when sorting. 
+ var fieldFoundOnce bool + for _, obj := range objs { + values, err := findJSONPathResults(parser, obj) + if err != nil { + return nil, err + } + if len(values) > 0 && len(values[0]) > 0 { + fieldFoundOnce = true + break + } + } + if !fieldFoundOnce { + return nil, fmt.Errorf("couldn't find any field with path %q in the list of objects", field) + } + + sorter := NewRuntimeSort(field, objs) + sort.Sort(sorter) + return sorter, nil +} + +// RuntimeSort is an implementation of the golang sort interface that knows how to sort +// lists of runtime.Object +type RuntimeSort struct { + field string + objs []runtime.Object + origPosition []int +} + +// NewRuntimeSort creates a new RuntimeSort struct that implements golang sort interface +func NewRuntimeSort(field string, objs []runtime.Object) *RuntimeSort { + sorter := &RuntimeSort{field: field, objs: objs, origPosition: make([]int, len(objs))} + for ix := range objs { + sorter.origPosition[ix] = ix + } + return sorter +} + +func (r *RuntimeSort) Len() int { + return len(r.objs) +} + +func (r *RuntimeSort) Swap(i, j int) { + r.objs[i], r.objs[j] = r.objs[j], r.objs[i] + r.origPosition[i], r.origPosition[j] = r.origPosition[j], r.origPosition[i] +} + +func isLess(i, j reflect.Value) (bool, error) { + switch i.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return i.Int() < j.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return i.Uint() < j.Uint(), nil + case reflect.Float32, reflect.Float64: + return i.Float() < j.Float(), nil + case reflect.String: + return sortorder.NaturalLess(i.String(), j.String()), nil + case reflect.Pointer: + return isLess(i.Elem(), j.Elem()) + case reflect.Struct: + // sort metav1.Time + in := i.Interface() + if t, ok := in.(metav1.Time); ok { + time := j.Interface().(metav1.Time) + return t.Before(&time), nil + } + // sort resource.Quantity + if iQuantity, ok := in.(resource.Quantity); ok { + jQuantity := j.Interface().(resource.Quantity) + return iQuantity.Cmp(jQuantity) < 0, nil + } + // fallback to the fields comparison + for idx := 0; idx < i.NumField(); idx++ { + less, err := isLess(i.Field(idx), j.Field(idx)) + if err != nil || !less { + return less, err + } + } + return true, nil + case reflect.Array, reflect.Slice: + // note: the length of i and j may be different + for idx := 0; idx < integer.IntMin(i.Len(), j.Len()); idx++ { + less, err := isLess(i.Index(idx), j.Index(idx)) + if err != nil || !less { + return less, err + } + } + return true, nil + case reflect.Interface: + if i.IsNil() && j.IsNil() { + return false, nil + } else if i.IsNil() { + return true, nil + } else if j.IsNil() { + return false, nil + } + switch itype := i.Interface().(type) { + case uint8: + if jtype, ok := j.Interface().(uint8); ok { + return itype < jtype, nil + } + case uint16: + if jtype, ok := j.Interface().(uint16); ok { + return itype < jtype, nil + } + case uint32: + if jtype, ok := j.Interface().(uint32); ok { + return itype < jtype, nil + } + case uint64: + if jtype, ok := j.Interface().(uint64); ok { + return itype < jtype, nil + } + case int8: + if jtype, ok := j.Interface().(int8); ok { + return itype < jtype, nil + } + case int16: + if jtype, ok := j.Interface().(int16); ok { + return itype < jtype, nil + } + case int32: + if jtype, ok := j.Interface().(int32); ok { + return itype < jtype, nil + } + case int64: + if jtype, ok := j.Interface().(int64); ok { + return itype < jtype, nil + } + case uint: + if jtype, ok := 
j.Interface().(uint); ok {
+				return itype < jtype, nil
+			}
+		case int:
+			if jtype, ok := j.Interface().(int); ok {
+				return itype < jtype, nil
+			}
+		case float32:
+			if jtype, ok := j.Interface().(float32); ok {
+				return itype < jtype, nil
+			}
+		case float64:
+			if jtype, ok := j.Interface().(float64); ok {
+				return itype < jtype, nil
+			}
+		case string:
+			if jtype, ok := j.Interface().(string); ok {
+				// check if it's a Quantity
+				itypeQuantity, err := resource.ParseQuantity(itype)
+				if err != nil {
+					return sortorder.NaturalLess(itype, jtype), nil
+				}
+				jtypeQuantity, err := resource.ParseQuantity(jtype)
+				if err != nil {
+					return sortorder.NaturalLess(itype, jtype), nil
+				}
+				// Both strings are quantity
+				return itypeQuantity.Cmp(jtypeQuantity) < 0, nil
+			}
+		default:
+			return false, fmt.Errorf("unsortable type: %T", itype)
+		}
+		return false, fmt.Errorf("unsortable interface: %v", i.Kind())
+
+	default:
+		return false, fmt.Errorf("unsortable type: %v", i.Kind())
+	}
+}
+
+func (r *RuntimeSort) Less(i, j int) bool {
+	iObj := r.objs[i]
+	jObj := r.objs[j]
+
+	var iValues [][]reflect.Value
+	var jValues [][]reflect.Value
+	var err error
+
+	parser := jsonpath.New("sorting").AllowMissingKeys(true)
+	err = parser.Parse(r.field)
+	if err != nil {
+		panic(err)
+	}
+
+	iValues, err = findJSONPathResults(parser, iObj)
+	if err != nil {
+		klog.Fatalf("Failed to get i values for %#v using %s (%#v)", iObj, r.field, err)
+	}
+
+	jValues, err = findJSONPathResults(parser, jObj)
+	if err != nil {
+		klog.Fatalf("Failed to get j values for %#v using %s (%v)", jObj, r.field, err)
+	}
+
+	if len(iValues) == 0 || len(iValues[0]) == 0 {
+		return true
+	}
+	if len(jValues) == 0 || len(jValues[0]) == 0 {
+		return false
+	}
+	iField := iValues[0][0]
+	jField := jValues[0][0]
+
+	less, err := isLess(iField, jField)
+	if err != nil {
+		klog.Exitf("Field %s in %T is an unsortable type: %s, err: %v", r.field, iObj, iField.Kind().String(), err)
+	}
+	return less
+}
+
+// OriginalPosition returns the starting (original) position of a particular index.
+// e.g. If OriginalPosition(0) returns 5 then the
+// item currently at position 0 was at position 5 in the original unsorted array.
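Two behaviors of isLess above are easy to miss: strings compare in natural order rather than lexicographically, and strings that both parse as resource quantities compare numerically. Both are observable directly (values are illustrative):

    package main

    import (
        "fmt"

        "github.com/fvbommel/sortorder"
        "k8s.io/apimachinery/pkg/api/resource"
    )

    func main() {
        // natural ordering: "pod-10" sorts after "pod-9", unlike plain string compare
        fmt.Println(sortorder.NaturalLess("pod-9", "pod-10")) // true
        fmt.Println("pod-9" < "pod-10")                       // false

        // quantity-aware ordering: "500m" (0.5 cores) is less than "1"
        a := resource.MustParse("500m")
        b := resource.MustParse("1")
        fmt.Println(a.Cmp(b) < 0) // true
    }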
+func (r *RuntimeSort) OriginalPosition(ix int) int {
+	if ix < 0 || ix >= len(r.origPosition) {
+		return -1
+	}
+	return r.origPosition[ix]
+}
+
+type TableSorter struct {
+	field      string
+	obj        *metav1.Table
+	parsedRows [][][]reflect.Value
+}
+
+func (t *TableSorter) Len() int {
+	return len(t.obj.Rows)
+}
+
+func (t *TableSorter) Swap(i, j int) {
+	t.obj.Rows[i], t.obj.Rows[j] = t.obj.Rows[j], t.obj.Rows[i]
+	t.parsedRows[i], t.parsedRows[j] = t.parsedRows[j], t.parsedRows[i]
+}
+
+func (t *TableSorter) Less(i, j int) bool {
+	iValues := t.parsedRows[i]
+	jValues := t.parsedRows[j]
+
+	if len(iValues) == 0 || len(iValues[0]) == 0 {
+		return true
+	}
+	if len(jValues) == 0 || len(jValues[0]) == 0 {
+		return false
+	}
+
+	iField := iValues[0][0]
+	jField := jValues[0][0]
+
+	less, err := isLess(iField, jField)
+	if err != nil {
+		klog.Exitf("Field %s in %T is an unsortable type: %s, err: %v", t.field, t.parsedRows, iField.Kind().String(), err)
+	}
+	return less
+}
+
+func (t *TableSorter) Sort() error {
+	sort.Sort(t)
+	return nil
+}
+
+func NewTableSorter(table *metav1.Table, field string) (*TableSorter, error) {
+	var parsedRows [][][]reflect.Value
+
+	parser := jsonpath.New("sorting").AllowMissingKeys(true)
+	err := parser.Parse(field)
+	if err != nil {
+		return nil, fmt.Errorf("sorting error: %v", err)
+	}
+
+	fieldFoundOnce := false
+	for i := range table.Rows {
+		parsedRow, err := findJSONPathResults(parser, table.Rows[i].Object.Object)
+		if err != nil {
+			return nil, fmt.Errorf("failed to get values for %#v using %s (%#v)", parsedRow, field, err)
+		}
+		parsedRows = append(parsedRows, parsedRow)
+		if len(parsedRow) > 0 && len(parsedRow[0]) > 0 {
+			fieldFoundOnce = true
+		}
+	}
+
+	if len(table.Rows) > 0 && !fieldFoundOnce {
+		return nil, fmt.Errorf("couldn't find any field with path %q in the list of objects", field)
+	}
+
+	return &TableSorter{
+		obj:        table,
+		field:      field,
+		parsedRows: parsedRows,
+	}, nil
+}
+
+func findJSONPathResults(parser *jsonpath.JSONPath, from runtime.Object) ([][]reflect.Value, error) {
+	if unstructuredObj, ok := from.(*unstructured.Unstructured); ok {
+		return parser.FindResults(unstructuredObj.Object)
+	}
+	return parser.FindResults(reflect.ValueOf(from).Elem().Interface())
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/get/table_printer.go b/vendor/k8s.io/kubectl/pkg/cmd/get/table_printer.go
new file mode 100644
index 0000000000..9207227370
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/get/table_printer.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package get
+
+import (
+	"fmt"
+	"io"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/cli-runtime/pkg/printers"
+	"k8s.io/klog/v2"
+)
+
+// TablePrinter decodes table objects into typed objects before delegating to another printer.
+// Non-table types are simply passed through +type TablePrinter struct { + Delegate printers.ResourcePrinter +} + +func (t *TablePrinter) PrintObj(obj runtime.Object, writer io.Writer) error { + table, err := decodeIntoTable(obj) + if err == nil { + return t.Delegate.PrintObj(table, writer) + } + // if we are unable to decode server response into a v1beta1.Table, + // fallback to client-side printing with whatever info the server returned. + klog.V(2).Infof("Unable to decode server response into a Table. Falling back to hardcoded types: %v", err) + return t.Delegate.PrintObj(obj, writer) +} + +var recognizedTableVersions = map[schema.GroupVersionKind]bool{ + metav1beta1.SchemeGroupVersion.WithKind("Table"): true, + metav1.SchemeGroupVersion.WithKind("Table"): true, +} + +// assert the types are identical, since we're decoding both types into a metav1.Table +var _ metav1.Table = metav1beta1.Table{} +var _ metav1beta1.Table = metav1.Table{} + +func decodeIntoTable(obj runtime.Object) (runtime.Object, error) { + event, isEvent := obj.(*metav1.WatchEvent) + if isEvent { + obj = event.Object.Object + } + + if !recognizedTableVersions[obj.GetObjectKind().GroupVersionKind()] { + return nil, fmt.Errorf("attempt to decode non-Table object") + } + + unstr, ok := obj.(*unstructured.Unstructured) + if !ok { + return nil, fmt.Errorf("attempt to decode non-Unstructured object") + } + table := &metav1.Table{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr.Object, table); err != nil { + return nil, err + } + + for i := range table.Rows { + row := &table.Rows[i] + if row.Object.Raw == nil || row.Object.Object != nil { + continue + } + converted, err := runtime.Decode(unstructured.UnstructuredJSONScheme, row.Object.Raw) + if err != nil { + return nil, err + } + row.Object.Object = converted + } + + if isEvent { + event.Object.Object = table + return event, nil + } + return table, nil +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/env_file.go b/vendor/k8s.io/kubectl/pkg/cmd/util/env_file.go new file mode 100644 index 0000000000..ed1255839e --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/env_file.go @@ -0,0 +1,103 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strings" + "unicode" + "unicode/utf8" + + "k8s.io/apimachinery/pkg/util/validation" +) + +var utf8bom = []byte{0xEF, 0xBB, 0xBF} + +// processEnvFileLine returns a blank key if the line is empty or a comment. +// The value will be retrieved from the environment if necessary. 
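The parser below applies three rules: skip blank lines and #-comments, split KEY=VALUE on the first '=', and resolve a bare KEY from the process environment. A freestanding restatement of the same rules (it skips the key-name validation the real helper performs):

    package main

    import (
        "fmt"
        "os"
        "strings"
    )

    // parseLine mirrors the env-file rules: blanks and comments yield no pair,
    // KEY=VALUE splits on the first '=', and a bare KEY reads the environment.
    func parseLine(line string) (string, string, bool) {
        line = strings.TrimLeft(line, " \t")
        if line == "" || strings.HasPrefix(line, "#") {
            return "", "", false
        }
        key, value, found := strings.Cut(line, "=")
        if !found {
            value = os.Getenv(key)
        }
        return key, value, true
    }

    func main() {
        os.Setenv("HOME_DIR", "/home/demo")
        for _, l := range []string{"# comment", "", "PORT=8080", "HOME_DIR"} {
            if k, v, ok := parseLine(l); ok {
                fmt.Printf("%s=%q\n", k, v)
            }
        }
    }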
+func processEnvFileLine(line []byte, filePath string,
+	currentLine int) (key, value string, err error) {
+
+	if !utf8.Valid(line) {
+		return ``, ``, fmt.Errorf("env file %s contains invalid utf8 bytes at line %d: %v",
+			filePath, currentLine+1, line)
+	}
+
+	// We trim UTF8 BOM from the first line of the file but no others
+	if currentLine == 0 {
+		line = bytes.TrimPrefix(line, utf8bom)
+	}
+
+	// trim the line from all leading whitespace first
+	line = bytes.TrimLeftFunc(line, unicode.IsSpace)
+
+	// If the line is empty or a comment, we return a blank key/value pair.
+	if len(line) == 0 || line[0] == '#' {
+		return ``, ``, nil
+	}
+
+	data := strings.SplitN(string(line), "=", 2)
+	key = data[0]
+	if errs := validation.IsEnvVarName(key); len(errs) != 0 {
+		return ``, ``, fmt.Errorf("%q is not a valid key name: %s", key, strings.Join(errs, ";"))
+	}
+
+	if len(data) == 2 {
+		value = data[1]
+	} else {
+		// No value (no `=` in the line) is a signal to obtain the value
+		// from the environment.
+		value = os.Getenv(key)
+	}
+	return
+}
+
+// AddFromEnvFile processes an env file and allows a generic addTo to handle the
+// collection of key value pairs or returns an error.
+func AddFromEnvFile(filePath string, addTo func(key, value string) error) error {
+	f, err := os.Open(filePath)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+	currentLine := 0
+	for scanner.Scan() {
+		// Process the current line, retrieving a key/value pair if
+		// possible.
+		scannedBytes := scanner.Bytes()
+		key, value, err := processEnvFileLine(scannedBytes, filePath, currentLine)
+		if err != nil {
+			return err
+		}
+		currentLine++
+
+		if len(key) == 0 {
+			// no key means line was empty or a comment
+			continue
+		}
+
+		if err = addTo(key, value); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go
new file mode 100644
index 0000000000..e6414f3e0d
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"k8s.io/apimachinery/pkg/api/meta"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/kubernetes"
+	openapiclient "k8s.io/client-go/openapi"
+	restclient "k8s.io/client-go/rest"
+	"k8s.io/kubectl/pkg/util/openapi"
+	"k8s.io/kubectl/pkg/validation"
+)
+
+// Factory provides abstractions that allow the Kubectl command to be extended across multiple types
+// of resources and different API sets.
+// The rings are here for a reason. In order for composers to be able to provide alternative factory implementations
+// they need to provide low level pieces of *certain* functions so that when the factory calls back into itself
+// it uses the custom version of the function.
Rather than try to enumerate everything that someone would want to override +// we split the factory into rings, where each ring can depend on methods in an earlier ring, but cannot depend +// upon peer methods in its own ring. +// TODO: make the functions interfaces +// TODO: pass the various interfaces on the factory directly into the command constructors (so the +// commands are decoupled from the factory). +type Factory interface { + genericclioptions.RESTClientGetter + + // DynamicClient returns a dynamic client ready for use + DynamicClient() (dynamic.Interface, error) + + // KubernetesClientSet gives you back an external clientset + KubernetesClientSet() (*kubernetes.Clientset, error) + + // Returns a RESTClient for accessing Kubernetes resources or an error. + RESTClient() (*restclient.RESTClient, error) + + // NewBuilder returns an object that assists in loading objects from both disk and the server + // and which implements the common patterns for CLI interactions with generic resources. + NewBuilder() *resource.Builder + + // Returns a RESTClient for working with the specified RESTMapping or an error. This is intended + // for working with arbitrary resources and is not guaranteed to point to a Kubernetes APIServer. + ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) + // Returns a RESTClient for working with Unstructured objects. + UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) + + // Returns a schema that can validate objects stored on disk. + Validator(validationDirective string) (validation.Schema, error) + + // Used for retrieving openapi v2 resources. + openapi.OpenAPIResourcesGetter + + // OpenAPIV3Schema returns a client for fetching parsed schemas for + // any group version + OpenAPIV3Client() (openapiclient.Client, error) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go new file mode 100644 index 0000000000..6a1646b844 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/factory_client_access.go @@ -0,0 +1,218 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// this file contains factories with no other dependencies + +package util + +import ( + "errors" + "sync" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + openapiclient "k8s.io/client-go/openapi" + "k8s.io/client-go/openapi/cached" + restclient "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/util/openapi" + "k8s.io/kubectl/pkg/validation" +) + +type factoryImpl struct { + clientGetter genericclioptions.RESTClientGetter + + // Caches OpenAPI document and parsed resources + openAPIParser *openapi.CachedOpenAPIParser + oapi *openapi.CachedOpenAPIGetter + parser sync.Once + getter sync.Once +} + +func NewFactory(clientGetter genericclioptions.RESTClientGetter) Factory { + if clientGetter == nil { + panic("attempt to instantiate client_access_factory with nil clientGetter") + } + f := &factoryImpl{ + clientGetter: clientGetter, + } + + return f +} + +func (f *factoryImpl) ToRESTConfig() (*restclient.Config, error) { + return f.clientGetter.ToRESTConfig() +} + +func (f *factoryImpl) ToRESTMapper() (meta.RESTMapper, error) { + return f.clientGetter.ToRESTMapper() +} + +func (f *factoryImpl) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + return f.clientGetter.ToDiscoveryClient() +} + +func (f *factoryImpl) ToRawKubeConfigLoader() clientcmd.ClientConfig { + return f.clientGetter.ToRawKubeConfigLoader() +} + +func (f *factoryImpl) KubernetesClientSet() (*kubernetes.Clientset, error) { + clientConfig, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + return kubernetes.NewForConfig(clientConfig) +} + +func (f *factoryImpl) DynamicClient() (dynamic.Interface, error) { + clientConfig, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + return dynamic.NewForConfig(clientConfig) +} + +// NewBuilder returns a new resource builder for structured api objects. 
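Stepping back, wiring the factory up from a caller is short; genericclioptions.ConfigFlags supplies the ring-0 RESTClientGetter. A sketch, assuming a reachable kubeconfig (the pod listing is only there to prove the clientset works):

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/cli-runtime/pkg/genericclioptions"
        cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    func main() {
        // the standard --kubeconfig/--context/--namespace flags double as the client getter
        configFlags := genericclioptions.NewConfigFlags(true)
        f := cmdutil.NewFactory(configFlags)

        clientset, err := f.KubernetesClientSet()
        if err != nil {
            panic(err)
        }
        pods, err := clientset.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{Limit: 5})
        if err != nil {
            panic(err)
        }
        fmt.Printf("found %d pods\n", len(pods.Items))
    }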
+func (f *factoryImpl) NewBuilder() *resource.Builder { + return resource.NewBuilder(f.clientGetter) +} + +func (f *factoryImpl) RESTClient() (*restclient.RESTClient, error) { + clientConfig, err := f.ToRESTConfig() + if err != nil { + return nil, err + } + setKubernetesDefaults(clientConfig) + return restclient.RESTClientFor(clientConfig) +} + +func (f *factoryImpl) ClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { + cfg, err := f.clientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + if err := setKubernetesDefaults(cfg); err != nil { + return nil, err + } + gvk := mapping.GroupVersionKind + switch gvk.Group { + case corev1.GroupName: + cfg.APIPath = "/api" + default: + cfg.APIPath = "/apis" + } + gv := gvk.GroupVersion() + cfg.GroupVersion = &gv + return restclient.RESTClientFor(cfg) +} + +func (f *factoryImpl) UnstructuredClientForMapping(mapping *meta.RESTMapping) (resource.RESTClient, error) { + cfg, err := f.clientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + if err := restclient.SetKubernetesDefaults(cfg); err != nil { + return nil, err + } + cfg.APIPath = "/apis" + if mapping.GroupVersionKind.Group == corev1.GroupName { + cfg.APIPath = "/api" + } + gv := mapping.GroupVersionKind.GroupVersion() + cfg.ContentConfig = resource.UnstructuredPlusDefaultContentConfig() + cfg.GroupVersion = &gv + return restclient.RESTClientFor(cfg) +} + +func (f *factoryImpl) Validator(validationDirective string) (validation.Schema, error) { + // client-side schema validation is only performed + // when the validationDirective is strict. + // If the directive is warn, we rely on the ParamVerifyingSchema + // to ignore the client-side validation and provide a warning + // to the user that attempting warn validation when SS validation + // is unsupported is inert. + if validationDirective == metav1.FieldValidationIgnore { + return validation.NullSchema{}, nil + } + + schema := validation.ConjunctiveSchema{ + validation.NewSchemaValidation(f), + validation.NoDoubleKeySchema{}, + } + + dynamicClient, err := f.DynamicClient() + if err != nil { + return nil, err + } + // Create the FieldValidationVerifier for use in the ParamVerifyingSchema. + discoveryClient, err := f.ToDiscoveryClient() + if err != nil { + return nil, err + } + // Memory-cache the OpenAPI V3 responses. The disk cache behavior is determined by + // the discovery client. + oapiV3Client := cached.NewClient(discoveryClient.OpenAPIV3()) + queryParam := resource.QueryParamFieldValidation + primary := resource.NewQueryParamVerifierV3(dynamicClient, oapiV3Client, queryParam) + secondary := resource.NewQueryParamVerifier(dynamicClient, f.openAPIGetter(), queryParam) + fallback := resource.NewFallbackQueryParamVerifier(primary, secondary) + return validation.NewParamVerifyingSchema(schema, fallback, string(validationDirective)), nil +} + +// OpenAPISchema returns metadata and structural information about +// Kubernetes object definitions. 
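OpenAPISchema and openAPIGetter below lean on the factory's sync.Once fields so the expensive fetch-and-parse happens at most once per factory, no matter how many commands ask. The pattern in isolation (lazyParser is a hypothetical stand-in):

    package main

    import (
        "fmt"
        "sync"
    )

    type lazyParser struct {
        once   sync.Once
        parsed string // stands in for the parsed OpenAPI document
    }

    func (l *lazyParser) Parse() string {
        l.once.Do(func() {
            fmt.Println("expensive parse runs once")
            l.parsed = "schema"
        })
        return l.parsed
    }

    func main() {
        p := &lazyParser{}
        for i := 0; i < 3; i++ {
            _ = p.Parse() // only the first call pays the cost
        }
        fmt.Println(p.Parse())
    }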
+func (f *factoryImpl) OpenAPISchema() (openapi.Resources, error) {
+	openAPIGetter := f.openAPIGetter()
+	if openAPIGetter == nil {
+		return nil, errors.New("no openapi getter")
+	}
+
+	// Lazily initialize the OpenAPIParser once
+	f.parser.Do(func() {
+		// Create the caching OpenAPIParser
+		f.openAPIParser = openapi.NewOpenAPIParser(f.openAPIGetter())
+	})
+
+	// Delegate to the OpenAPIParser
+	return f.openAPIParser.Parse()
+}
+
+func (f *factoryImpl) openAPIGetter() discovery.OpenAPISchemaInterface {
+	discovery, err := f.clientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil
+	}
+	f.getter.Do(func() {
+		f.oapi = openapi.NewOpenAPIGetter(discovery)
+	})
+
+	return f.oapi
+}
+
+func (f *factoryImpl) OpenAPIV3Client() (openapiclient.Client, error) {
+	discovery, err := f.clientGetter.ToDiscoveryClient()
+	if err != nil {
+		return nil, err
+	}
+
+	return cached.NewClient(discovery.OpenAPIV3()), nil
+}
diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
new file mode 100644
index 0000000000..6d38fade3e
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/cmd/util/helpers.go
@@ -0,0 +1,915 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	jsonpatch "github.com/evanphx/json-patch"
+	"github.com/spf13/cobra"
+	"github.com/spf13/pflag"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	"k8s.io/apimachinery/pkg/util/yaml"
+	"k8s.io/cli-runtime/pkg/genericclioptions"
+	"k8s.io/cli-runtime/pkg/resource"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/scale"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/klog/v2"
+	utilexec "k8s.io/utils/exec"
+)
+
+const (
+	ApplyAnnotationsFlag = "save-config"
+	DefaultErrorExitCode = 1
+	DefaultChunkSize     = 500
+)
+
+type debugError interface {
+	DebugError() (msg string, args []interface{})
+}
+
+// AddSourceToErr adds handleResourcePrefix and source string to error message.
+// verb is the string like "creating", "deleting" etc.
+// source is the filename or URL to the template file(*.json or *.yaml), or stdin to use to handle the resource.
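A usage sketch for the helper defined next; the point is that the wrapped error is still an APIStatus, so callers can keep type-switching on it (the NotFound error here is fabricated):

    package main

    import (
        "fmt"

        apierrors "k8s.io/apimachinery/pkg/api/errors"
        "k8s.io/apimachinery/pkg/runtime/schema"
        cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    func main() {
        err := apierrors.NewNotFound(schema.GroupResource{Resource: "pods"}, "web-0")
        wrapped := cmdutil.AddSourceToErr("deleting", "manifests/pod.yaml", err)

        // the status structure survives the decoration
        fmt.Println(apierrors.IsNotFound(wrapped)) // true
        fmt.Println(wrapped)
    }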
+func AddSourceToErr(verb string, source string, err error) error {
+	if source != "" {
+		if statusError, ok := err.(apierrors.APIStatus); ok {
+			status := statusError.Status()
+			status.Message = fmt.Sprintf("error when %s %q: %v", verb, source, status.Message)
+			return &apierrors.StatusError{ErrStatus: status}
+		}
+		return fmt.Errorf("error when %s %q: %v", verb, source, err)
+	}
+	return err
+}
+
+var fatalErrHandler = fatal
+
+// BehaviorOnFatal allows you to override the default behavior when a fatal
+// error occurs, which is to call os.Exit(code). You can pass 'panic' as a function
+// here if you prefer the panic() over os.Exit(1).
+func BehaviorOnFatal(f func(string, int)) {
+	fatalErrHandler = f
+}
+
+// DefaultBehaviorOnFatal allows you to undo any previous override. Useful in
+// tests.
+func DefaultBehaviorOnFatal() {
+	fatalErrHandler = fatal
+}
+
+// fatal prints the message (if provided) and then exits. If V(99) or greater,
+// klog.Fatal is invoked for extended information. This is intended for maintainer
+// debugging and out of a reasonable range for users.
+func fatal(msg string, code int) {
+	// nolint:logcheck // Not using the result of klog.V(99) inside the if
+	// branch is okay, we just use it to determine how to terminate.
+	if klog.V(99).Enabled() {
+		klog.FatalDepth(2, msg)
+	}
+	if len(msg) > 0 {
+		// add newline if needed
+		if !strings.HasSuffix(msg, "\n") {
+			msg += "\n"
+		}
+		fmt.Fprint(os.Stderr, msg)
+	}
+	os.Exit(code)
+}
+
+// ErrExit may be passed to CheckErr to instruct it to output nothing but exit with
+// status code 1.
+var ErrExit = fmt.Errorf("exit")
+
+// CheckErr prints a user friendly error to STDERR and exits with a non-zero
+// exit code. Unrecognized errors will be printed with an "error: " prefix.
+//
+// This method is generic to the command in use and may be used by non-Kubectl
+// commands.
+func CheckErr(err error) {
+	checkErr(err, fatalErrHandler)
+}
+
+// CheckDiffErr prints a user friendly error to STDERR and exits with a
+// non-zero and non-one exit code. Unrecognized errors will be printed
+// with an "error: " prefix.
+//
+// This method is meant specifically for `kubectl diff` and may be used
+// by other commands.
+func CheckDiffErr(err error) {
+	checkErr(err, func(msg string, code int) {
+		fatalErrHandler(msg, code+1)
+	})
+}
+
+// isInvalidReasonStatusError returns true if this is an API Status error with reason=Invalid.
+// This is distinct from generic 422 errors, for which we want to fall back to generic error handling.
+func isInvalidReasonStatusError(err error) bool {
+	if !apierrors.IsInvalid(err) {
+		return false
+	}
+	statusError, isStatusError := err.(*apierrors.StatusError)
+	if !isStatusError {
+		return false
+	}
+	status := statusError.Status()
+	return status.Reason == metav1.StatusReasonInvalid
+}
+
+// checkErr formats a given error as a string and calls the passed handleErr
+// func with that string and a kubectl exit code.
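One practical consequence of the fatalErrHandler indirection above: tests and embedders can capture the would-be exit instead of terminating the process. For example:

    package main

    import (
        "errors"
        "fmt"

        cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    func main() {
        // capture instead of os.Exit, as a test would
        cmdutil.BehaviorOnFatal(func(msg string, code int) {
            fmt.Printf("would exit %d: %s\n", code, msg)
        })
        defer cmdutil.DefaultBehaviorOnFatal()

        cmdutil.CheckErr(errors.New("boom")) // prints: would exit 1: error: boom
    }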
+func checkErr(err error, handleErr func(string, int)) { + // unwrap aggregates of 1 + if agg, ok := err.(utilerrors.Aggregate); ok && len(agg.Errors()) == 1 { + err = agg.Errors()[0] + } + + if err == nil { + return + } + + switch { + case err == ErrExit: + handleErr("", DefaultErrorExitCode) + case isInvalidReasonStatusError(err): + status := err.(*apierrors.StatusError).Status() + details := status.Details + s := "The request is invalid" + if details == nil { + // if we have no other details, include the message from the server if present + if len(status.Message) > 0 { + s += ": " + status.Message + } + handleErr(s, DefaultErrorExitCode) + return + } + if len(details.Kind) != 0 || len(details.Name) != 0 { + s = fmt.Sprintf("The %s %q is invalid", details.Kind, details.Name) + } else if len(status.Message) > 0 && len(details.Causes) == 0 { + // only append the message if we have no kind/name details and no causes, + // since default invalid error constructors duplicate that information in the message + s += ": " + status.Message + } + + if len(details.Causes) > 0 { + errs := statusCausesToAggrError(details.Causes) + handleErr(MultilineError(s+": ", errs), DefaultErrorExitCode) + } else { + handleErr(s, DefaultErrorExitCode) + } + case clientcmd.IsConfigurationInvalid(err): + handleErr(MultilineError("Error in configuration: ", err), DefaultErrorExitCode) + default: + switch err := err.(type) { + case *meta.NoResourceMatchError: + switch { + case len(err.PartialResource.Group) > 0 && len(err.PartialResource.Version) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q and version %q", err.PartialResource.Resource, err.PartialResource.Group, err.PartialResource.Version), DefaultErrorExitCode) + case len(err.PartialResource.Group) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in group %q", err.PartialResource.Resource, err.PartialResource.Group), DefaultErrorExitCode) + case len(err.PartialResource.Version) > 0: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q in version %q", err.PartialResource.Resource, err.PartialResource.Version), DefaultErrorExitCode) + default: + handleErr(fmt.Sprintf("the server doesn't have a resource type %q", err.PartialResource.Resource), DefaultErrorExitCode) + } + case utilerrors.Aggregate: + handleErr(MultipleErrors(``, err.Errors()), DefaultErrorExitCode) + case utilexec.ExitError: + handleErr(err.Error(), err.ExitStatus()) + default: // for any other error type + msg, ok := StandardErrorMessage(err) + if !ok { + msg = err.Error() + if !strings.HasPrefix(msg, "error: ") { + msg = fmt.Sprintf("error: %s", msg) + } + } + handleErr(msg, DefaultErrorExitCode) + } + } +} + +func statusCausesToAggrError(scs []metav1.StatusCause) utilerrors.Aggregate { + errs := make([]error, 0, len(scs)) + errorMsgs := sets.NewString() + for _, sc := range scs { + // check for duplicate error messages and skip them + msg := fmt.Sprintf("%s: %s", sc.Field, sc.Message) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, errors.New(msg)) + } + return utilerrors.NewAggregate(errs) +} + +// StandardErrorMessage translates common errors into a human readable message, or returns +// false if the error is not one of the recognized types. It may also log extended +// information to klog. +// +// This method is generic to the command in use and may be used by non-Kubectl +// commands. 
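When checkErr above hits an aggregate, it leans on MultilineError (defined further below) to render one bullet per sub-error. Its output shape, with fabricated errors:

    package main

    import (
        "errors"
        "fmt"

        utilerrors "k8s.io/apimachinery/pkg/util/errors"
        cmdutil "k8s.io/kubectl/pkg/cmd/util"
    )

    func main() {
        agg := utilerrors.NewAggregate([]error{
            errors.New("spec.replicas: must be non-negative"),
            errors.New("metadata.name: required"),
        })

        // prefix on its own line, then one "* ..." bullet per sub-error
        fmt.Print(cmdutil.MultilineError("Error in configuration: ", agg))
    }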
+func StandardErrorMessage(err error) (string, bool) { + if debugErr, ok := err.(debugError); ok { + klog.V(4).Infof(debugErr.DebugError()) + } + status, isStatus := err.(apierrors.APIStatus) + switch { + case isStatus: + switch s := status.Status(); { + case s.Reason == metav1.StatusReasonUnauthorized: + return fmt.Sprintf("error: You must be logged in to the server (%s)", s.Message), true + case len(s.Reason) > 0: + return fmt.Sprintf("Error from server (%s): %s", s.Reason, err.Error()), true + default: + return fmt.Sprintf("Error from server: %s", err.Error()), true + } + case apierrors.IsUnexpectedObjectError(err): + return fmt.Sprintf("Server returned an unexpected response: %s", err.Error()), true + } + switch t := err.(type) { + case *url.Error: + klog.V(4).Infof("Connection error: %s %s: %v", t.Op, t.URL, t.Err) + switch { + case strings.Contains(t.Err.Error(), "connection refused"): + host := t.URL + if server, err := url.Parse(t.URL); err == nil { + host = server.Host + } + return fmt.Sprintf("The connection to the server %s was refused - did you specify the right host or port?", host), true + } + return fmt.Sprintf("Unable to connect to the server: %v", t.Err), true + } + return "", false +} + +// MultilineError returns a string representing an error that splits sub errors into their own +// lines. The returned string will end with a newline. +func MultilineError(prefix string, err error) string { + if agg, ok := err.(utilerrors.Aggregate); ok { + errs := utilerrors.Flatten(agg).Errors() + buf := &bytes.Buffer{} + switch len(errs) { + case 0: + return fmt.Sprintf("%s%v\n", prefix, err) + case 1: + return fmt.Sprintf("%s%v\n", prefix, messageForError(errs[0])) + default: + fmt.Fprintln(buf, prefix) + for _, err := range errs { + fmt.Fprintf(buf, "* %v\n", messageForError(err)) + } + return buf.String() + } + } + return fmt.Sprintf("%s%s\n", prefix, err) +} + +// PrintErrorWithCauses prints an error's kind, name, and each of the error's causes in a new line. +// The returned string will end with a newline. +// Returns true if a case exists to handle the error type, or false otherwise. +func PrintErrorWithCauses(err error, errOut io.Writer) bool { + switch t := err.(type) { + case *apierrors.StatusError: + errorDetails := t.Status().Details + if errorDetails != nil { + fmt.Fprintf(errOut, "error: %s %q is invalid\n\n", errorDetails.Kind, errorDetails.Name) + for _, cause := range errorDetails.Causes { + fmt.Fprintf(errOut, "* %s: %s\n", cause.Field, cause.Message) + } + return true + } + } + + fmt.Fprintf(errOut, "error: %v\n", err) + return false +} + +// MultipleErrors returns a newline delimited string containing +// the prefix and referenced errors in standard form. +func MultipleErrors(prefix string, errs []error) string { + buf := &bytes.Buffer{} + for _, err := range errs { + fmt.Fprintf(buf, "%s%v\n", prefix, messageForError(err)) + } + return buf.String() +} + +// messageForError returns the string representing the error. +func messageForError(err error) string { + msg, ok := StandardErrorMessage(err) + if !ok { + msg = err.Error() + } + return msg +} + +func UsageErrorf(cmd *cobra.Command, format string, args ...interface{}) error { + msg := fmt.Sprintf(format, args...) 
+	return fmt.Errorf("%s\nSee '%s -h' for help and examples", msg, cmd.CommandPath())
+}
+
+func IsFilenameSliceEmpty(filenames []string, directory string) bool {
+	return len(filenames) == 0 && directory == ""
+}
+
+func GetFlagString(cmd *cobra.Command, flag string) string {
+	s, err := cmd.Flags().GetString(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return s
+}
+
+// GetFlagStringSlice can be used to accept multiple arguments with flag repetition (e.g. -f arg1,arg2 -f arg3 ...)
+func GetFlagStringSlice(cmd *cobra.Command, flag string) []string {
+	s, err := cmd.Flags().GetStringSlice(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return s
+}
+
+// GetFlagStringArray can be used to accept multiple arguments with flag repetition (e.g. -f arg1 -f arg2 ...)
+func GetFlagStringArray(cmd *cobra.Command, flag string) []string {
+	s, err := cmd.Flags().GetStringArray(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return s
+}
+
+func GetFlagBool(cmd *cobra.Command, flag string) bool {
+	b, err := cmd.Flags().GetBool(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return b
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt(cmd *cobra.Command, flag string) int {
+	i, err := cmd.Flags().GetInt(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return i
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt32(cmd *cobra.Command, flag string) int32 {
+	i, err := cmd.Flags().GetInt32(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return i
+}
+
+// Assumes the flag has a default value.
+func GetFlagInt64(cmd *cobra.Command, flag string) int64 {
+	i, err := cmd.Flags().GetInt64(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return i
+}
+
+func GetFlagDuration(cmd *cobra.Command, flag string) time.Duration {
+	d, err := cmd.Flags().GetDuration(flag)
+	if err != nil {
+		klog.Fatalf("error accessing flag %s for command %s: %v", flag, cmd.Name(), err)
+	}
+	return d
+}
+
+func GetPodRunningTimeoutFlag(cmd *cobra.Command) (time.Duration, error) {
+	timeout := GetFlagDuration(cmd, "pod-running-timeout")
+	if timeout <= 0 {
+		return timeout, fmt.Errorf("--pod-running-timeout must be higher than zero")
+	}
+	return timeout, nil
+}
+
+type FeatureGate string
+
+const (
+	ApplySet                FeatureGate = "KUBECTL_APPLYSET"
+	CmdPluginAsSubcommand   FeatureGate = "KUBECTL_ENABLE_CMD_SHADOW"
+	InteractiveDelete       FeatureGate = "KUBECTL_INTERACTIVE_DELETE"
+	OpenAPIV3Patch          FeatureGate = "KUBECTL_OPENAPIV3_PATCH"
+	RemoteCommandWebsockets FeatureGate = "KUBECTL_REMOTE_COMMAND_WEBSOCKETS"
+)
+
+// IsEnabled returns true iff the environment variable is set to true.
+// In all other cases, it returns false.
+func (f FeatureGate) IsEnabled() bool {
+	return strings.ToLower(os.Getenv(string(f))) == "true"
+}
+
+// IsDisabled returns true iff the environment variable is set to false.
+// In all other cases, it returns false.
+// This function is used for the cases where a feature is enabled by default,
+// but it may be needed to provide a way to disable this feature.
+func (f FeatureGate) IsDisabled() bool {
+	return strings.ToLower(os.Getenv(string(f))) == "false"
+}
+
+func AddValidateFlags(cmd *cobra.Command) {
+	cmd.Flags().String(
+		"validate",
+		"strict",
+		`Must be one of: strict (or true), warn, ignore (or false).
+		"true" or "strict" will use a schema to validate the input and fail the request if invalid. It will perform server side validation if ServerSideFieldValidation is enabled on the api-server, but will fall back to less reliable client-side validation if not.
+		"warn" will warn about unknown or duplicate fields without blocking the request if server-side field validation is enabled on the API server, and behave as "ignore" otherwise.
+		"false" or "ignore" will not perform any schema validation, silently dropping any unknown or duplicate fields.`,
+	)
+
+	cmd.Flags().Lookup("validate").NoOptDefVal = "strict"
+}
+
+func AddFilenameOptionFlags(cmd *cobra.Command, options *resource.FilenameOptions, usage string) {
+	AddJsonFilenameFlag(cmd.Flags(), &options.Filenames, "Filename, directory, or URL to files "+usage)
+	AddKustomizeFlag(cmd.Flags(), &options.Kustomize)
+	cmd.Flags().BoolVarP(&options.Recursive, "recursive", "R", options.Recursive, "Process the directory used in -f, --filename recursively. Useful when you want to manage related manifests organized within the same directory.")
+}
+
+func AddJsonFilenameFlag(flags *pflag.FlagSet, value *[]string, usage string) {
+	flags.StringSliceVarP(value, "filename", "f", *value, usage)
+	annotations := make([]string, 0, len(resource.FileExtensions))
+	for _, ext := range resource.FileExtensions {
+		annotations = append(annotations, strings.TrimLeft(ext, "."))
+	}
+	flags.SetAnnotation("filename", cobra.BashCompFilenameExt, annotations)
+}
+
+// AddKustomizeFlag adds kustomize flag to a command
+func AddKustomizeFlag(flags *pflag.FlagSet, value *string) {
+	flags.StringVarP(value, "kustomize", "k", *value, "Process the kustomization directory. This flag can't be used together with -f or -R.")
+}
+
+// AddDryRunFlag adds dry-run flag to a command. Usually used by mutations.
+func AddDryRunFlag(cmd *cobra.Command) {
+	cmd.Flags().String(
+		"dry-run",
+		"none",
+		`Must be "none", "server", or "client". If client strategy, only print the object that would be sent, without sending it. If server strategy, submit server-side request without persisting the resource.`,
+	)
+	cmd.Flags().Lookup("dry-run").NoOptDefVal = "unchanged"
+}
+
+func AddFieldManagerFlagVar(cmd *cobra.Command, p *string, defaultFieldManager string) {
+	cmd.Flags().StringVar(p, "field-manager", defaultFieldManager, "Name of the manager used to track field ownership.")
+}
+
+func AddContainerVarFlags(cmd *cobra.Command, p *string, containerName string) {
+	cmd.Flags().StringVarP(p, "container", "c", containerName, "Container name. If omitted, use the kubectl.kubernetes.io/default-container annotation for selecting the container to be attached or the first container in the pod will be chosen")
+}
+
+func AddServerSideApplyFlags(cmd *cobra.Command) {
+	cmd.Flags().Bool("server-side", false, "If true, apply runs in the server instead of the client.")
+	cmd.Flags().Bool("force-conflicts", false, "If true, server-side apply will force the changes against conflicts.")
+}
+
+func AddPodRunningTimeoutFlag(cmd *cobra.Command, defaultTimeout time.Duration) {
+	cmd.Flags().Duration("pod-running-timeout", defaultTimeout, "The length of time (like 5s, 2m, or 3h, higher than zero) to wait until at least one pod is running")
+}
+
+func AddApplyAnnotationFlags(cmd *cobra.Command) {
+	cmd.Flags().Bool(ApplyAnnotationsFlag, false, "If true, the configuration of current object will be saved in its annotation. Otherwise, the annotation will be unchanged. This flag is useful when you want to perform kubectl apply on this object in the future.")
+}
+
+func AddApplyAnnotationVarFlags(cmd *cobra.Command, applyAnnotation *bool) {
+	cmd.Flags().BoolVar(applyAnnotation, ApplyAnnotationsFlag, *applyAnnotation, "If true, the configuration of current object will be saved in its annotation. Otherwise, the annotation will be unchanged. This flag is useful when you want to perform kubectl apply on this object in the future.")
+}
+
+func AddChunkSizeFlag(cmd *cobra.Command, value *int64) {
+	cmd.Flags().Int64Var(value, "chunk-size", *value,
+		"Return large lists in chunks rather than all at once. Pass 0 to disable. This flag is beta and may change in the future.")
+}
+
+func AddLabelSelectorFlagVar(cmd *cobra.Command, p *string) {
+	cmd.Flags().StringVarP(p, "selector", "l", *p, "Selector (label query) to filter on, supports '=', '==', and '!='. (e.g. -l key1=value1,key2=value2). Matching objects must satisfy all of the specified label constraints.")
+}
+
+func AddPruningFlags(cmd *cobra.Command, prune *bool, pruneAllowlist *[]string, pruneWhitelist *[]string, all *bool, applySetRef *string) {
+	// Flags associated with the original allowlist-based alpha
+	cmd.Flags().StringArrayVar(pruneAllowlist, "prune-allowlist", *pruneAllowlist, "Overwrite the default allowlist with <group/version/kind> for --prune")
+	cmd.Flags().StringArrayVar(pruneWhitelist, "prune-whitelist", *pruneWhitelist, "Overwrite the default whitelist with <group/version/kind> for --prune") // TODO: Remove this in kubectl 1.28 or later
+	_ = cmd.Flags().MarkDeprecated("prune-whitelist", "Use --prune-allowlist instead.")
+	cmd.Flags().BoolVar(all, "all", *all, "Select all resources in the namespace of the specified resource types.")
+
+	// Flags associated with the new ApplySet-based alpha
+	if ApplySet.IsEnabled() {
+		cmd.Flags().StringVar(applySetRef, "applyset", *applySetRef, "[alpha] The name of the ApplySet that tracks which resources are being managed, for the purposes of determining what to prune. Live resources that are part of the ApplySet but have been removed from the provided configs will be deleted. Format: [RESOURCE][.GROUP]/NAME. A Secret will be used if no resource or group is specified.")
+		cmd.Flags().BoolVar(prune, "prune", *prune, "Automatically delete previously applied resource objects that do not appear in the provided configs. For alpha1, use with either -l or --all. For alpha2, use with --applyset.")
+	} else {
+		// different docs for the shared --prune flag if only alpha1 is enabled
+		cmd.Flags().BoolVar(prune, "prune", *prune, "Automatically delete resource objects that do not appear in the configs and are created by either apply or create --save-config. Should be used with either -l or --all.")
+	}
+}
+
+func AddSubresourceFlags(cmd *cobra.Command, subresource *string, usage string, allowedSubresources ...string) {
+	cmd.Flags().StringVar(subresource, "subresource", "", fmt.Sprintf("%s Must be one of %v. This flag is beta and may change in the future.", usage, allowedSubresources))
+	CheckErr(cmd.RegisterFlagCompletionFunc("subresource", func(*cobra.Command, []string, string) ([]string, cobra.ShellCompDirective) {
+		return allowedSubresources, cobra.ShellCompDirectiveNoFileComp
+	}))
+}
+
+type ValidateOptions struct {
+	ValidationDirective string
+}
+
+// Merge converts the passed in object to JSON, merges the fragment into it using an RFC7396 JSON Merge Patch,
+// and returns the resulting object
+// TODO: merge assumes JSON serialization, and does not properly abstract API retrieval
+func Merge(codec runtime.Codec, dst runtime.Object, fragment string) (runtime.Object, error) {
+	// encode dst into versioned json and apply fragment directly to it
+	target, err := runtime.Encode(codec, dst)
+	if err != nil {
+		return nil, err
+	}
+	patched, err := jsonpatch.MergePatch(target, []byte(fragment))
+	if err != nil {
+		return nil, err
+	}
+	out, err := runtime.Decode(codec, patched)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// StrategicMerge converts the passed in object to JSON, merges the fragment into it using a Strategic Merge Patch,
+// and returns the resulting object
+func StrategicMerge(codec runtime.Codec, dst runtime.Object, fragment string, dataStruct runtime.Object) (runtime.Object, error) {
+	target, err := runtime.Encode(codec, dst)
+	if err != nil {
+		return nil, err
+	}
+	patched, err := strategicpatch.StrategicMergePatch(target, []byte(fragment), dataStruct)
+	if err != nil {
+		return nil, err
+	}
+	out, err := runtime.Decode(codec, patched)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// JSONPatch converts the passed in object to JSON, performs an RFC6902 JSON Patch using operations specified in the
+// fragment, and returns the resulting object
+func JSONPatch(codec runtime.Codec, dst runtime.Object, fragment string) (runtime.Object, error) {
+	target, err := runtime.Encode(codec, dst)
+	if err != nil {
+		return nil, err
+	}
+	patch, err := jsonpatch.DecodePatch([]byte(fragment))
+	if err != nil {
+		return nil, err
+	}
+	patched, err := patch.Apply(target)
+	if err != nil {
+		return nil, err
+	}
+	out, err := runtime.Decode(codec, patched)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// DumpReaderToFile writes all data from the given io.Reader to the specified file
+// (usually for temporary use).
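Editor's aside, not part of the vendored diff: the three patch helpers above differ only in patch semantics (RFC 6902 operation lists, RFC 7396 merge, strategic merge with Kubernetes patch metadata). A minimal sketch of driving `Merge`, using the same codec construction that `Overrider.Apply` uses later in this diff; it assumes the vendored packages resolve as ordinary module imports, and names like `pod` are illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
	"k8s.io/kubectl/pkg/scheme"
)

func main() {
	pod := &corev1.Pod{
		TypeMeta:   metav1.TypeMeta{APIVersion: "v1", Kind: "Pod"},
		ObjectMeta: metav1.ObjectMeta{Name: "web"},
	}

	// Encoder/decoder pair over kubectl's scheme; Merge round-trips the
	// object through versioned JSON, so apiVersion/kind come from the pod.
	codec := runtime.NewCodec(
		scheme.DefaultJSONEncoder(),
		scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...),
	)

	out, err := cmdutil.Merge(codec, pod, `{"metadata":{"labels":{"app":"web"}}}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.(*corev1.Pod).Labels) // map[app:web]
}
```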
+func DumpReaderToFile(reader io.Reader, filename string) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + + buffer := make([]byte, 1024) + for { + count, err := reader.Read(buffer) + if err == io.EOF { + break + } + if err != nil { + return err + } + _, err = f.Write(buffer[:count]) + if err != nil { + return err + } + } + return nil +} + +func GetServerSideApplyFlag(cmd *cobra.Command) bool { + return GetFlagBool(cmd, "server-side") +} + +func GetForceConflictsFlag(cmd *cobra.Command) bool { + return GetFlagBool(cmd, "force-conflicts") +} + +func GetFieldManagerFlag(cmd *cobra.Command) string { + return GetFlagString(cmd, "field-manager") +} + +func GetValidationDirective(cmd *cobra.Command) (string, error) { + var validateFlag = GetFlagString(cmd, "validate") + b, err := strconv.ParseBool(validateFlag) + if err != nil { + switch validateFlag { + case "strict": + return metav1.FieldValidationStrict, nil + case "warn": + return metav1.FieldValidationWarn, nil + case "ignore": + return metav1.FieldValidationIgnore, nil + default: + return metav1.FieldValidationStrict, fmt.Errorf(`invalid - validate option %q; must be one of: strict (or true), warn, ignore (or false)`, validateFlag) + } + } + // The flag was a boolean + if b { + return metav1.FieldValidationStrict, nil + } + return metav1.FieldValidationIgnore, nil +} + +type DryRunStrategy int + +const ( + // DryRunNone indicates the client will make all mutating calls + DryRunNone DryRunStrategy = iota + + // DryRunClient, or client-side dry-run, indicates the client will prevent + // making mutating calls such as CREATE, PATCH, and DELETE + DryRunClient + + // DryRunServer, or server-side dry-run, indicates the client will send + // mutating calls to the APIServer with the dry-run parameter to prevent + // persisting changes. + // + // Note that clients sending server-side dry-run calls should verify that + // the APIServer and the resource supports server-side dry-run, and otherwise + // clients should fail early. + // + // If a client sends a server-side dry-run call to an APIServer that doesn't + // support server-side dry-run, then the APIServer will persist changes inadvertently. + DryRunServer +) + +func GetDryRunStrategy(cmd *cobra.Command) (DryRunStrategy, error) { + var dryRunFlag = GetFlagString(cmd, "dry-run") + b, err := strconv.ParseBool(dryRunFlag) + // The flag is not a boolean + if err != nil { + switch dryRunFlag { + case cmd.Flag("dry-run").NoOptDefVal: + klog.Warning(`--dry-run is deprecated and can be replaced with --dry-run=client.`) + return DryRunClient, nil + case "client": + return DryRunClient, nil + case "server": + return DryRunServer, nil + case "none": + return DryRunNone, nil + default: + return DryRunNone, fmt.Errorf(`Invalid dry-run value (%v). 
Must be "none", "server", or "client".`, dryRunFlag) + } + } + // The flag was a boolean + if b { + klog.Warningf(`--dry-run=%v is deprecated (boolean value) and can be replaced with --dry-run=%s.`, dryRunFlag, "client") + return DryRunClient, nil + } + klog.Warningf(`--dry-run=%v is deprecated (boolean value) and can be replaced with --dry-run=%s.`, dryRunFlag, "none") + return DryRunNone, nil +} + +// PrintFlagsWithDryRunStrategy sets a success message at print time for the dry run strategy +// +// TODO(juanvallejo): This can be cleaned up even further by creating +// a PrintFlags struct that binds the --dry-run flag, and whose +// ToPrinter method returns a printer that understands how to print +// this success message. +func PrintFlagsWithDryRunStrategy(printFlags *genericclioptions.PrintFlags, dryRunStrategy DryRunStrategy) *genericclioptions.PrintFlags { + switch dryRunStrategy { + case DryRunClient: + printFlags.Complete("%s (dry run)") + case DryRunServer: + printFlags.Complete("%s (server dry run)") + } + return printFlags +} + +// GetResourcesAndPairs retrieves resources and "KEY=VALUE or KEY-" pair args from given args +func GetResourcesAndPairs(args []string, pairType string) (resources []string, pairArgs []string, err error) { + foundPair := false + for _, s := range args { + nonResource := (strings.Contains(s, "=") && s[0] != '=') || (strings.HasSuffix(s, "-") && s != "-") + switch { + case !foundPair && nonResource: + foundPair = true + fallthrough + case foundPair && nonResource: + pairArgs = append(pairArgs, s) + case !foundPair && !nonResource: + resources = append(resources, s) + case foundPair && !nonResource: + err = fmt.Errorf("all resources must be specified before %s changes: %s", pairType, s) + return + } + } + return +} + +// ParsePairs retrieves new and remove pairs (if supportRemove is true) from "KEY=VALUE or KEY-" pair args +func ParsePairs(pairArgs []string, pairType string, supportRemove bool) (newPairs map[string]string, removePairs []string, err error) { + newPairs = map[string]string{} + if supportRemove { + removePairs = []string{} + } + var invalidBuf bytes.Buffer + var invalidBufNonEmpty bool + for _, pairArg := range pairArgs { + if strings.Contains(pairArg, "=") && pairArg[0] != '=' { + parts := strings.SplitN(pairArg, "=", 2) + if len(parts) != 2 { + if invalidBufNonEmpty { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(pairArg) + invalidBufNonEmpty = true + } else { + newPairs[parts[0]] = parts[1] + } + } else if supportRemove && strings.HasSuffix(pairArg, "-") && pairArg != "-" { + removePairs = append(removePairs, pairArg[:len(pairArg)-1]) + } else { + if invalidBufNonEmpty { + invalidBuf.WriteString(", ") + } + invalidBuf.WriteString(pairArg) + invalidBufNonEmpty = true + } + } + if invalidBufNonEmpty { + err = fmt.Errorf("invalid %s format: %s", pairType, invalidBuf.String()) + return + } + + return +} + +// IsSiblingCommandExists receives a pointer to a cobra command and a target string. +// Returns true if the target string is found in the list of sibling commands. +func IsSiblingCommandExists(cmd *cobra.Command, targetCmdName string) bool { + for _, c := range cmd.Parent().Commands() { + if c.Name() == targetCmdName { + return true + } + } + + return false +} + +// DefaultSubCommandRun prints a command's help string to the specified output if no +// arguments (sub-commands) are provided, or a usage error otherwise. 
+func DefaultSubCommandRun(out io.Writer) func(c *cobra.Command, args []string) { + return func(c *cobra.Command, args []string) { + c.SetOut(out) + c.SetErr(out) + RequireNoArguments(c, args) + c.Help() + CheckErr(ErrExit) + } +} + +// RequireNoArguments exits with a usage error if extra arguments are provided. +func RequireNoArguments(c *cobra.Command, args []string) { + if len(args) > 0 { + CheckErr(UsageErrorf(c, "unknown command %q", strings.Join(args, " "))) + } +} + +// StripComments will transform a YAML file into JSON, thus dropping any comments +// in it. Note that if the given file has a syntax error, the transformation will +// fail and we will manually drop all comments from the file. +func StripComments(file []byte) []byte { + stripped := file + stripped, err := yaml.ToJSON(stripped) + if err != nil { + stripped = ManualStrip(file) + } + return stripped +} + +// ManualStrip is used for dropping comments from a YAML file +func ManualStrip(file []byte) []byte { + stripped := []byte{} + lines := bytes.Split(file, []byte("\n")) + for i, line := range lines { + trimline := bytes.TrimSpace(line) + + if bytes.HasPrefix(trimline, []byte("#")) && !bytes.HasPrefix(trimline, []byte("#!")) { + continue + } + stripped = append(stripped, line...) + if i < len(lines)-1 { + stripped = append(stripped, '\n') + } + } + return stripped +} + +// ScaleClientFunc provides a ScalesGetter +type ScaleClientFunc func(genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) + +// ScaleClientFn gives a way to easily override the function for unit testing if needed. +var ScaleClientFn ScaleClientFunc = scaleClient + +// scaleClient gives you back scale getter +func scaleClient(restClientGetter genericclioptions.RESTClientGetter) (scale.ScalesGetter, error) { + discoveryClient, err := restClientGetter.ToDiscoveryClient() + if err != nil { + return nil, err + } + + clientConfig, err := restClientGetter.ToRESTConfig() + if err != nil { + return nil, err + } + + setKubernetesDefaults(clientConfig) + restClient, err := rest.RESTClientFor(clientConfig) + if err != nil { + return nil, err + } + resolver := scale.NewDiscoveryScaleKindResolver(discoveryClient) + mapper, err := restClientGetter.ToRESTMapper() + if err != nil { + return nil, err + } + + return scale.New(restClient, mapper, dynamic.LegacyAPIPathResolverFunc, resolver), nil +} + +func Warning(cmdErr io.Writer, newGeneratorName, oldGeneratorName string) { + fmt.Fprintf(cmdErr, "WARNING: New generator %q specified, "+ + "but it isn't available. "+ + "Falling back to %q.\n", + newGeneratorName, + oldGeneratorName, + ) +} + +// Difference removes any elements of subArray from fullArray and returns the result +func Difference(fullArray []string, subArray []string) []string { + exclude := make(map[string]bool, len(subArray)) + for _, elem := range subArray { + exclude[elem] = true + } + var result []string + for _, elem := range fullArray { + if _, found := exclude[elem]; !found { + result = append(result, elem) + } + } + return result +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go new file mode 100644 index 0000000000..74308bc5d8 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/kubectl_match_version.go @@ -0,0 +1,129 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "sync" + + "github.com/spf13/pflag" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/discovery" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/scheme" + + "k8s.io/cli-runtime/pkg/genericclioptions" + "k8s.io/component-base/version" +) + +const ( + flagMatchBinaryVersion = "match-server-version" +) + +// MatchVersionFlags is for setting the "match server version" function. +type MatchVersionFlags struct { + Delegate genericclioptions.RESTClientGetter + + RequireMatchedServerVersion bool + checkServerVersion sync.Once + matchesServerVersionErr error +} + +var _ genericclioptions.RESTClientGetter = &MatchVersionFlags{} + +func (f *MatchVersionFlags) checkMatchingServerVersion() error { + f.checkServerVersion.Do(func() { + if !f.RequireMatchedServerVersion { + return + } + discoveryClient, err := f.Delegate.ToDiscoveryClient() + if err != nil { + f.matchesServerVersionErr = err + return + } + f.matchesServerVersionErr = discovery.MatchesServerVersion(version.Get(), discoveryClient) + }) + + return f.matchesServerVersionErr +} + +// ToRESTConfig implements RESTClientGetter. +// Returns a REST client configuration based on a provided path +// to a .kubeconfig file, loading rules, and config flag overrides. +// Expects the AddFlags method to have been called. +func (f *MatchVersionFlags) ToRESTConfig() (*rest.Config, error) { + if err := f.checkMatchingServerVersion(); err != nil { + return nil, err + } + clientConfig, err := f.Delegate.ToRESTConfig() + if err != nil { + return nil, err + } + // TODO we should not have to do this. It smacks of something going wrong. + setKubernetesDefaults(clientConfig) + return clientConfig, nil +} + +func (f *MatchVersionFlags) ToRawKubeConfigLoader() clientcmd.ClientConfig { + return f.Delegate.ToRawKubeConfigLoader() +} + +func (f *MatchVersionFlags) ToDiscoveryClient() (discovery.CachedDiscoveryInterface, error) { + if err := f.checkMatchingServerVersion(); err != nil { + return nil, err + } + return f.Delegate.ToDiscoveryClient() +} + +// ToRESTMapper returns a mapper. +func (f *MatchVersionFlags) ToRESTMapper() (meta.RESTMapper, error) { + if err := f.checkMatchingServerVersion(); err != nil { + return nil, err + } + return f.Delegate.ToRESTMapper() +} + +func (f *MatchVersionFlags) AddFlags(flags *pflag.FlagSet) { + flags.BoolVar(&f.RequireMatchedServerVersion, flagMatchBinaryVersion, f.RequireMatchedServerVersion, "Require server version to match client version") +} + +func NewMatchVersionFlags(delegate genericclioptions.RESTClientGetter) *MatchVersionFlags { + return &MatchVersionFlags{ + Delegate: delegate, + } +} + +// setKubernetesDefaults sets default values on the provided client config for accessing the +// Kubernetes API or returns an error if any of the defaults are impossible or invalid. +// TODO this isn't what we want. Each clientset should be setting defaults as it sees fit. +func setKubernetesDefaults(config *rest.Config) error { + // TODO remove this hack. This is allowing the GetOptions to be serialized. 
+ config.GroupVersion = &schema.GroupVersion{Group: "", Version: "v1"} + + if config.APIPath == "" { + config.APIPath = "/api" + } + if config.NegotiatedSerializer == nil { + // This codec factory ensures the resources are not converted. Therefore, resources + // will not be round-tripped through internal versions. Defaulting does not happen + // on the client. + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + } + return rest.SetKubernetesDefaults(config) +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/override_options.go b/vendor/k8s.io/kubectl/pkg/cmd/util/override_options.go new file mode 100644 index 0000000000..1e63bc789b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/override_options.go @@ -0,0 +1,90 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "github.com/spf13/cobra" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/kubectl/pkg/scheme" + "k8s.io/kubectl/pkg/util/i18n" +) + +type OverrideType string + +const ( + // OverrideTypeJSON will use an RFC6902 JSON Patch to alter the generated output + OverrideTypeJSON OverrideType = "json" + + // OverrideTypeMerge will use an RFC7396 JSON Merge Patch to alter the generated output + OverrideTypeMerge OverrideType = "merge" + + // OverrideTypeStrategic will use a Strategic Merge Patch to alter the generated output + OverrideTypeStrategic OverrideType = "strategic" +) + +const DefaultOverrideType = OverrideTypeMerge + +type OverrideOptions struct { + Overrides string + OverrideType OverrideType +} + +func (o *OverrideOptions) AddOverrideFlags(cmd *cobra.Command) { + cmd.Flags().StringVar(&o.Overrides, "overrides", "", i18n.T("An inline JSON override for the generated object. If this is non-empty, it is used to override the generated object. 
Requires that the object supply a valid apiVersion field.")) + cmd.Flags().StringVar((*string)(&o.OverrideType), "override-type", string(DefaultOverrideType), fmt.Sprintf("The method used to override the generated object: %s, %s, or %s.", OverrideTypeJSON, OverrideTypeMerge, OverrideTypeStrategic)) +} + +func (o *OverrideOptions) NewOverrider(dataStruct runtime.Object) *Overrider { + return &Overrider{ + Options: o, + DataStruct: dataStruct, + } +} + +type Overrider struct { + Options *OverrideOptions + DataStruct runtime.Object +} + +func (o *Overrider) Apply(obj runtime.Object) (runtime.Object, error) { + if len(o.Options.Overrides) == 0 { + return obj, nil + } + + codec := runtime.NewCodec(scheme.DefaultJSONEncoder(), scheme.Codecs.UniversalDecoder(scheme.Scheme.PrioritizedVersionsAllGroups()...)) + + var overrideType OverrideType + if len(o.Options.OverrideType) == 0 { + overrideType = DefaultOverrideType + } else { + overrideType = o.Options.OverrideType + } + + switch overrideType { + case OverrideTypeJSON: + return JSONPatch(codec, obj, o.Options.Overrides) + case OverrideTypeMerge: + return Merge(codec, obj, o.Options.Overrides) + case OverrideTypeStrategic: + return StrategicMerge(codec, obj, o.Options.Overrides, o.DataStruct) + default: + return nil, fmt.Errorf("invalid override type: %v", overrideType) + } +} diff --git a/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go new file mode 100644 index 0000000000..ebd228821e --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/cmd/util/printing.go @@ -0,0 +1,29 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" + + "k8s.io/kubectl/pkg/util/templates" +) + +// SuggestAPIResources returns a suggestion to use the "api-resources" command +// to retrieve a supported list of resources +func SuggestAPIResources(parent string) string { + return templates.LongDesc(fmt.Sprintf("Use \"%s api-resources\" for a complete list of supported resources.", parent)) +} diff --git a/vendor/k8s.io/kubectl/pkg/rawhttp/raw.go b/vendor/k8s.io/kubectl/pkg/rawhttp/raw.go new file mode 100644 index 0000000000..57d73cde0f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/rawhttp/raw.go @@ -0,0 +1,95 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rawhttp + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/client-go/rest" +) + +// RawPost uses the REST client to POST content +func RawPost(restClient *rest.RESTClient, streams genericiooptions.IOStreams, url, filename string) error { + return raw(restClient, streams, url, filename, "POST") +} + +// RawPut uses the REST client to PUT content +func RawPut(restClient *rest.RESTClient, streams genericiooptions.IOStreams, url, filename string) error { + return raw(restClient, streams, url, filename, "PUT") +} + +// RawGet uses the REST client to GET content +func RawGet(restClient *rest.RESTClient, streams genericiooptions.IOStreams, url string) error { + return raw(restClient, streams, url, "", "GET") +} + +// RawDelete uses the REST client to DELETE content +func RawDelete(restClient *rest.RESTClient, streams genericiooptions.IOStreams, url, filename string) error { + return raw(restClient, streams, url, filename, "DELETE") +} + +// raw makes a simple HTTP request to the provided path on the server using the default credentials. +func raw(restClient *rest.RESTClient, streams genericiooptions.IOStreams, url, filename, requestType string) error { + var data io.Reader + switch { + case len(filename) == 0: + data = bytes.NewBuffer([]byte{}) + + case filename == "-": + data = streams.In + + default: + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + data = f + } + + var request *rest.Request + switch requestType { + case "GET": + request = restClient.Get().RequestURI(url) + case "PUT": + request = restClient.Put().RequestURI(url).Body(data) + case "POST": + request = restClient.Post().RequestURI(url).Body(data) + case "DELETE": + request = restClient.Delete().RequestURI(url).Body(data) + + default: + return fmt.Errorf("unknown requestType: %q", requestType) + } + + stream, err := request.Stream(context.TODO()) + if err != nil { + return err + } + defer stream.Close() + + _, err = io.Copy(streams.Out, stream) + if err != nil && err != io.EOF { + return err + } + return nil +} diff --git a/vendor/k8s.io/kubectl/pkg/scheme/install.go b/vendor/k8s.io/kubectl/pkg/scheme/install.go new file mode 100644 index 0000000000..52a7ce6a8d --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/scheme/install.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package scheme
+
+import (
+	admissionv1 "k8s.io/api/admission/v1"
+	admissionv1beta1 "k8s.io/api/admission/v1beta1"
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
+	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
+	appsv1 "k8s.io/api/apps/v1"
+	appsv1beta1 "k8s.io/api/apps/v1beta1"
+	appsv1beta2 "k8s.io/api/apps/v1beta2"
+	authenticationv1 "k8s.io/api/authentication/v1"
+	authenticationv1beta1 "k8s.io/api/authentication/v1beta1"
+	authorizationv1 "k8s.io/api/authorization/v1"
+	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
+	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	autoscalingv2 "k8s.io/api/autoscaling/v2"
+	batchv1 "k8s.io/api/batch/v1"
+	batchv1beta1 "k8s.io/api/batch/v1beta1"
+	certificatesv1 "k8s.io/api/certificates/v1"
+	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
+	corev1 "k8s.io/api/core/v1"
+	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
+	imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1"
+	networkingv1 "k8s.io/api/networking/v1"
+	policyv1 "k8s.io/api/policy/v1"
+	policyv1beta1 "k8s.io/api/policy/v1beta1"
+	rbacv1 "k8s.io/api/rbac/v1"
+	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
+	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
+	schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1"
+	storagev1 "k8s.io/api/storage/v1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/kubernetes/scheme"
+)
+
+// Register all groups in kubectl's registry, but no componentconfig group since it's not in k8s.io/api.
+// The code in this file mostly duplicates the install under k8s.io/kubernetes/pkg/api and k8s.io/kubernetes/pkg/apis,
+// but does NOT register the internal types.
+func init() { + // Register external types for Scheme + metav1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(metav1beta1.AddMetaToScheme(Scheme)) + utilruntime.Must(metav1.AddMetaToScheme(Scheme)) + utilruntime.Must(scheme.AddToScheme(Scheme)) + + utilruntime.Must(Scheme.SetVersionPriority(corev1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(admissionv1beta1.SchemeGroupVersion, admissionv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(admissionregistrationv1beta1.SchemeGroupVersion, admissionregistrationv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(appsv1beta1.SchemeGroupVersion, appsv1beta2.SchemeGroupVersion, appsv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(authenticationv1.SchemeGroupVersion, authenticationv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(authorizationv1.SchemeGroupVersion, authorizationv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(autoscalingv1.SchemeGroupVersion, autoscalingv2.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(batchv1.SchemeGroupVersion, batchv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(certificatesv1.SchemeGroupVersion, certificatesv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(extensionsv1beta1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(imagepolicyv1alpha1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(networkingv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(policyv1beta1.SchemeGroupVersion, policyv1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(rbacv1.SchemeGroupVersion, rbacv1beta1.SchemeGroupVersion, rbacv1alpha1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(schedulingv1alpha1.SchemeGroupVersion)) + utilruntime.Must(Scheme.SetVersionPriority(storagev1.SchemeGroupVersion, storagev1beta1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/kubectl/pkg/scheme/scheme.go b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go new file mode 100644 index 0000000000..d1d7847b8f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/scheme/scheme.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package scheme + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +// All kubectl code should eventually switch to use this Registry and Scheme instead of the global ones. + +// Scheme is the default instance of runtime.Scheme to which types in the Kubernetes API are already registered. +var Scheme = runtime.NewScheme() + +// Codecs provides access to encoding and decoding for the scheme +var Codecs = serializer.NewCodecFactory(Scheme) + +// ParameterCodec handles versioning of objects that are converted to query parameters. 
+var ParameterCodec = runtime.NewParameterCodec(Scheme) + +// DefaultJSONEncoder returns a default encoder for our scheme +func DefaultJSONEncoder() runtime.Encoder { + return unstructured.NewJSONFallbackEncoder(Codecs.LegacyCodec(Scheme.PrioritizedVersionsAllGroups()...)) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go b/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go new file mode 100644 index 0000000000..d850b283db --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/i18n.go @@ -0,0 +1,215 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package i18n + +import ( + "archive/zip" + "bytes" + "embed" + "errors" + "fmt" + "os" + "strings" + "sync" + + "github.com/chai2010/gettext-go" + + "k8s.io/klog/v2" +) + +//go:embed translations +var translations embed.FS + +var knownTranslations = map[string][]string{ + "kubectl": { + "default", + "en_US", + "fr_FR", + "zh_CN", + "ja_JP", + "zh_TW", + "it_IT", + "de_DE", + "ko_KR", + "pt_BR", + }, + // only used for unit tests. + "test": { + "default", + "en_US", + }, +} + +var ( + lazyLoadTranslationsOnce sync.Once + LoadTranslationsFunc = func() error { + return LoadTranslations("kubectl", nil) + } + translationsLoaded bool +) + +// SetLoadTranslationsFunc sets the function called to lazy load translations. +// It must be called in an init() func that occurs BEFORE any i18n.T() calls are made by any package. You can +// accomplish this by creating a separate package containing your init() func, and then importing that package BEFORE +// any other packages that call i18n.T(). +// +// Example Usage: +// +// package myi18n +// +// import "k8s.io/kubectl/pkg/util/i18n" +// +// func init() { +// if err := i18n.SetLoadTranslationsFunc(loadCustomTranslations); err != nil { +// panic(err) +// } +// } +// +// func loadCustomTranslations() error { +// // Load your custom translations here... +// } +// +// And then in your main or root command package, import your custom package like this: +// +// import ( +// // Other imports that don't need i18n... +// _ "example.com/myapp/myi18n" +// // Other imports that do need i18n... 
+// )
+func SetLoadTranslationsFunc(f func() error) error {
+	if translationsLoaded {
+		return errors.New("translations have already been loaded")
+	}
+	LoadTranslationsFunc = func() error {
+		if err := f(); err != nil {
+			return err
+		}
+		translationsLoaded = true
+		return nil
+	}
+	return nil
+}
+
+func loadSystemLanguage() string {
+	// Implements the following locale priority order: LC_ALL, LC_MESSAGES, LANG
+	// Similarly to: https://www.gnu.org/software/gettext/manual/html_node/Locale-Environment-Variables.html
+	langStr := os.Getenv("LC_ALL")
+	if langStr == "" {
+		langStr = os.Getenv("LC_MESSAGES")
+	}
+	if langStr == "" {
+		langStr = os.Getenv("LANG")
+	}
+
+	if langStr == "" {
+		klog.V(3).Infof("Couldn't find the LC_ALL, LC_MESSAGES or LANG environment variables, defaulting to en_US")
+		return "default"
+	}
+	pieces := strings.Split(langStr, ".")
+	if len(pieces) != 2 {
+		klog.V(3).Infof("Unexpected system language (%s), defaulting to en_US", langStr)
+		return "default"
+	}
+	return pieces[0]
+}
+
+func findLanguage(root string, getLanguageFn func() string) string {
+	langStr := getLanguageFn()
+
+	translations := knownTranslations[root]
+	for ix := range translations {
+		if translations[ix] == langStr {
+			return langStr
+		}
+	}
+	klog.V(3).Infof("Couldn't find translations for %s, using default", langStr)
+	return "default"
+}
+
+// LoadTranslations loads translation files. getLanguageFn should return a language
+// string (e.g. 'en-US'). If getLanguageFn is nil, then the loadSystemLanguage function
+// is used, which consults the LC_ALL, LC_MESSAGES, and LANG environment variables.
+func LoadTranslations(root string, getLanguageFn func() string) error {
+	if getLanguageFn == nil {
+		getLanguageFn = loadSystemLanguage
+	}
+
+	langStr := findLanguage(root, getLanguageFn)
+	translationFiles := []string{
+		fmt.Sprintf("%s/%s/LC_MESSAGES/k8s.po", root, langStr),
+		fmt.Sprintf("%s/%s/LC_MESSAGES/k8s.mo", root, langStr),
+	}
+
+	klog.V(3).Infof("Setting language to %s", langStr)
+	// TODO: list the directory and load all files.
+	buf := new(bytes.Buffer)
+	w := zip.NewWriter(buf)
+
+	// Make sure to check the error on Close.
+	for _, file := range translationFiles {
+		filename := "translations/" + file
+		f, err := w.Create(file)
+		if err != nil {
+			return err
+		}
+		data, err := translations.ReadFile(filename)
+		if err != nil {
+			return err
+		}
+		if _, err := f.Write(data); err != nil {
+			return err
+		}
+	}
+	if err := w.Close(); err != nil {
+		return err
+	}
+	gettext.BindLocale(gettext.New("k8s", root+".zip", buf.Bytes()))
+	gettext.SetDomain("k8s")
+	gettext.SetLanguage(langStr)
+	translationsLoaded = true
+	return nil
+}
+
+func lazyLoadTranslations() {
+	lazyLoadTranslationsOnce.Do(func() {
+		if translationsLoaded {
+			return
+		}
+		if err := LoadTranslationsFunc(); err != nil {
+			klog.Warning("Failed to load translations")
+		}
+	})
+}
+
+// T translates a string, possibly substituting arguments into it along
+// the way. If len(args) is > 0, args[0] is assumed to be the plural value
+// and plural translation is used.
+func T(defaultValue string, args ...int) string {
+	lazyLoadTranslations()
+	if len(args) == 0 {
+		return gettext.PGettext("", defaultValue)
+	}
+	return fmt.Sprintf(gettext.PNGettext("", defaultValue, defaultValue+".plural", args[0]),
+		args[0])
+}
+
+// Errorf produces an error with a translated error string.
+// Substitution is performed via the `T` function above, following
+// the same rules.
+func Errorf(defaultValue string, args ...int) error {
+	return errors.New(T(defaultValue, args...))
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/OWNERS b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/OWNERS
new file mode 100644
index 0000000000..610dc59f84
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/OWNERS
@@ -0,0 +1,7 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+reviewers: []
+approvers:
+  - sig-cli-maintainers
+emeritus_approvers:
+  - brendandburns
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/README.md b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/README.md
new file mode 100644
index 0000000000..6318ffe627
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/README.md
@@ -0,0 +1,82 @@
+# Translations README
+
+This is a basic sketch of the workflow needed to add translations:
+
+# Adding/Updating Translations
+
+## New languages
+Create `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/<language>/LC_MESSAGES/k8s.po`. There's
+no need to update `translations/test/...` which is only used for unit tests.
+
+There is an example [PR here](https://github.com/kubernetes/kubernetes/pull/40645) which adds support for French.
+
+Once you've added a new language, you'll need to register it in
+`staging/src/k8s.io/kubectl/pkg/util/i18n/i18n.go` by adding it to the `knownTranslations` map.
+
+## Wrapping strings
+There is a simple script in `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/extract.py` that performs
+simple regular expression based wrapping of strings. It can always
+use improvements to understand additional strings.
+
+## Extracting strings
+Once the strings are wrapped, you can extract strings from go files using
+the `go-xgettext` command which can be installed with:
+
+```console
+go get github.com/gosexy/gettext/go-xgettext
+```
+
+Once that's installed you can run `./hack/update-translations.sh`, which
+will extract and sort any new strings.
+
+## Adding new translations
+Edit the appropriate `k8s.po` file, `poedit` is a popular open source tool
+for translations. You can load the `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot` file
+to find messages that might be missing.
+
+Once you are done with your `k8s.po` file, generate the corresponding `k8s.mo`
+file. `poedit` does this automatically on save, but you can also run
+`./hack/update-translations.sh` to perform the `po` to `mo` translation.
+
+We use the English translation as the `msgid`.
+
+## Regenerating the bindata file
+
+> Note: Regeneration of bindata is no longer necessary for Kubernetes 1.22+ as
+> the translations are now embedded into the binary at compile time.
+> See: https://github.com/kubernetes/kubernetes/pull/99829
+
+With the `mo` files up to date, you can now convert the generated files
+into code using the `go-bindata` command which can be installed with:
+
+```console
+go get github.com/go-bindata/go-bindata/...
+```
+
+Run `./hack/generate-bindata.sh`; this will turn the translation files
+into generated code which will in turn be packaged into the Kubernetes
+binaries.
+
+## Extracting strings
+
+There is a script in `staging/src/k8s.io/kubectl/pkg/util/i18n/translations/extract.py` that knows how to do some
+simple extraction. It needs a lot of work.
+
+# Using translations
+
+To use translations, you simply need to add:
+```go
+import "k8s.io/kubectl/pkg/util/i18n"
+...
+// Get a translated string
+translated := i18n.T("Your message in English here")
+
+// Get a translated plural string
+translated := i18n.T("You had %d items", items)
+
+// Translated error
+return i18n.Errorf("Something bad happened")
+
+// Translated plural error
+return i18n.Errorf("%d bad things happened", items)
+```
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py
new file mode 100644
index 0000000000..33f9751287
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/extract.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python3
+
+# Copyright 2017 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Extract strings from command files and externalize into translation files.
+Expects to be run from the root directory of the repository.
+
+Usage:
+   extract.py pkg/kubectl/cmd/apply.go
+
+"""
+import fileinput
+import sys
+import re
+
+class MatchHandler(object):
+    """ Simple holder for a regular expression and a function
+    to run if that regular expression matches a line.
+    The function should expect (re.match, file, linenumber) as parameters
+    """
+    def __init__(self, regex, replace_fn):
+        self.regex = re.compile(regex)
+        self.replace_fn = replace_fn
+
+def short_replace(match, file, line_number):
+    """Replace a Short: ... cobra command description with an internationalization
+    """
+    sys.stdout.write('{}i18n.T({}),\n'.format(match.group(1), match.group(2)))
+
+SHORT_MATCH = MatchHandler(r'(\s+Short:\s+)("[^"]+"),', short_replace)
+
+def import_replace(match, file, line_number):
+    """Add an extra import for the i18n library.
+    Doesn't try to be smart and detect if it's already present, assumes a
+    gofmt round will fix things.
+    """
+    sys.stdout.write('{}\n"k8s.io/kubectl/pkg/util/i18n"\n'.format(match.group(1)))
+
+IMPORT_MATCH = MatchHandler('(.*"k8s.io/kubectl/pkg/cmd/util")', import_replace)
+
+
+def string_flag_replace(match, file, line_number):
+    """Replace a cmd.Flags().String("...", "", "...") with an internationalization
+    """
+    sys.stdout.write('{}i18n.T("{})"))\n'.format(match.group(1), match.group(2)))
+
+STRING_FLAG_MATCH = MatchHandler('(\s+cmd\.Flags\(\).String\("[^"]*", "[^"]*", )"([^"]*)"\)', string_flag_replace)
+
+
+def long_string_replace(match, file, line_number):
+    return '{}i18n.T({}){}'.format(match.group(1), match.group(2), match.group(3))
+
+LONG_DESC_MATCH = MatchHandler('(LongDesc\()(`[^`]+`)([^\n]\n)', long_string_replace)
+
+EXAMPLE_MATCH = MatchHandler('(Examples\()(`[^`]+`)([^\n]\n)', long_string_replace)
+
+def replace(filename, matchers, multiline_matchers):
+    """Given a file and a set of matchers, run those matchers
+    across the file and replace it with the results.
+ """ + # Run all the matchers + line_number = 0 + for line in fileinput.input(filename, inplace=True): + line_number += 1 + matched = False + for matcher in matchers: + match = matcher.regex.match(line) + if match: + matcher.replace_fn(match, filename, line_number) + matched = True + break + if not matched: + sys.stdout.write(line) + sys.stdout.flush() + with open(filename, 'r') as datafile: + content = datafile.read() + for matcher in multiline_matchers: + match = matcher.regex.search(content) + while match: + rep = matcher.replace_fn(match, filename, 0) + # Escape back references in the replacement string + # (And escape for Python) + # (And escape for regex) + rep = re.sub('\\\\(\\d)', '\\\\\\\\\\1', rep) + content = matcher.regex.sub(rep, content, 1) + match = matcher.regex.search(content) + sys.stdout.write(content) + + # gofmt the file again + from subprocess import call + call(["goimports", "-w", filename]) + +replace(sys.argv[1], [SHORT_MATCH, IMPORT_MATCH, STRING_FLAG_MATCH], [LONG_DESC_MATCH, EXAMPLE_MATCH]) diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/OWNERS b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/OWNERS new file mode 100644 index 0000000000..56cbb7f7c7 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - sig-cli-maintainers +reviewers: + - sig-cli-reviewers diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..ee64eb7a8d Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..cd5d53c888 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/de_DE/LC_MESSAGES/k8s.po @@ -0,0 +1,2920 @@ +# German translation. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR steffenschmitz@hotmail.de, 2017. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: kubernetes\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2017-09-02 01:36+0200\n" +"Last-Translator: Steffen Schmitz \n" +"Language-Team: Steffen Schmitz \n" +"Language: de_DE\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"X-Generator: Poedit 1.8.7.1\n" +"X-Poedit-SourceCharset: UTF-8\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Zeige Metriken für alle Nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Zeige Metriken für den gegebenen Node\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Erhalte die Dokumentation einer Resource und ihrer Felder\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Erhalte die Dokumentation eines speziellen Felds einer Resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Gebe Optionen aus, die an alle Kommandos vererbt werden\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Gebe die Client- und Server-Versionen des aktuellen Kontexts aus\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Gebe die unterstützten API Versionen aus\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Zeige Metriken für alle Pods im Namespace default\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Zeige Metriken für alle Pods im gegebenen namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Zeige Metriken für den gebenen Pod und seine Container\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Zeige Metriken für Pods mit dem Label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. 
If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tKonvertiere Konfigurationsdateien zwischen API Versionen. Sowohl YAML-\n" +"\t\talsauch JSON-Formate werden akzeptiert.\n" +"\n" +"\t\tDer Befehlt akzeptiert Dateinamen, Ordner oder URL als Parameter und " +"konvertiert es ins Format\n" +"\t\tder mit --output-version gegebenen Version. Wenn die Zielversion nicht \n" +"\t\tangegeben wird oder ungültig ist, wird die neueste Version verwendet.\n" +"\n" +"\t\tDie Standardausgabe wird auf stdout im YAML-Format ausgegeben. Man kann " +"die Option -o verwenden,\n" +"\t\tum das Ausgabeziel festzulegen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tErstelle einen Namespace mit dem gegebenen Namen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tErstelle eine Role mit einer einzelnen Rule." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tErstelle einen ServiceAccount mit dem gegebenen Namen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMarkiere Knoten als schedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMarkiere Knoten als unschedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\tSetze die aktuelle Annotation Last-Applied-Configuration auf den Inhalt " +"der Datei.\n" +"\t\tDas bedeutet, dass Last-Applied-Configuration aktualisiert wird, als ob " +"'kubectl apply -f ' ausgeführt wurde,\n" +"\t\tohne andere Teile des Objekts zu aktualisieren." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Erstelle einen neuen Namespace my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Erstelle einen neuen ServiceAccount my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." 
+msgstr "" +"\n" +"\tErstelle einen ExternalName-Service mit den gegebenen Namen.\n" +"\n" +"\tExternalName service referenziert eine externe DNS Adresse statt\n" +"\teines pods, was Anwendungsautoren erlaubt, einen Service zu " +"referenzieren,\n" +"\tder abseits der Platform, auf anderen Clustern oder lokal exisiert." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp hilft bei jedem Befehl in der Anwendung.\n" +"\tGib einfach kubectl help [path to command] für alle Details ein." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Erstelle einen neuen LoadBalancer-Service my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Schreibe den aktuellen Cluster-Zustand auf stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Schreibe aktuellen Cluster-Zustand in /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Schreibe alle Namespaces auf stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Schreibe eine Menge an Namespaces in /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Erstelle einen LoadBalancer-Service mit dem gegebenen Namen." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"Eine komma-separierte Menge von Quota-Scopes, die zu jedem Object passen " +"muss, dass von der Quota betroffen ist." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"Eine komma-separierte Menge von resource=quantity Paaren, die ein hartes " +"Limit definieren." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"Ein Label-Selektor, der für das Budget benutzt werden kann. Nur gleichheits-" +"basierte Auswahlkriterien werden unterstützt." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. 
If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"Ein Label-Selektor, der für den Service benutzt werden kann. Nur gleichheits-" +"basierte Auswahlkriterien werden unterstützt. Wenn er leer ist (standard), " +"wird der Selektor vom ReplicationController oder ReplicaSet abgeleitet" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Zusätzliche, externe IP Adressen (die nicht von Kubernetes verwaltet " +"werden), die der Service akzeptieren soll. Wenn diese IP zu einem Knoten " +"gerouted wird, kann der Service über die IP angesprochen werden, zusätzlich " +"zu seiner generierten Service-IP." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Genehmige eine Certificate-Signing-Request" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Weise Deine eigene ClusterIP zu oder setze sie auf 'None' für einen " +"'headless'-Service (kein LoadBalancing)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Weise einem laufenden Container zu" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP, die dem Service zugewiesen werden soll. Freilassen, für " +"automatische Zuweisung oder auf 'None' setzen für einen headless-Service." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole, die das ClusterRoleBinding referenzieren soll" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole, die das RoleBinding referenzieren soll" + +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Konvertiere Config-Dateien zwischen verschiedenen API Versionen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Kopiere Dateien und Ordner aus/in Container(n)." 
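Each entry above pairs an English `msgid` with its German `msgstr`; at runtime the catalog is keyed by the exact English literal passed to `i18n.T`, which is why `extract.py` wraps existing strings rather than renaming them. A minimal sketch of the command shape that extraction leaves behind (`newCopyCmd` is a hypothetical example, not part of this diff):

```go
package main

import (
	"github.com/spf13/cobra"
	"k8s.io/kubectl/pkg/util/i18n"
)

// newCopyCmd sketches what extract.py produces: every user-visible
// string is wrapped in i18n.T, and the English literal doubles as the
// msgid looked up in catalogs such as the de_DE one in this file.
// The command itself is hypothetical.
func newCopyCmd() *cobra.Command {
	return &cobra.Command{
		Use:   "cp",
		Short: i18n.T("Copy files and directories to and from containers."),
	}
}

func main() {
	_ = newCopyCmd().Execute()
}
```

With no catalog loaded for the current locale, `i18n.T` falls back to returning the English literal unchanged, so wrapping strings is safe even before a translation exists.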
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Erstelle ein TLS-Secret" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Erstelle einen Namespace mit dem gegebenen Namen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Erstelle ein Secret für die Benutzung mit einer Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Erstelle ein Secret mit dem angegebenen Sub-Befehl" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Erstelle einen ServiceAccount mit dem gegebenen Namen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Lösche das angegebene Cluster aus der kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Lösche den angegebenen Kontext aus der kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Lehne eine Certificate-Signing-Request ab" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Beschreibe einen oder mehrere Kontexte" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Zeige Cluster, die in der kubeconfig definiert sind" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" +"Zeige vereinte kubeconfig-Einstellungen oder die angegebene kubeconfig-Datei" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Zeige eine oder mehrere Resourcen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Leere Knoten, um eine Wartung vorzubereiten" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Bearbeite eine Resource auf dem Server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "E-Mail für Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Führe einen Befehl im Container aus" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Leite einen oder mehrere lokale Ports an einen Pod weiter" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Hilfe für jeden Befehl" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Markiere Knoten als schedulable" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Markiere Knoten als unschedulable" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Markiere die gegebene Resource als pausiert" + +#: 
staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Verändere Certificate-Resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Verändere kubeconfig Dateien" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name oder Nummer des Ports in dem Container, zu dem der Service Daten leiten " +"soll. Optional." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Zeige nur Logs nach einem bestimmten Datum (RFC3339) an. Zeigt standardmäßig " +"alle logs. Es kann entweder since-time oder since benutzt werden." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Zeige Shell-Completion-Code für die angegebene Shell (bash oder zsh)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Passwort für die Authentifizierung bei der Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Pfad des Public-Key-Zertifikats im PEM-Format." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Pfad zum Private-Key, der zum gegebenen Zertifikat passt." + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Vorbedingung für Resource-Version. Verlangt, dass die aktuelle Resource-" +"Version diesen Wert erfüllt, um zu skalieren." 
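The `Plural-Forms: nplurals=2; plural=(n != 1);` header near the top of this catalog is the expression a gettext consumer evaluates to choose between the two German forms of a plural `msgid`. A short illustration of that arithmetic (the `pluralFormDE` helper is hypothetical, not part of the vendored package):

```go
package main

import "fmt"

// pluralFormDE evaluates the de_DE header rule "plural=(n != 1)":
// form 0 is the singular, form 1 covers everything else, including zero.
func pluralFormDE(n int) int {
	if n != 1 {
		return 1
	}
	return 0
}

func main() {
	for _, n := range []int{0, 1, 2, 5} {
		// Prints: n=0 -> form 1, n=1 -> form 0, n=2 -> form 1, n=5 -> form 1
		fmt.Printf("n=%d -> form %d\n", n, pluralFormDE(n))
	}
}
```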
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Schreibt die Client- und Server-Versionsinformation" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Schreibt die Liste von Optionen, die alle Befehle erben" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Schreibt die Logs für einen Container in einem Pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Setze eine pausierte Resource fort" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role, die dieses RoleBinding referenzieren soll" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Starte ein bestimmtes Image auf dem Cluster" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Starte einen Proxy zum Kubernetes-API-Server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Setze bestimmte Features auf Objekten" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Setze den Selektor auf einer Resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Zeige Details zu einer bestimmten Resource oder Gruppe von Resourcen" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Zeige den Status des Rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Synonym für --target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "Das Image, dass auf dem Container gestartet werden soll." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"Die Image-Pull-Policy für den Container. Wenn leer, wird der Wert nicht vom " +"Client gesetzt, sondern standardmäßig vom Server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"Die minimale Anzahl oder Prozentzahl von verfügbaren Pods, die das Budget " +"voraussetzt." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "Der Name des neu erstellten Objekts." + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"Der Name des neu erstellten Objekts. Falls nicht angegeben, wird der Name " +"der Input-Resource verwendet." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. 
The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"Der Name des zu verwendenden API-Generators. Es gibt zwei Generatoren: " +"'service/v1' und 'service/v2'. Der einzige Unterschied ist, dass der " +"Serviceport in v1 'default' heißt, während er in v2 unbenannt bleibt. " +"Standard ist 'service/v2'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "" +"Das Netzwerkprotokoll, für den zu erstellenden Service. Standard ist 'TCP'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"Der Port auf den der Service hören soll. Wird von der angebotenen Resource " +"kopiert, falls nicht angegeben" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "Der Typ des zu erstellenden Secrets" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Widerrufe ein vorheriges Rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Aktualisiere Resourcen requests/limits auf Objekten mit Pod-Templates" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Aktualisiere die Annotationen auf einer Resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Aktualisiere die Labels auf einer Resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Aktualisiere die Taints auf einem oder mehreren Knoten" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Username für Authentifizierung bei der Docker-Registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "Zeige rollout-Verlauf" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl kontrolliert den Kubernetes-Cluster-Manager" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using " +#~ "the cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Erstellt ein ClusterRoleBinding für user1, user2 und group1 mit " +#~ "der ClusterRole cluster-admin\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Erstellt ein RoleBinding für user1, user2 und group1 mit der " +#~ "ClusterRole admin\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on 
folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys " +#~ "instead of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Erstellt eine neue ConfigMap mit dem Namen my-config basierend " +#~ "auf dem Ordner bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Erstellt eine neue ConfigMap mit dem Namen my-config mit den " +#~ "angegebenen Keys statt des Dateinamens auf der Festplatte.\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Erstellt eine neue ConfigMap mit dem Namen my-config mit " +#~ "key1=config1 und key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Wenn keine .dockercfg Datei existiert, kann direkt ein dockercfg " +#~ "Secret erstellen mit:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Wende die Konfiguration in pod.json auf einen Pod an.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Wende die JSON-Daten von stdin auf einen Pod an.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Hinweis: --prune ist noch in Alpha\n" +#~ "\t\t# Wende die Konfiguration, mit dem Label app=nginx, im manifest.yaml " +#~ "an und lösche alle Resourcen, die nicht in der Datei sind oder nicht das " +#~ "Label app=nginx besitzen.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Wende die Konfiguration im manifest.yaml an und lösche alle " +#~ "ConfigMaps, die nicht in der Datei sind.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" + +#, c-format +#~ msgid "" +#~ 
"\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, no target CPU utilization specified so a default autoscaling " +#~ "policy will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of " +#~ "pods between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Auto-skaliere ein Deployment \"foo\", mit einer Anzahl an Pods " +#~ "zwischen 2 und 10, eine Ziel-CPU-Auslastung ist angegeben, sodass eine " +#~ "Standard-autoskalierungsregel verwendet wird:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto-skaliere einen Replication-Controller \"foo\", mit einer " +#~ "Anzahl an Pods zwischen 1 und 5, mit einer Ziel-CPU-Auslastung von 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Konvertiere 'pod.yaml' zur neuesten Version und schreibe auf " +#~ "stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Konvertiere den aktuellen Zustand der Resource, die in 'pod.yaml' " +#~ "angegeben ist, zur neuesten Version\n" +#~ "\t\t# und schreibe auf stdout im JSON-Format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Konvertiere alle Dateien im aktuellen Ordner zur neuesten Version " +#~ "und erstelle sie.\n" +#~ "\t\tkubectl convert -f . 
| kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt eine ClusterRole \"pod-reader\", die es Nutzern erlaubt " +#~ "\"get\", \"watch\" und \"list\" auf den Pods auszuführen\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Erstellt eine ClusterRole \"pod-reader\" mit dem angegebenen " +#~ "ResourceName\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt eine Role \"pod-reader\", die es dem Nutzer erlaubt \"get" +#~ "\", \"watch\" und \"list\" auf den Pods auszuführen\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Erstellt eine Role \"pod-reader\" mit dem angegebenen ResourceName\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt eine neue ResourceQuota my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Erstellt eine neue ResourceQuota best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt ein Pod-Disruption-Budget my-pdb, dass alle Pods mit dem " +#~ "Label app=rails auswählt\n" +#~ "\t\t# und sicherstellt, dass 
mindestens einer von ihnen zu jedem " +#~ "Zeitpunkt verfügbar ist.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Erstellt ein Pod-Disruption-Budget my-pdb, dass alle Pods mit dem " +#~ "Label app=nginx auswählt\n" +#~ "\t\t# und sicherstellt, dass mindestens die Hälfte der gewählten Pods zu " +#~ "jedem Zeitpunkt verfügbar ist.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt einen Pod mit den Daten in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Erstellt einen Pod basierend auf den JSON-Daten von stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Verändert die Daten in docker-registry.yaml in JSON mit dem v1 API " +#~ "Format und erstellt eine Resource mit den veränderten Daten.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erstellt einen Service für einen replizierten nginx, der auf Port " +#~ "80 hört und verbindet sich mit den Containern auf Port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für einen Replication-Controller, der über " +#~ "type und name in \"nginx-controller.yaml\" identifiziert wird, auf Port " +#~ "80 hört und verbindet sich mit den 
Containern auf Port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service, mit dem Namen \"frontend\", für einen Pod " +#~ "valid-pod, der auf port 444 hört\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Erstellt einen zweiten Service basierend auf dem vorherigen " +#~ "Service, der den Container Port 8443 auf Port 443 mit dem Namen \"nginx-" +#~ "https\" anbietet\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für eine Replicated-Streaming-Application " +#~ "auf Port 4100, die UDP-Traffic verarbeitet und 'video-stream' heißt.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für einen replizierten nginx mit einem " +#~ "Replica-Set, dass auf Port 80 hört und verbindet sich mit den Containern " +#~ "auf Port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Erstellt einen Service für ein nginx Deployment, dass auf Port 80 " +#~ "hört und verbindet sich mit den Containern auf Port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Löscht einen Pod mit type und name aus pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Löscht einen Pod mit dem type und name aus den JSON-Daten von " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Löscht Pods und Services mit den Namen \"baz\" und \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Löscht Pods und Services mit dem Label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Löscht einen Pod mit minimaler Verzögerung\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Erzwingt das Löschen eines Pods auf einem toten Node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Löscht alle Pods\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# 
Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Beschreibt einen Knoten\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Beschreibt einen Pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Beschreibt einen Pod mit type und name aus \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Beschreibt alle Pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Beschreibt Pods mit dem Label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Beschreibt alle Pods, die vom ReplicationController 'frontend' " +#~ "verwaltet werden (rc-erstellte Pods\n" +#~ "\t\t# bekommen den Namen des rc als Prefix im Podnamen).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Leere den Knoten \"foo\", selbst wenn er Pods enthält, die nicht " +#~ "von einem ReplicationController, ReplicaSet, Job, DaemonSet oder " +#~ "StatefulSet verwaltet werden.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# Wie zuvor, aber es wird abgebrochen, wenn er Pods enthält, die " +#~ "nicht von einem ReplicationController, ReplicaSet, Job, DaemonSet oder " +#~ "StatefulSet verwaltet werden und mit einer Schonfrist von 15 Minuten.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Bearbeite den Service 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Benutze einen anderen Editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Bearbeite den Job 'myjob' in JSON mit dem v1 API Format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Bearbeite das Deployment 'mydeployment' in YAML und speichere die " +#~ "veränderte Konfiguration in ihrer Annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container 
from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom Aufruf von 'date' auf dem Pod 123456-7890, " +#~ "mit dem ersten Container als Standard\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom Aufruf von 'date' im Ruby-Container aus dem " +#~ "Pod 123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Wechsle in den Terminal-Modus und sende stdin zu 'bash' im Ruby-" +#~ "Container aus dem Pod 123456-7890\n" +#~ "\t\t# und sende stdout/stderr von 'bash' zurück zum Client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom laufenden Pod 123456-7890, mit dem ersten " +#~ "Container als Standard\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom Ruby-Container aus dem Pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Wechsle in den Terminal-Modus und sende stdin zu 'bash' im Ruby-" +#~ "Container aus dem Pod 123456-7890\n" +#~ "\t\t# und sende stdout/stderr von 'bash' zurück zum Client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Erhalte die Ausgabe vom ersten Pod eines ReplicaSets nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source if from ." 
+#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Installiere bash completion auf einem Mac mit homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Lade den kubectl-Completion-Code für bash in der aktuellen Shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Schreibe den Bash-Completion-Code in eine Datei und source sie im ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Lade den kubectl-Completion-Code für zsh[1] in die aktuelle Shell\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Liste alle Pods im ps-Format auf.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# Liste alle Pods im ps-Format mit zusätzlichen Informationen (wie " +#~ "dem Knotennamen) auf.\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# Liste alle einzelnen ReplicationController mit dem angegebenen " +#~ "Namen im ps-Format auf.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# Liste einen einzelnen Pod im JSON-Format auf.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Liste einen Pod mit Typ und Namen aus \"pod.yaml\" im JSON-Format " +#~ "auf.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Gib nur den phase-Wert des angegebenen Pods zurück.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# Liste alle ReplicationController und Services im ps-Format auf.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# Liste eine oder mehrere Resourcen über ihren Typ und Namen auf.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Liste alle Resourcen mit verschiedenen Typen auf.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Hört lokal auf Port 5000 und 6000 und leitet Daten zum/vom Port " +#~ "5000 und 6000 weiter an den Pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Hört lokal auf Port 8888 und leitet an Port 5000 des Pods weiter\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Hört auf einen zufälligen lokalen Port und leitet an Port 5000 des " +#~ "Pods weiter\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Hört auf einen zufälligen lokalen Port und leitet an Port 5000 des " +#~ "Pods weiter\n" +#~ "\t\tkubectl port-forward mypod 0:5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Markiere Knoten \"foo\" als schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Markiere Knoten \"foo\" als unschedulable.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere einen Knoten teilweise mit einem Strategic-Merge-" +#~ "Patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Aktualisiere einen Knoten, mit type und name aus \"node.json\", mit " +#~ "einem Strategic-Merge-Patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Aktualisiere das Image eines Containers; spec.containers[*].name " +#~ "ist erforderlich, da es der Merge-Key ist\n" +#~ "\t\tkubectl patch pod valid-pod -p 
'{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Aktualisiere das Image eines Containers mit einem JSON-Patch mit " +#~ "Positional-Arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Gebe die Adresse des Masters und des Cluster-Services aus\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ersetze einen Pod mit den Daten aus pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Ersetze einen Pod mit den JSON-Daten von stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Setze die Pod-Image-Version (tag) eines einzelnen Containers zu v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Erzwinge das Ersetzen, Löschen und Neu-Erstellen der Resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des Pods nginx mit nur einem Container " +#~ "zurück\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs für die Pods mit dem Label app=nginx zurück\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des zuvor gelöschten Ruby-Containers des Pods " +#~ "web-1 zurück\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Starte das Streaming der Logs vom Ruby-Container im Pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Zeige die letzten 20 Zeilen der Ausgabe des Pods nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Zeige alle Logs der letzten Stunde des Pods nginx an\n" +#~ 
"\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des ersten Containers des Jobs hello zurück\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Gib die Snapshot-Logs des Containers nginx-1 eines Deployments " +#~ "nginx zurück\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Starte einen Proxy zum Kubernetes-Apiserver auf Port 8011 und sende " +#~ "statische Inhalte von ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Starte einen Proxy zum Kubernetes-Apiserver auf einem zufälligen " +#~ "lokalen Port.\n" +#~ "\t\t# Der gewählte Port für den Server wird im stdout zurückgegeben.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Starte einen Proxy zum Kubernetes-Apiserver und ändere das API-" +#~ "Prefix zu k8s-api\n" +#~ "\t\t# Damit ist die Pods-API bspw. unter localhost:8001/k8s-api/v1/pods/ " +#~ "erreichbar\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Skaliere ein ReplicaSet 'foo' auf 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Skaliere eine Resource mit type und name aus \"foo.yaml\" auf 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# Wenn die aktuelle Größe des Deployments mysql 2 ist, skaliere mysql " +#~ "auf 3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Skaliere mehrere MultiplicationController.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Skaliere den Job cron auf 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" 
+#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Setze die Last-Applied-Configuration einer Resource auf den Inhalt " +#~ "einer Datei.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Führe Set-Last-Applied auf jeder Konfigurationsdatei in einem " +#~ "Ordner aus.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Setze die Last-Applied-Configuration einer Resource auf den Inhalt " +#~ "einer Datei; erstellt die Annotation, wenn sie noch nicht existiert.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Stoppe foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stoppe Pods und Services mit dem Label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Stoppe den in service.json definierten Service\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Stoppe alle Resourcen im Ordner path/to/resources\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von hazelcast und öffne Port 5701 auf " +#~ "dem Container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von hazelcast und setze die Umgebungs-" +#~ "variablen \"DNS_DOMAIN=cluster\" und \"POD_NAMESPACE=default\" im " +#~ "Container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Starte eine replizierte Instanz von nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Testlauf. Zeige die zugehörigen API Objekte ohne sie zu erstellen.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Starte eine einzelne Instanz von nginx, aber überlade die Spec des " +#~ "Deployments mit einer Teilmenge von JSON-Werten.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Starte einen busybox Pod und lass ihn im Vordergrund laufen; starte " +#~ "ihn nicht neu, wenn er existiert.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Starte einen nginx-Container mit dem Standardkommando, aber " +#~ "übergebe die Parameter (arg1 .. argN) an das Kommando.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Starte den nginx-Container mit einem anderen Kommando und " +#~ "Parametern.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Starte den perl-Container, um π auf 2000 Stellen zu berechnen und " +#~ "gib es aus.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Starte den cron-Job, um π auf 2000 Stellen zu berechnen und gib sie " +#~ "alle 5 Minuten aus.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere Knoten 'foo' mit einem Taint mit dem Key 'dedicated', " +#~ "dem Value 'special-user' und dem Effect 'NoSchedule'.\n" +#~ "\t\t# Wenn ein Taint mit dem Key und Effect schon existiert, wird sein " +#~ "Value mit den gegebenen Werten ersetzt.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Entferne vom Knoten 'foo' den Taint mit dem Key 'dedicated' und dem " +#~ "Effect 'NoSchedule', wenn er existiert.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Entferne vom Knoten 'foo' alle Tains mit dem Key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod 'foo' mit dem Label 'unhealthy' und dem Value " +#~ "'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod 'foo' mit dem Label 'status' und dem Value " +#~ "'unhealthy' und überschreibe alle bisherigen Values.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aktualisiere alle Pods im Namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod mit type und name aus \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aktualisiere den Pod 'foo', wenn die Resource sich nicht von " +#~ "version 1 unterscheidet.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ 
"\n" +#~ "\t\t# Aktualisiere den Pod 'foo', indem das Label 'bar' gelöscht wird, " +#~ "wenn es existiert.\n" +#~ "\t\t# Benötigt kein --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods in frontend-v1 mit den neuen Replication-" +#~ "Controller Daten in frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods in frontend-v1 mit den JSON-Daten von stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods von frontend-v1 auf frontend-v2, indem das " +#~ "Image geändert wird und\n" +#~ "\t\t# der Name des ReplicationControllers.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Aktualisiere die Pods in frontend, indem das Image geändert, aber " +#~ "der alte Name beibehalten wird.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Breche ein laufendes Rollout (von frontend-v1 zu frontend-v2) ab " +#~ "und mache es rückgängig.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Zeige die Annotation Last-Applied-Configuration mit type/name in " +#~ "YAML an.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# Zeige die Annotation Last-applied-configuration mit der Datei in " +#~ "JSON an\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tWende eine Konfiguration auf eine Resource mit Dateinamen oder stdin " +#~ "an.\n" +#~ "\t\tDie Resource wird erstellt, wenn sie noch nicht existiert.\n" +#~ "\t\tUm 'apply' zu benutzen, muss die Resource initital mit 'apply' oder " +#~ "'create --save-config' erstellt werden.\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: Die --prune Funktion ist noch nicht fertig. Benutze " +#~ "sie nicht, wenn der aktuelle Zustand nicht bekannt ist. Siehe https://" +#~ "issues.k8s.io/34274." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein ClusterRoleBinding für eine bestimmte ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein RoleBinding für eine bestimmte ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein TLS-Secret vom gegebenen Public/Private-Schlüsselpaar.\n" +#~ "\n" +#~ "\t\tDas Public/Private-Schlüsselpaar muss vorab bestehen. Das Public-Key-" +#~ "Zertifikat muss im PEM-Format sein und zum gegebenen Private-Key passen." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine ConfigMap basierend auf einer Datei, einem Order oder " +#~ "einem gegebenen Wert.\n" +#~ "\n" +#~ "\t\tEine einzelne ConfigMap kann eins oder mehr Key/Value-Paare " +#~ "beinhalten.\n" +#~ "\n" +#~ "\t\tWenn man eine ConfigMap von einer Datei erstellt, wird der Key " +#~ "standardmäßig der Name der Datei, und der Wert wird\n" +#~ "\t\tstandardmäßig der Dateiinhalt. Wenn der Dateiname ein ungültiger Key " +#~ "ist, kann ein anderer Key angegeben werden.\n" +#~ "\n" +#~ "\t\tWenn man eine ConfigMap von einem Ordner erstellt, wird jede Datei, " +#~ "deren Name ein gültiger Key ist\n" +#~ "\t\tin die ConfigMap aufgenommen. Jegliche Einträge im Ordner, die keine " +#~ "regulären Dateien sind, werden ignoriert (z.B. SubDirectories, \n" +#~ "\t\tSymLinks, Devices, Pipes, usw)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein Secret für die Benutzung mit Docker-Registries.\n" +#~ "\n" +#~ "\t\tDockercfg Secrets werden für die Authentifizierung bei Docker-" +#~ "Registries benutzt.\n" +#~ "\n" +#~ "\t\tWenn die Docker-command -line zum pushen von Images benutzt wird, " +#~ "kann man sich bei einer gegebenen Registry authentifizieren mit\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " Dies produziert eine ~/.dockercfg Datei, die für folgende 'docker " +#~ "push' und 'docker pull' Befehle genutzt wird,\n" +#~ "\t\tum sich an der Registry zu authentifizieren. Die E-Mail-Adresse ist " +#~ "optional.\n" +#~ "\n" +#~ "\t\tBei der Erstellung von Applikationen, kann eine Docker-Registry eine " +#~ "Authentifizierung verlangen. Damit\n" +#~ "\t\tdeine Knoten in deinem Namen Images herunterladen können, benötigen " +#~ "sie die Credentials. Man kann diese Information bereitstellen\n" +#~ "\t\tindem man ein dockercfg secret erstellt und zu seinem ServiceAccount " +#~ "hinzufügt." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein Pod-Disruption-Budget mit dem gegebenen name, selector " +#~ "und der gewünschten Mindestanzahl verfügbarer Pods" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine Resource mit Dateinamen oder stdin.\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle eine ResourceQuota mit dem gegebenen name, hard limits und " +#~ "optional scopes" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. 
If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle ein Secret basierend auf einer Datei, einem Ordner oder " +#~ "einem gegebenen Wert.\n" +#~ "\n" +#~ "\t\tEin einzelnes Secret kann ein oder mehrere Key/Value-Paare beinhalten.\n" +#~ "\n" +#~ "\t\tWenn man ein Secret von einer Datei erstellt, wird der Key " +#~ "standardmäßig der Name der Datei, und der Wert wird\n" +#~ "\t\tstandardmäßig der Dateiinhalt. Wenn der Dateiname ein ungültiger Key " +#~ "ist, kann ein anderer Key angegeben werden.\n" +#~ "\n" +#~ "\t\tWenn man ein Secret von einem Ordner erstellt, wird jede Datei, deren " +#~ "Name ein gültiger Key ist,\n" +#~ "\t\tin das Secret aufgenommen. Jegliche Einträge im Ordner, die keine " +#~ "regulären Dateien sind, werden ignoriert (z.B. SubDirectories, \n" +#~ "\t\tSymLinks, Devices, Pipes, usw.)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstelle und starte ein bestimmtes Image, möglicherweise repliziert.\n" +#~ "\n" +#~ "\t\tErstellt ein Deployment oder Job, um den/die erstellten Container zu " +#~ "verwalten." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tErstellt einen AutoScaler, der die Anzahl der Pods, die im Kubernetes-" +#~ "Cluster laufen, automatisch wählt und anpasst.\n" +#~ "\n" +#~ "\t\tSucht ein Deployment, ReplicaSet oder ReplicationController anhand des " +#~ "Namens und erstellt einen AutoScaler, der die Resource als Referenz nimmt.\n" +#~ "\t\tEin AutoScaler kann die Anzahl der im System deployten Pods nach " +#~ "Bedarf erhöhen oder verringern." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. 
To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tLöscht die Resourcen mit Dateinamen, stdin, resources- und names- " +#~ "oder mit resources- und label-Selektor.\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert. Nur einer der Parameter " +#~ "darf verwendet werden: Dateiname,\n" +#~ "\t\tresources- und names-, oder resources- und label-Selektor.\n" +#~ "\n" +#~ "\t\tManche Resourcen, zum Beispiel Pods, unterstützen graziöses Löschen. " +#~ "Sie definieren einen Standardzeitraum,\n" +#~ "\t\tbevor das Löschen erzwungen wird (grace-period), aber dieser Wert " +#~ "kann überschrieben werden mit\n" +#~ "\t\tder --grace-period Option, oder mit --now, das die grace-period auf 1 " +#~ "setzt. Da diese Resourcen\n" +#~ "\t\thäufig Einheiten im Cluster sind, kann das Löschen nicht immer direkt " +#~ "bestätigt werden. Wenn der Knoten,\n" +#~ "\t\tauf dem der Pod läuft, nicht verfügbar ist oder den API Server nicht " +#~ "erreichen kann, kann das Löschen bedeutend länger dauern\n" +#~ "\t\tals die grace-period. Um das Löschen zu erzwingen, muss eine grace-" +#~ "period von 0 übergeben werden\n" +#~ "\t\tund die --force Option gesetzt sein.\n" +#~ "\n" +#~ "\t\tWICHTIG: Ein erzwungenes Löschen wartet nicht auf die Bestätigung, " +#~ "dass der Prozess\n" +#~ "\t\tbeendet wurde, was den Prozess am Leben erhalten kann, bis der Knoten " +#~ "die Löschung erkennt\n" +#~ "\t\tund das graziöse Löschen beendet. Wenn Prozesse Shared-Storage oder " +#~ "eine Remote-API verwenden und\n" +#~ "\t\tden Namen des Pods brauchen, um sich selbst zu identifizieren, kann " +#~ "das erzwungene Löschen dazu führen, dass\n" +#~ "\t\tmehrere Prozesse auf verschiedenen Maschinen die gleiche Identität " +#~ "verwenden, was zu\n" +#~ "\t\tDatenkorruption oder Inkonsistenz führen kann. 
Erzwinge nur ein " +#~ "Löschen, wenn sichergestellt ist, dass der Pod\n" +#~ "\t\tgelöscht ist, oder wenn die Anwendung mehrere gleichzeitig laufende " +#~ "Kopien verarbeiten kann.\n" +#~ "\t\tAußerdem kann es passieren, dass der Scheduler neue Pods auf dem " +#~ "Knoten platziert, bevor der Knoten\n" +#~ "\t\tdie Resourcen freigegeben hat, sodass diese Pods direkt entfernt " +#~ "werden.\n" +#~ "\n" +#~ "\t\tBerücksichtige außerdem, dass der Delete-Befehl KEINE Versionen " +#~ "prüft, sodass, wenn jemand\n" +#~ "\t\tein Update einer Resource gleichzeitig mit Deinem delete anstößt, " +#~ "dessen Update\n" +#~ "\t\tmit dem Rest der Resource entfernt wird." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tVeraltet: Fahre eine Resource mit Namen oder Dateinamen graziös " +#~ "herunter.\n" +#~ "\n" +#~ "\t\tDer Stop-Befehl ist veraltet und alle Funktionen werden mit dem " +#~ "Delete-Befehl abgedeckt.\n" +#~ "\t\tSiehe 'kubectl delete --help' für mehr Details.\n" +#~ "\n" +#~ "\t\tVersucht, eine Resource, die graziöses Löschen unterstützt, " +#~ "herunterzufahren und zu löschen.\n" +#~ "\t\tWenn die Resource skaliert werden kann, wird sie vor dem Löschen auf " +#~ "0 skaliert." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tZeigt die Resourcennutzung (CPU/Memory/Storage) von Knoten.\n" +#~ "\n" +#~ "\t\tDer top-node-Befehl erlaubt es, die Resourcennutzung von Knoten zu " +#~ "betrachten." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tZeigt die Resourcennutzung (CPU/Memory/Storage) von Pods.\n" +#~ "\n" +#~ "\t\tDer 'top pod'-Befehl erlaubt es, die Resourcennutzung von Pods zu " +#~ "betrachten.\n" +#~ "\n" +#~ "\t\tAufgrund der Metrik-Pipeline-Verzögerung können sie für ein paar " +#~ "Minuten nach der Pod-Erstellung\n" +#~ "\t\tnicht verfügbar sein." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tZeige Resourcennutzung (CPU/Memory/Storage).\n" +#~ "\n" +#~ "\t\tDer top-Befehl erlaubt es, die Resourcennutzung von Knoten oder Pods " +#~ "zu betrachten.\n" +#~ "\n" +#~ "\t\tDieser Befehl benötigt eine korrekt konfigurierte und funktionierende " +#~ "Heapster-Installation auf dem Server. 
" + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tLeere Knoten, um eine Wartung vorzubereiten.\n" +#~ "\n" +#~ "\t\tDer gegebene Knoten wird als unschedulable markiert, um die Zuordnung " +#~ "von neuen Pods zu verhindern.\n" +#~ "\t\t'drain' entfernt den Pod, falls der API-Server die Entfernung " +#~ "unterstützt\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Wenn nicht, wird ein " +#~ "normales DELETE verwendet,\n" +#~ "\t\tum die Pods zu löschen.\n" +#~ "\t\t'drain' entfernt oder löscht alle Pods mit der Ausnahme von Mirror-" +#~ "Pods (welche vom API Server nicht gelöscht werden können)\n" +#~ "\t\tWenn DaemonSet-verwaltete Pods existieren, wird 'drain' nicht " +#~ "fortgesetzt\n" +#~ "\t\tohne die --ignore-daemonsets Option, und es wird in keinem Fall\n" +#~ "\t\tDaemonSet-verwaltete Pods löschen, weil diese Pods direkt ersetzt " +#~ "würden durch den\n" +#~ "\t\tDaemonSet-Controller, der unschedulable Markierungen ignoriert. Wenn " +#~ "es irgendwelche\n" +#~ "\t\tPods gibt, die weder Mirror-Pods sind, noch von einem " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet oder Job verwaltet werden, wird " +#~ "drain keine Pods löschen, außer die\n" +#~ "\t\t--force Option ist gesetzt. --force lässt das Löschen selbst zu, " +#~ "wenn die verwaltende Resource von einem\n" +#~ "\t\toder mehreren Pods fehlt.\n" +#~ "\n" +#~ "\t\t'drain' wartet auf eine graziöse Löschung. 
Man sollte auf der " +#~ "Maschine nichts tun, während\n" +#~ "\t\tder Befehl läuft.\n" +#~ "\n" +#~ "\t\tWenn der Knoten wieder bereit ist, die Arbeit aufzunehmen, benutze " +#~ "kubectl uncordon,\n" +#~ "\t\twas den Knoten als schedulable markiert.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tBearbeite eine Resource mit dem Standardeditor.\n" +#~ "\n" +#~ "\t\tDer edit-Befehl erlaubt es, jede API-Resource direkt zu bearbeiten, " +#~ "wenn sie mit den\n" +#~ "\t\tCommand-Line-Tools erreichbar ist. Er öffnet den Editor, der in der " +#~ "KUBE_EDITOR oder EDITOR\n" +#~ "\t\tUmgebungsvariable festgelegt ist, oder 'vi' auf Linux und 'notepad' " +#~ "auf Windows.\n" +#~ "\t\tEs ist möglich, mehrere Objekte zu bearbeiten, aber die Änderungen " +#~ "werden nacheinander angewendet. Der Befehl\n" +#~ "\t\takzeptiert Dateinamen und Command-Line-Parameter, aber die " +#~ "verwendeten Dateien müssen\n" +#~ "\t\tvorab gespeicherte Versionen von Resourcen sein.\n" +#~ "\n" +#~ "\t\tDie Bearbeitung verwendet die API Version, die genutzt wurde, um die " +#~ "Resource zu lesen.\n" +#~ "\t\tUm eine spezifische API Version zu verwenden, muss die vollständige " +#~ "Resource, Version und Group angegeben werden.\n" +#~ "\n" +#~ "\t\tDas Standardformat ist YAML. Um mit JSON zu arbeiten, setze \"-o json" +#~ "\".\n" +#~ "\n" +#~ "\t\tDie Option --windows-line-endings kann benutzt werden, um Windows-" +#~ "Zeilenumbrüche zu verwenden,\n" +#~ "\t\tansonsten wird der Standard des Betriebssystems verwendet.\n" +#~ "\n" +#~ "\t\tFalls beim Update ein Fehler auftritt, wird eine temporäre Datei auf " +#~ "der Festplatte angelegt,\n" +#~ "\t\tdie die nicht verarbeiteten Änderungen enthält. Der häufigste Fehler " +#~ "beim Bearbeiten einer Resource\n" +#~ "\t\tist ein anderer Editor, der die Resource auf dem Server ändert. 
Wenn " +#~ "das auftritt, muss man\n" +#~ "\t\tseine Änderungen auf die neue Version anwenden oder seine temporäre\n" +#~ "\t\tgespeicherte Kopie mit der neuesten Resourcenversion aktualisieren." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tGibt den Shell-Completion-Code für die angegebene Shell aus (bash " +#~ "oder zsh).\n" +#~ "\t\tDer Shell-Code muss für eine interaktive Vervollständigung von " +#~ "kubectl \n" +#~ "\t\tausgewertet werden. Das ist möglich, indem man\n" +#~ "\t\tdie .bash_profile Datei sourcet.\n" +#~ "\n" +#~ "\t\tHinweis: Dies setzt das Bash-Completion-Framework voraus, das auf dem " +#~ "Mac nicht standardmäßig installiert ist. \n" +#~ "\t\tEs kann mit homebrew installiert werden:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tSobald es installiert ist, muss bash_completion ausgewertet werden. " +#~ "Dies wird erreicht, indem man\n" +#~ "\t\tdie folgende Zeile zur .bash_profile-Datei hinzufügt\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tHinweis für zsh Nutzer: [1] zsh completions werden nur in Versionen " +#~ "von zsh >= 5.2 unterstützt" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tFühre ein Rolling-Update des gegebenen ReplicationControllers aus.\n" +#~ "\n" +#~ "\t\tErsetzt den gegebenen ReplicationController mit einem neuen " +#~ "Replication-Controller, indem die neue PodTampleta Pod für Pod\n" +#~ "\t\tangewendet wird. Die new-controller.json muss den gleichen Namespace " +#~ "wie\n" +#~ "\t\tder aktuelle ReplicationController besitzen und mindestens ein " +#~ "gemeinsames Label im ReplicaSelector überschreiben.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. 
This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tErsetze eine Resource mit Dateinamen oder stdin.\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert. Wenn eine existierende " +#~ "Resource ersetzt wird,\n" +#~ "\t\tmuss die vollständige Spec mitgegeben werden. Diese kann hiermit " +#~ "ausgelesen werden\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tBitte konsultiere https://htmlpreview.github.io/?https://github.com/" +#~ "kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html, um " +#~ "zu erfahren, ob ein Feld verändert werden darf." + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tSetze eine neue Größe für ein Deployment, ReplicaSet, Replication-" +#~ "Controller oder Job.\n" +#~ "\n" +#~ "\t\tScale erlaubt es Nutzern, eine oder mehrere Voraussetzungen für die " +#~ "Aktion festzulegen.\n" +#~ "\n" +#~ "\t\tWenn --current-replicas oder --resource-version gegeben ist, wird es " +#~ "validiert, bevor\n" +#~ "\t\tscale versucht wird, und es wird garantiert, dass die Voraussetzungen " +#~ "erfüllt sind, wenn\n" +#~ "\t\tscale zum Server geschickt wird." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tProxy alle Teile der Kubernetes-API und sonst nichts:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tProxy nur bestimmte Teile der Kubernetes-API und einige statische " +#~ "Dateien:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tDer Befehl oben lässt dich 'curl localhost:8001/api/v1/pods' " +#~ "aufrufen.\n" +#~ "\n" +#~ "\t\tProxy alle Teile der Kubernetes-API auf einem anderen root:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tDer Befehl oben lässt dich 'curl localhost:8001/custom/api/v1/pods' " +#~ "aufrufen" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tAktualisiere Felder einer Resource mit einem Strategic-Merge-Patch\n" +#~ "\n" +#~ "\t\tJSON- und YAML-Formate werden akzeptiert.\n" +#~ "\n" +#~ "\t\tBitte konsultiere https://htmlpreview.github.io/?https://github.com/" +#~ "kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions.html um " +#~ "zu erfahren, ob ein Feld mutable ist." + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Erstelle ein neues TLS Secret tl-secret mit dem gegebenen " +#~ "Schlüsselpaar:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." 
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Erstelle ein neues Secret my-secret mit einem Key für jede Datei im " +#~ "Ordner bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Erstelle ein neues Secret my-secret mit den gegebenen Keys statt " +#~ "der Namen auf der Festplatte\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Erstelle ein neues Scret my-secret mit key1=supersecret und " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Erstelle einen neuen ExternalName-Service my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Erstelle einen neuen ClusterIP-Service my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Erstelle einen neuen ClusterIP-Service my-cs (im headless-Modus)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Erstelle ein neues Deployment my-dep, dass das busybox-Image " +#~ "nutzt.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Erstelle einen neuen NodePort-Service my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Erstelle einen ClusterIP-Service mit dem gegebenen Namen." + +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Erstelle ein Deployment mit dem gegebenen Namen." + +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Erstelle einen NodePort-Service mit dem gegebenen Namen." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Zeigt Adressen des Master und von Services mit Label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " Für das weitere Debugging und die Diagnose von Clusterproblemen nutze " +#~ "'kubectl cluster-info dump'." + +#~ msgid "A schedule in the Cron format the job should be run with." 
+#~ msgstr "Ein Schedule im Cron Format, dass der Job nutzen soll." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "" +#~ "Wende eine Konfiguration auf eine Resource über den Dateinamen oder stdin " +#~ "an" + +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-skaliere ein Deployment, ReplicaSet oder ReplicationController" + +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Name des Containers dessen Image aktualisiert wird. Nur relevant, wenn --" +#~ "image angegeben ist, sonst ignoriert. Verpflichtend, wenn --image auf " +#~ "einem Multi-Container-Pod verwendet wird" + +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Erstelle ein ClusterRoleBinding für eine bestimmte ClusterRole" + +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Erstelle einen LoadBalancer-Service." + +#~ msgid "Create a NodePort service." +#~ msgstr "Erstelle einen NodePort-Service." + +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Erstelle ein RoleBinding für eine bestimmte Role oder ClusterRole" + +#~ msgid "Create a clusterIP service." +#~ msgstr "Erstelle einen ClusterIP-Service" + +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "" +#~ "Erstelle eine ConfigMap von einer Datei, einem Ordner oder einem festen " +#~ "Wert" + +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Erstelle ein Deployment mit dem gegebenen Namen." + +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Erstelle ein Pod-Disruption-Budget mit dem gegebenen Namen." + +#~ msgid "Create a quota with the specified name." +#~ msgstr "Erstelle eine Quota mit dem gegebenen Namen." + +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "Erstelle eine Resource von einer Datei oder stdin" + +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "" +#~ "Erstelle ein Secret von einer lokalen Datei, einem Ordner oder einem " +#~ "festen Wert" + +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Erstelle einen Servuce mit dem angegebenen Sub-Befehl" + +#~ msgid "Create an ExternalName service." +#~ msgstr "Erstelle einen ExternalName-Service." + +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Lösche Resourcen von einer Datei, stdin, resources- und names- oder mit " +#~ "resources- und label-Selektor" + +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "" +#~ "Veraltet: Graziöses herunterfahren einer Resource über den Namen oder " +#~ "Dateinamen" + +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Zeige Resourcennutzung (CPU/Memory) von Knoten" + +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Zeige Resourcennutzung (CPU/Memory) von Pods" + +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Zeige Resourcennutzung (CPU/Memory)." 
+ +#~ msgid "Display cluster info" +#~ msgstr "Zeige Cluster-Info" + +#~ msgid "Displays the current-context" +#~ msgstr "Zeige den aktuellen Kontext" + +#~ msgid "Documentation of resources" +#~ msgstr "Dokumentation einer Resource" + +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "Zeige viele relevante Informationen für Debugging und Diagnose" + +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Explizite Vorgabe, wann Container-Images gepullt werden. Verpflichtend, " +#~ "wenn --image ist gleich dem aktuellen Image ist - sonst ignoriert." + +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP, die dem Load-Balancer zugewiesen wird. Falls leer, wird eine " +#~ "temporäre IP erstellt und verwendet (Cloud-Provider spezifisch)" + +#~ msgid "Manage a deployment rollout" +#~ msgstr "Verwalte ein Deployment-Rollout" + +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Führe ein Rolling-Update des gegebenen ReplicationControllers aus" + +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Ersetze eine Resource von einem Dateinamen oder stdin" + +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Setze eine neue Größe für ein Deployment, ReplicaSet, " +#~ "ReplicationController oder Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Setze die Annotation Last-Applied-Configuration auf einem Live-Objekt auf " +#~ "den Inhalt einer Datei." + +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "Setze einen Cluster-Eintrag in der kubeconfig" + +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "Setze einen Kontext-Eintrag in der kubeconfig" + +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "Setze einen User-Eintrag in der kubeconfig" + +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "Setze einen einzelnen Value in einer kubeconfig-Datei" + +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "Setze den aktuellen Kontext in einer kubeconfig-Datei" + +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Nehme einen Replication Controller, Service, Deployment oder Pod und " +#~ "biete ihn als neuen Kubernetes-Service an" + +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "Der Name des zu verwendenden API-Generators. Siehe http://kubernetes.io/" +#~ "docs/user-guide/kubectl-conventions/#generators für eine Übersicht." + +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "Der Name des zu verwendenden API-Generators. Zur Zeit gibt es nur einen " +#~ "Generator." + +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "Der Name des zu verwendenden Generators, um einen Service zu erstellen. " +#~ "Wird nur benutzt, wenn --expose true ist" + +#~ msgid "" +#~ "The port that this container exposes. 
If --expose is true, this is also "
+#~ "the port used by the service that is created."
+#~ msgstr ""
+#~ "Der Port, den der Container anbietet. Wenn --expose true ist, ist es "
+#~ "auch der Port, den der zu erstellende Service verwendet."
+
+#~ msgid ""
+#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is "
+#~ "'ClusterIP'."
+#~ msgstr ""
+#~ "Typ für diesen Service: ClusterIP, NodePort oder LoadBalancer. Standard "
+#~ "ist 'ClusterIP'."
+
+#~ msgid "Update field(s) of a resource using strategic merge patch"
+#~ msgstr "Aktualisiere Felder einer Resource mit einem Strategic-Merge-Patch"
+
+#~ msgid "Update image of a pod template"
+#~ msgstr "Aktualisiere das Image eines Pod-Templates"
+
+#~ msgid ""
+#~ "View latest last-applied-configuration annotations of a resource/object"
+#~ msgstr ""
+#~ "Zeige die aktuelle Annotation Last-Applied-Configuration einer Resource / "
+#~ "eines Objekts"
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.mo
new file mode 100644
index 0000000000..77b13524a3
Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.mo differ
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po
new file mode 100644
index 0000000000..538c7b2d58
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/default/LC_MESSAGES/k8s.po
@@ -0,0 +1,5077 @@
+# Test translations for unit tests.
+# Copyright (C) 2016
+# This file is distributed under the same license as the Kubernetes package.
+# FIRST AUTHOR brendan.d.burns@gmail.com, 2016.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: gettext-go-examples-hello\n"
+"Report-Msgid-Bugs-To: EMAIL\n"
+"POT-Creation-Date: 2021-07-07 20:15+0200\n"
+"PO-Revision-Date: 2017-05-24 18:01+0800\n"
+"Last-Translator: Brendan Burns \n"
+"Language-Team: \n"
+"Language: en\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: Poedit 1.8.12\n"
+"X-Poedit-SourceCharset: UTF-8\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:138
+msgid ""
+"\n"
+"\t\t\t# Approve CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate approve csr-sqgzp\n"
+"\t\t"
+msgstr ""
+"\n"
+"\t\t\t# Approve CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate approve csr-sqgzp\n"
+"\t\t"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:182
+msgid ""
+"\n"
+"\t\t\t# Deny CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate deny csr-sqgzp\n"
+"\t\t"
+msgstr ""
+"\n"
+"\t\t\t# Deny CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate deny csr-sqgzp\n"
+"\t\t"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:43
+msgid ""
+"\n"
+"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set "
+"current-context my-context\"\n"
+"\n"
+"\t\t\tThe loading order follows these rules:\n"
+"\n"
+"\t\t\t1. If the --"
+msgstr ""
+"\n"
+"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set "
+"current-context my-context\"\n"
+"\n"
+"\t\t\tThe loading order follows these rules:\n"
+"\n"
+"\t\t\t1. 
If the --" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:44 +msgid "" +"\n" +"\t\t # Create a cluster role binding for user1, user2, and group1 using the " +"cluster-admin cluster role\n" +"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-" +"admin --user=user1 --user=user2 --group=group1" +msgstr "" +"\n" +"\t\t # Create a cluster role binding for user1, user2, and group1 using the " +"cluster-admin cluster role\n" +"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-" +"admin --user=user1 --user=user2 --group=group1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:58 +msgid "" +"\n" +"\t\t # Create a new config map named my-config based on folder bar\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config with specified keys instead " +"of file basenames on disk\n" +"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1." +"txt --from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new config map named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-" +"literal=key2=config2\n" +"\n" +"\t\t # Create a new config map named my-config from the key=value pairs in " +"the file\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config from an env file\n" +"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env" +msgstr "" +"\n" +"\t\t # Create a new config map named my-config based on folder bar\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config with specified keys instead " +"of file basenames on disk\n" +"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1." 
+"txt --from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new config map named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-" +"literal=key2=config2\n" +"\n" +"\t\t # Create a new config map named my-config from the key=value pairs in " +"the file\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config from an env file\n" +"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43 +msgid "" +"\n" +"\t\t # Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1" +msgstr "" +"\n" +"\t\t # Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:56 +msgid "" +"\n" +"\t\t # If you don't already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n" +"\n" +"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n" +"\t\t kubectl create secret docker-registry my-secret --from-file=." +"dockerconfigjson=path/to/.docker/config.json" +msgstr "" +"\n" +"\t\t # If you don't already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n" +"\n" +"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n" +"\t\t kubectl create secret docker-registry my-secret --from-file=." +"dockerconfigjson=path/to/.docker/config.json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:45 +msgid "" +"\n" +"\t\t# !!!Important Note!!!\n" +"\t\t# Requires that the 'tar' binary is present in your container\n" +"\t\t# image. 
If 'tar' is not present, 'kubectl cp' will fail.\n"
+"\t\t#\n"
+"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n"
+"\t\t# file mode preservation, consider using 'kubectl exec'.\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- "
+"tar xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar "
+"xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+msgstr ""
+"\n"
+"\t\t# !!!Important Note!!!\n"
+"\t\t# Requires that the 'tar' binary is present in your container\n"
+"\t\t# image. If 'tar' is not present, 'kubectl cp' will fail.\n"
+"\t\t#\n"
+"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n"
+"\t\t# file mode preservation, consider using 'kubectl exec'.\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- "
+"tar xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar "
+"xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:119
+msgid ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. "
+"dir/kustomization.yaml\n"
+"\t\tkubectl apply -k dir/\n"
+"\n"
+"\t\t# Apply the JSON passed into stdin to a pod\n"
+"\t\tcat pod.json | kubectl apply -f -\n"
+"\n"
+"\t\t# Note: --prune is still in Alpha\n"
+"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx "
+"and delete all other resources that are not in the file and match label "
+"app=nginx\n"
+"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+"\n"
+"\t\t# Apply the configuration in manifest.yaml and delete all the other "
+"config maps that are not in the file\n"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/"
+"ConfigMap"
+msgstr ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. 
" +"dir/kustomization.yaml\n" +"\t\tkubectl apply -k dir/\n" +"\n" +"\t\t# Apply the JSON passed into stdin to a pod\n" +"\t\tcat pod.json | kubectl apply -f -\n" +"\n" +"\t\t# Note: --prune is still in Alpha\n" +"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx " +"and delete all other resources that are not in the file and match label " +"app=nginx\n" +"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +"\n" +"\t\t# Apply the configuration in manifest.yaml and delete all the other " +"config maps that are not in the file\n" +"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +"ConfigMap" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:48 +#, c-format +msgid "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +msgstr "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#: pkg/kubectl/cmd/convert/convert.go:51 +msgid "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in JSON format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . | kubectl create -f -" +msgstr "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in JSON format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . | kubectl create -f -" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:41 +msgid "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs." 
+"extensions\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." +"com/aggregate-to-monitoring=true\"" +msgstr "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs." +"extensions\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." +"com/aggregate-to-monitoring=true\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:43 +msgid "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" +msgstr "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:44 +msgid "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +msgstr "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:44 +#, c-format +msgid "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in 
time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +msgstr "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:76 +msgid "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON then create the resource " +"using the edited data\n" +"\t\tkubectl create -f docker-registry.yaml --edit -o json" +msgstr "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON then create the resource " +"using the edited data\n" +"\t\tkubectl create -f docker-registry.yaml --edit -o json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:43 +msgid "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\"\n" +"\n" +"\t\t# Create a priority class named default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\" --preemption-policy=\"Never\"" +msgstr "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\"\n" +"\n" +"\t\t# Create a priority class named default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\" --preemption-policy=\"Never\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:46 +msgid "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get" +"\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named 
\"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" +msgstr "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get" +"\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:61 +msgid "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +msgstr "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the 
" +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:64 +msgid "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." +"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." +"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." 
+"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." +"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:74 +msgid "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" +msgstr "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# 
Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:74 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. 
" +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:51 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" +msgstr "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:76 +msgid "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" +msgstr "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:138 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" +msgstr "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:55 +msgid "" +"\n" +"\t\t# Edit the service named 
'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +msgstr "" +"\n" +"\t\t# Edit the service named 'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:44 +msgid "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:48 +msgid "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. 
-i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. -i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:65 +msgid "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" 
+"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." +"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" +msgstr "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" +"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." +"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:105 +msgid "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." 
+"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" +msgstr "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." +"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:72 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" +msgstr "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod 
selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:87 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:58 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:83 +msgid "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +msgstr "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all 
commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:44 +msgid "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:56 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:53 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a 
deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:49 +msgid "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" +msgstr "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:75 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a 
file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:62 +msgid "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... 
" +msgstr "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... " + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:73 +msgid "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. 
the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" +msgstr "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:80 +msgid "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label mylabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" +msgstr "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label mylabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:95 +msgid "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type 
and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" +msgstr "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:53 +msgid "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +msgstr "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:61 +msgid "" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can set it to " +"false\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" +msgstr "" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can set it to " +"false\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:110 +msgid "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. 
This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." +msgstr "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:126 +msgid "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requestor with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" +msgstr "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requestor with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:28 +msgid "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." +msgstr "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. 
If the target version is not " +"specified or\n" +"\t\tnot supported, it converts to the latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"the -o option\n" +"\t\tto change the output destination." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:41 +msgid "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." +msgstr "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:41 +msgid "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." +msgstr "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:38 +msgid "" +"\n" +"\t\tCreate a cluster role." +msgstr "" +"\n" +"\t\tCreate a cluster role." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:46 +msgid "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:40 +msgid "" +"\n" +"\t\tCreate a cron job with the specified name." +msgstr "" +"\n" +"\t\tCreate a cron job with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:40 +msgid "" +"\n" +"\t\tCreate a job with the specified name." +msgstr "" +"\n" +"\t\tCreate a job with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCreate a namespace with the specified name."
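+
+# Editor's note (not part of the upstream catalog): a minimal usage sketch for
+# the config map help above; the file and key names are illustrative only:
+#   kubectl create configmap app-config --from-file=./app.properties --from-literal=log_level=debug
+#   kubectl create configmap dir-config --from-file=./config-dir/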
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:41 +msgid "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." +msgstr "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:41 +msgid "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." +msgstr "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:40 +msgid "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." +msgstr "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:71 +msgid "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:41 +msgid "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." +msgstr "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:40 +msgid "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." +msgstr "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." 
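+
+# Editor's note (not part of the upstream catalog): a usage sketch for the
+# docker-registry secret help above; the server name and credentials are
+# placeholders:
+#   kubectl create secret docker-registry regcred --docker-server=registry.example.com --docker-username=USER --docker-password=PASS --docker-email=user@example.com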
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCreate a role with a single rule." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:61 +msgid "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to chose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to choose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCreate a service account with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:67 +msgid "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." +msgstr "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:42 +msgid "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed."
+msgstr "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:57 +msgid "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" +msgstr "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:45 +msgid "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of argument may be " +"specified: file names,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource, you must specify the " +"--force flag.\n" +"\t\tNote: only a subset of resources support graceful deletion. 
In absence " +"of the support,\n" +"\t\tthe --grace-period flag is ignored.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods, the scheduler may place new pods on " +"those nodes before the node\n" +"\t\thas released those resources and causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone submits an\n" +"\t\tupdate to a resource right when you submit a delete, their update will " +"be lost along with the\n" +"\t\trest of the resource." +msgstr "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of argument may be " +"specified: file names,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource, you must specify the " +"--force flag.\n" +"\t\tNote: only a subset of resources support graceful deletion. In absence " +"of the support,\n" +"\t\tthe --grace-period flag is ignored.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. 
Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods, the scheduler may place new pods on " +"those nodes before the node\n" +"\t\thas released those resources, causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone submits an\n" +"\t\tupdate to a resource right when you submit a delete, their update will " +"be lost along with the\n" +"\t\trest of the resource." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:175 +msgid "" +"\n" +"\t\tDeny a certificate signing request.\n" +"\n" +"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tnot to issue a certificate to the requestor.\n" +"\t\t" +msgstr "" +"\n" +"\t\tDeny a certificate signing request.\n" +"\n" +"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller\n" +"\t\tnot to issue a certificate to the requestor.\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:53 +msgid "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with params too, example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." +msgstr "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with parameters too, for " +"example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:39 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server. 
" +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server. " + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:54 +msgid "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." +msgstr "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:92 +msgid "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tUninitialized objects are not shown unless --include-uninitialized is " +"passed.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." +msgstr "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tUninitialized objects are not shown unless --include-uninitialized is " +"passed.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:57 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." +msgstr "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:67 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." +msgstr "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:37 +msgid "" +"\n" +"\t\tDisplay the current-context." +msgstr "" +"\n" +"\t\tDisplay the current-context." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:113 +msgid "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" +msgstr "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:31 +msgid "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. 
It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:31 +msgid "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. 
The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:49 +msgid "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use -o option to change to output " +"destination." +msgstr "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use -o option to change to output " +"destination." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:47 +msgid "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" +msgstr "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:46 +msgid "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" +msgstr "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:35 +msgid "" +"\n" +"\t\tList the fields for supported resources.\n" +"\n" +"\t\tThis command describes the fields associated with each supported API " +"resource.\n" +"\t\tFields are identified via a simple JSONPath identifier:\n" +"\n" +"\t\t\t.[.]\n" +"\n" +"\t\tAdd the --recursive flag to display all of the fields at once without " +"descriptions.\n" +"\t\tInformation about each field is retrieved from the server in OpenAPI " +"format." +msgstr "" +"\n" +"\t\tList the fields for supported resources.\n" +"\n" +"\t\tThis command describes the fields associated with each supported API " +"resource.\n" +"\t\tFields are identified via a simple JSONPath identifier:\n" +"\n" +"\t\t\t.[.]\n" +"\n" +"\t\tAdd the --recursive flag to display all of the fields at once without " +"descriptions.\n" +"\t\tInformation about each field is retrieved from the server in OpenAPI " +"format." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:30 +msgid "" +"\n" +"\t\tManage the rollout of a resource." +msgstr "" +"\n" +"\t\tManage the rollout of a resource." 
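+
+# Editor's note (not part of the upstream catalog): sketches of the JSONPath
+# field lookup described in the explain help above:
+#   kubectl explain pods.spec.containers
+#   kubectl explain deployments --recursive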
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMark node as schedulable." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMark node as unschedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:57 +msgid "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." +msgstr "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:46 +msgid "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." +msgstr "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:49 +msgid "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." +msgstr "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." 
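+
+# Editor's note (not part of the upstream catalog): one way to evaluate the
+# completion code described above (bash shown; the zsh form is analogous):
+#   echo 'source <(kubectl completion bash)' >> ~/.bash_profile
+#   source <(kubectl completion bash)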
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:37
+msgid ""
+"\n"
+"\t\tProvides utilities for interacting with plugins.\n"
+"\n"
+"\t\tPlugins provide extended functionality that is not part of the major "
+"command-line distribution.\n"
+"\t\tPlease refer to the documentation and examples for more information "
+"about how to write your own plugins.\n"
+"\n"
+"\t\tThe easiest way to discover and install plugins is via the kubernetes "
+"sub-project krew.\n"
+"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/"
+"user-guide/setup/install/)"
+msgstr ""
+"\n"
+"\t\tProvides utilities for interacting with plugins.\n"
+"\n"
+"\t\tPlugins provide extended functionality that is not part of the major "
+"command-line distribution.\n"
+"\t\tPlease refer to the documentation and examples for more information "
+"about how to write your own plugins.\n"
+"\n"
+"\t\tThe easiest way to discover and install plugins is via the kubernetes "
+"sub-project krew.\n"
+"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/"
+"user-guide/setup/install/)"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:47
+msgid ""
+"\n"
+"\t\tRenames a context from the kubeconfig file.\n"
+"\n"
+"\t\tCONTEXT_NAME is the context name that you want to change.\n"
+"\n"
+"\t\tNEW_NAME is the new name you want to set.\n"
+"\n"
+"\t\tNote: If the context being renamed is the 'current-context', this field "
+"will also be updated."
+msgstr ""
+"\n"
+"\t\tRenames a context from the kubeconfig file.\n"
+"\n"
+"\t\tCONTEXT_NAME is the context name that you want to change.\n"
+"\n"
+"\t\tNEW_NAME is the new name you want to set.\n"
+"\n"
+"\t\tNote: If the context being renamed is the 'current-context', this field "
+"will also be updated."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:48
+msgid ""
+"\n"
+"\t\tReplace a resource by file name or stdin.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted. If replacing an existing resource, "
+"the\n"
+"\t\tcomplete resource spec must be provided. This can be obtained by\n"
+"\n"
+"\t\t $ kubectl get TYPE NAME -o yaml"
+msgstr ""
+"\n"
+"\t\tReplace a resource by file name or stdin.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted. If replacing an existing resource, "
+"the\n"
+"\t\tcomplete resource spec must be provided. This can be obtained by\n"
+"\n"
+"\t\t $ kubectl get TYPE NAME -o yaml"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:57
+msgid ""
+"\n"
+"\t\tRestart a resource.\n"
+"\n"
+"\t Resource rollout will be restarted."
+msgstr ""
+"\n"
+"\t\tRestart a resource.\n"
+"\n"
+"\t Resource rollout will be restarted."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:58
+msgid ""
+"\n"
+"\t\tResume a paused resource.\n"
+"\n"
+"\t\tPaused resources will not be reconciled by a controller. By resuming a\n"
+"\t\tresource, we allow it to be reconciled again.\n"
+"\t\tCurrently only deployments support being resumed."
+msgstr ""
+"\n"
+"\t\tResume a paused resource.\n"
+"\n"
+"\t\tPaused resources will not be reconciled by a controller. By resuming a\n"
+"\t\tresource, we allow it to be reconciled again.\n"
+"\t\tCurrently only deployments support being resumed."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:55
+msgid ""
+"\n"
+"\t\tRoll back to a previous rollout."
+msgstr ""
+"\n"
+"\t\tRoll back to a previous rollout."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_cluster.go:47
+msgid ""
+"\n"
+"\t\tSet a cluster entry in kubeconfig.\n"
+"\n"
+"\t\tSpecifying a name that already exists will merge new fields on top of "
+"existing values for those fields."
+msgstr ""
+"\n"
+"\t\tSet a cluster entry in kubeconfig.\n"
+"\n"
+"\t\tSpecifying a name that already exists will merge new fields on top of "
+"existing values for those fields."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_context.go:44
+msgid ""
+"\n"
+"\t\tSet a context entry in kubeconfig.\n"
+"\n"
+"\t\tSpecifying a name that already exists will merge new fields on top of "
+"existing values for those fields."
+msgstr ""
+"\n"
+"\t\tSet a context entry in kubeconfig.\n"
+"\n"
+"\t\tSpecifying a name that already exists will merge new fields on top of "
+"existing values for those fields."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:40
+msgid ""
+"\n"
+"\t\tSet a new size for a deployment, replica set, replication controller, or "
+"stateful set.\n"
+"\n"
+"\t\tScale also allows users to specify one or more preconditions for the "
+"scale action.\n"
+"\n"
+"\t\tIf --current-replicas or --resource-version is specified, it is "
+"validated before the\n"
+"\t\tscale is attempted, and it is guaranteed that the precondition holds "
+"true when the\n"
+"\t\tscale is sent to the server."
+msgstr ""
+"\n"
+"\t\tSet a new size for a deployment, replica set, replication controller, or "
+"stateful set.\n"
+"\n"
+"\t\tScale also allows users to specify one or more preconditions for the "
+"scale action.\n"
+"\n"
+"\t\tIf --current-replicas or --resource-version is specified, it is "
+"validated before the\n"
+"\t\tscale is attempted, and it is guaranteed that the precondition holds "
+"true when the\n"
+"\t\tscale is sent to the server."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_authinfo.go:70
+#, c-format
+msgid ""
+"\n"
+"\t\tSet a user entry in kubeconfig.\n"
+"\n"
+"\t\tSpecifying a name that already exists will merge new fields on top of "
+"existing values.\n"
+"\n"
+"\t\t Client-certificate flags:\n"
+"\t\t --%v=certfile --%v=keyfile\n"
+"\n"
+"\t\t Bearer token flags:\n"
+"\t\t\t --%v=bearer_token\n"
+"\n"
+"\t\t Basic auth flags:\n"
+"\t\t\t --%v=basic_user --%v=basic_password\n"
+"\n"
+"\t\tBearer token and basic auth are mutually exclusive."
+msgstr ""
+"\n"
+"\t\tSet a user entry in kubeconfig.\n"
+"\n"
+"\t\tSpecifying a name that already exists will merge new fields on top of "
+"existing values.\n"
+"\n"
+"\t\t Client-certificate flags:\n"
+"\t\t --%v=certfile --%v=keyfile\n"
+"\n"
+"\t\t Bearer token flags:\n"
+"\t\t\t --%v=bearer_token\n"
+"\n"
+"\t\t Basic auth flags:\n"
+"\t\t\t --%v=basic_user --%v=basic_password\n"
+"\n"
+"\t\tBearer token and basic auth are mutually exclusive."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70
+msgid ""
+"\n"
+"\t\tSet the latest last-applied-configuration annotations by setting it to "
+"match the contents of a file.\n"
+"\t\tThis results in the last-applied-configuration being updated as though "
+"'kubectl apply -f <file>' was run,\n"
+"\t\twithout updating any other parts of the object."
+msgstr ""
+"\n"
+"\t\tSet the latest last-applied-configuration annotations by setting it to "
+"match the contents of a file.\n"
+"\t\tThis results in the last-applied-configuration being updated as though "
+"'kubectl apply -f <file>' was run,\n"
+"\t\twithout updating any other parts of the object."
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:67 +#, c-format +msgid "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." +msgstr "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:39 +msgid "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX. If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." +msgstr "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX. If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:47 +msgid "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." +msgstr "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. 
If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:41 +#, c-format +msgid "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." +msgstr "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:50 +msgid "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:71 +msgid "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:78 +msgid "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:83 +msgid "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." +msgstr "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:87 +#, c-format +msgid "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." +msgstr "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:70 +#, c-format +msgid "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. 
If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." +msgstr "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:36 +msgid "" +"\n" +"\t\tView previous rollout revisions and configurations." +msgstr "" +"\n" +"\t\tView previous rollout revisions and configurations." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:47 +msgid "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." +msgstr "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:47 +msgid "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" +msgstr "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:74 +msgid "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" +msgstr "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:45 +msgid "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx 
image with 3 "
+"replicas\n"
+"\tkubectl create deployment my-dep --image=nginx --replicas=3\n"
+"\n"
+"\t# Create a deployment named my-dep that runs the busybox image and expose "
+"port 5701\n"
+"\tkubectl create deployment my-dep --image=busybox --port=5701"
+msgstr ""
+"\n"
+"\t# Create a deployment named my-dep that runs the busybox image\n"
+"\tkubectl create deployment my-dep --image=busybox\n"
+"\n"
+"\t# Create a deployment with a command\n"
+"\tkubectl create deployment my-dep --image=busybox -- date\n"
+"\n"
+"\t# Create a deployment named my-dep that runs the nginx image with 3 "
+"replicas\n"
+"\tkubectl create deployment my-dep --image=nginx --replicas=3\n"
+"\n"
+"\t# Create a deployment named my-dep that runs the busybox image and expose "
+"port 5701\n"
+"\tkubectl create deployment my-dep --image=busybox --port=5701"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:351
+msgid ""
+"\n"
+"\t# Create a new ExternalName service named my-ns\n"
+"\tkubectl create service externalname my-ns --external-name bar.com"
+msgstr ""
+"\n"
+"\t# Create a new ExternalName service named my-ns\n"
+"\tkubectl create service externalname my-ns --external-name bar.com"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:50
+msgid ""
+"\n"
+"\t# Set deployment nginx-deployment's service account to serviceaccount1\n"
+"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n"
+"\n"
+"\t# Print the result (in YAML format) of updated nginx deployment with the "
+"service account from local file, without hitting the API server\n"
+"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-"
+"run=client -o yaml\n"
+"\t"
+msgstr ""
+"\n"
+"\t# Set deployment nginx-deployment's service account to serviceaccount1\n"
+"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n"
+"\n"
+"\t# Print the result (in YAML format) of updated nginx deployment with the "
+"service account from local file, without hitting the API server\n"
+"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-"
+"run=client -o yaml\n"
+"\t"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:42
+msgid ""
+"\n"
+"\tCreate a deployment with the specified name."
+msgstr ""
+"\n"
+"\tCreate a deployment with the specified name."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344
+msgid ""
+"\n"
+"\tCreate an ExternalName service with the specified name.\n"
+"\n"
+"\tExternalName service references an external DNS address instead of\n"
+"\tonly pods, which will allow application authors to reference services\n"
+"\tthat exist off platform, on other clusters, or locally."
+msgstr ""
+"\n"
+"\tCreate an ExternalName service with the specified name.\n"
+"\n"
+"\tExternalName service references an external DNS address instead of\n"
+"\tonly pods, which will allow application authors to reference services\n"
+"\tthat exist off platform, on other clusters, or locally."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:61
+msgid ""
+"\n"
+"\tCreate an ingress with the specified name."
+msgstr ""
+"\n"
+"\tCreate an ingress with the specified name."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28
+msgid ""
+"\n"
+"\tHelp provides help for any command in the application.\n"
+"\tSimply type kubectl help [path to command] for full details."
+msgstr "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:44 +msgid "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." +msgstr "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:39 +msgid "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." +msgstr "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:43 +msgid "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" +msgstr "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:40 +msgid "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." +msgstr "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:68 +msgid "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" +msgstr "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:63 +msgid "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." +msgstr "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. 
The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:233 +msgid "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:274 +msgid "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:95 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " 
+"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" +msgstr "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:230 +msgid "" +"\n" +" Create a ClusterIP service with the specified name." +msgstr "" +"\n" +" Create a ClusterIP service with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Create a LoadBalancer service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:271 +msgid "" +"\n" +" Create a NodePort service with the specified name." +msgstr "" +"\n" +" Create a NodePort service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:93 +msgid "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." +msgstr "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." 
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:40
+msgid ""
+"\n"
+" Display addresses of the control plane and services with label kubernetes."
+"io/cluster-service=true.\n"
+" To further debug and diagnose cluster problems, use 'kubectl cluster-info "
+"dump'."
+msgstr ""
+"\n"
+" Display addresses of the control plane and services with label kubernetes."
+"io/cluster-service=true.\n"
+" To further debug and diagnose cluster problems, use 'kubectl cluster-info "
+"dump'."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:49
+msgid ""
+" environment variable is set, then it is used as a list of paths (normal "
+"path delimiting rules for your system). These paths are merged. When a value "
+"is modified, it is modified in the file that defines the stanza. When a "
+"value is created, it is created in the first file that exists. If no files "
+"in the chain exist, then it creates the last file in the list.\n"
+"\t\t\t3. Otherwise, "
+msgstr ""
+" environment variable is set, then it is used as a list of paths (normal "
+"path delimiting rules for your system). These paths are merged. When a value "
+"is modified, it is modified in the file that defines the stanza. When a "
+"value is created, it is created in the first file that exists. If no files "
+"in the chain exist, then it creates the last file in the list.\n"
+"\t\t\t3. Otherwise, "
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:48
+msgid ""
+" flag is set, then only that file is loaded. The flag may only be set once "
+"and no merging takes place.\n"
+"\t\t\t2. If $"
+msgstr ""
+" flag is set, then only that file is loaded. The flag may only be set once "
+"and no merging takes place.\n"
+"\t\t\t2. If $"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:50
+msgid " is used and no merging takes place."
+msgstr " is used and no merging takes place."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107
+msgid ""
+"A comma-delimited set of quota scopes that must all match each object "
+"tracked by the quota."
+msgstr ""
+"A comma-delimited set of quota scopes that must all match each object "
+"tracked by the quota."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106
+msgid ""
+"A comma-delimited set of resource=quantity pairs that define a hard limit."
+msgstr ""
+"A comma-delimited set of resource=quantity pairs that define a hard limit."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113
+msgid ""
+"A label selector to use for this budget. Only equality-based selector "
+"requirements are supported."
+msgstr ""
+"A label selector to use for this budget. Only equality-based selector "
+"requirements are supported."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152
+msgid ""
+"A label selector to use for this service. Only equality-based selector "
+"requirements are supported. If empty (the default) infer the selector from "
+"the replication controller or replica set."
+msgstr ""
+"A label selector to use for this service. Only equality-based selector "
+"requirements are supported. If empty (the default) infer the selector from "
+"the replication controller or replica set."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157
+msgid ""
+"Additional external IP address (not managed by Kubernetes) to accept for the "
+"service. If this IP is routed to a node, the service can be accessed by this "
+"IP in addition to its generated service IP."
+msgstr ""
+"Additional external IP address (not managed by Kubernetes) to accept for the "
+"service. If this IP is routed to a node, the service can be accessed by this "
+"IP in addition to its generated service IP."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:178
+msgid "Allocate a TTY for the debugging container."
+msgstr "Allocate a TTY for the debugging container."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178
+msgid ""
+"An inline JSON override for the generated object. If this is non-empty, it "
+"is used to override the generated object. Requires that the object supply a "
+"valid apiVersion field."
+msgstr ""
+"An inline JSON override for the generated object. If this is non-empty, it "
+"is used to override the generated object. Requires that the object supply a "
+"valid apiVersion field."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:173
+msgid "Annotations to apply to the pod."
+msgstr "Annotations to apply to the pod."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:173
+msgid "Apply a configuration to a resource by file name or stdin"
+msgstr "Apply a configuration to a resource by file name or stdin"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125
+msgid "Approve a certificate signing request"
+msgstr "Approve a certificate signing request"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263
+msgid ""
+"Assign your own ClusterIP or set to 'None' for a 'headless' service (no "
+"loadbalancing)."
+msgstr ""
+"Assign your own ClusterIP or set to 'None' for a 'headless' service (no "
+"loadbalancing)."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:106
+msgid ""
+"Attach to a process that is already running inside an existing container."
+msgstr ""
+"Attach to a process that is already running inside an existing container."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105
+msgid "Attach to a running container"
+msgstr "Attach to a running container"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:107
+msgid ""
+"Auto-scale a deployment, replica set, stateful set, or replication controller"
+msgstr ""
+"Auto-scale a deployment, replica set, stateful set, or replication controller"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161
+msgid ""
+"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or "
+"set to 'None' to create a headless service."
+msgstr ""
+"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or "
+"set to 'None' to create a headless service."
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole this ClusterRoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole this RoleBinding should reference" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:32 +msgid "Commands for features in alpha" +msgstr "Commands for features in alpha" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:170 +msgid "Container image to use for debug container." +msgstr "Container image to use for debug container." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:166 +msgid "Container name to use for debug container." +msgstr "Container name to use for debug container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67 +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Convert config files between different API versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:105 +msgid "Copy files and directories to and from containers" +msgstr "Copy files and directories to and from containers" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Copy files and directories to and from containers." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:248 +msgid "Create a ClusterIP service" +msgstr "Create a ClusterIP service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:323 +msgid "Create a LoadBalancer service" +msgstr "Create a LoadBalancer service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:286 +msgid "Create a NodePort service" +msgstr "Create a NodePort service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Create a TLS secret" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:81 +msgid "Create a cluster role" +msgstr "Create a cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:87 +msgid "Create a cluster role binding for a particular cluster role" +msgstr "Create a cluster role binding for a particular cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:124 +msgid "Create a config map from a local file, directory or literal value" +msgstr "Create a config map from a local file, directory or literal value" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:167 +msgid "Create a copy of the target Pod with this name." +msgstr "Create a copy of the target Pod with this name." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:90 +msgid "Create a cron job with the specified name" +msgstr "Create a cron job with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:100 +msgid "Create a deployment with the specified name" +msgstr "Create a deployment with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:91 +msgid "Create a job with the specified name" +msgstr "Create a job with the specified name" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Create a namespace with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:95 +msgid "Create a pod disruption budget with the specified name" +msgstr "Create a pod disruption budget with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:92 +msgid "Create a priority class with the specified name" +msgstr "Create a priority class with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:91 +msgid "Create a quota with the specified name" +msgstr "Create a quota with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:106 +msgid "Create a resource from a file or from stdin" +msgstr "Create a resource from a file or from stdin" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:89 +msgid "Create a role binding for a particular role or cluster role" +msgstr "Create a role binding for a particular role or cluster role" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:161 +msgid "Create a role with single rule" +msgstr "Create a role with single rule" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Create a secret for use with a Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:137 +msgid "Create a secret from a local file, directory, or literal value" +msgstr "Create a secret from a local file, directory, or literal value" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Create a secret using specified subcommand" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:50 +msgid "Create a secret using specified subcommand." +msgstr "Create a secret using specified subcommand." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Create a service account with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:48 +msgid "Create a service using a specified subcommand" +msgstr "Create a service using a specified subcommand" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:49 +msgid "Create a service using a specified subcommand." +msgstr "Create a service using a specified subcommand." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:363 +msgid "Create an ExternalName service" +msgstr "Create an ExternalName service" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:145 +msgid "Create an ingress with the specified name" +msgstr "Create an ingress with the specified name" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:60 +msgid "Create and run a particular image in a pod." +msgstr "Create and run a particular image in a pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:149 +msgid "Create debugging sessions for troubleshooting workloads and nodes" +msgstr "Create debugging sessions for troubleshooting workloads and nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:137 +msgid "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" +msgstr "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Delete the specified cluster from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:43 +msgid "Delete the specified cluster from the kubeconfig." +msgstr "Delete the specified cluster from the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Delete the specified context from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:43 +msgid "Delete the specified context from the kubeconfig." +msgstr "Delete the specified context from the kubeconfig." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:64 +msgid "Delete the specified user from the kubeconfig" +msgstr "Delete the specified user from the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:65 +msgid "Delete the specified user from the kubeconfig." +msgstr "Delete the specified user from the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Deny a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Describe one or many contexts" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:142 +msgid "Diff the live version against a would-be applied version" +msgstr "Diff the live version against a would-be applied version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:65 +msgid "Display cluster information" +msgstr "Display cluster information" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Display clusters defined in the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:42 +msgid "Display clusters defined in the kubeconfig." 
+msgstr "Display clusters defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "Display merged kubeconfig settings or a specified kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:50 +msgid "Display one or many contexts from the kubeconfig file." +msgstr "Display one or many contexts from the kubeconfig file." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Display one or many resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:50 +msgid "Display resource (CPU/memory) usage" +msgstr "Display resource (CPU/memory) usage" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:81 +msgid "Display resource (CPU/memory) usage of nodes" +msgstr "Display resource (CPU/memory) usage of nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:100 +msgid "Display resource (CPU/memory) usage of pods" +msgstr "Display resource (CPU/memory) usage of pods" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:51 +msgid "Display the current-context" +msgstr "Display the current-context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:60 +msgid "Display users defined in the kubeconfig" +msgstr "Display users defined in the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:61 +msgid "Display users defined in the kubeconfig." +msgstr "Display users defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparation for maintenance" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:74 +msgid "Dump relevant information for debugging and diagnosis" +msgstr "Dump relevant information for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Edit a resource on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:67 +msgid "Edit latest last-applied-configuration annotations of a resource/object" +msgstr "" +"Edit latest last-applied-configuration annotations of a resource/object" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email for Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:169 +msgid "Environment variables to set in the container." +msgstr "Environment variables to set in the container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:90 +msgid "Execute a command in a container." +msgstr "Execute a command in a container." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:115 +msgid "Experimental: Wait for a specific condition on one or many resources" +msgstr "Experimental: Wait for a specific condition on one or many resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:378 +msgid "External name of service" +msgstr "External name of service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75 +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Forward one or more local ports to a pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:79 +msgid "Get documentation for a resource" +msgstr "Get documentation for a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36 +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Help about any command" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:151 +msgid "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." +msgstr "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135 +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:164 +msgid "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." +msgstr "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:198 +msgid "If true, run the container in privileged mode." +msgstr "If true, run the container in privileged mode." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:174 +msgid "If true, suppress informational messages." +msgstr "If true, suppress informational messages." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:165 +msgid "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. 
Default false, unless '-i/--stdin' is " +"set, in which case the default is true." +msgstr "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is " +"set, in which case the default is true." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:173 +msgid "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." +msgstr "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:90 +msgid "List all visible plugin executables on a user's PATH" +msgstr "List all visible plugin executables on a user's PATH" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:54 +msgid "Manage the rollout of a resource" +msgstr "Manage the rollout of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Mark node as schedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Mark node as unschedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Mark the provided resource as paused" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Modify certificate resources." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Modify kubeconfig files" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:43 +msgid "No alpha commands are available in this version of kubectl" +msgstr "No alpha commands are available in this version of kubectl" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97 +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Output shell completion code for the specified shell (bash or zsh)" + +#: pkg/kubectl/cmd/convert/convert.go:105 +msgid "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." 
+msgstr "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Path to PEM encoded public key certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Path to private key associated with given certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Print the client and server version information" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:74 +msgid "" +"Print the client and server version information for the current context." +msgstr "" +"Print the client and server version information for the current context." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Print the list of flags inherited by all commands" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Print the logs for a container in a pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:97 +msgid "Print the supported API resources on the server" +msgstr "Print the supported API resources on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:98 +msgid "Print the supported API resources on the server." +msgstr "Print the supported API resources on the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:58 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:62 +msgid "Provides utilities for interacting with plugins" +msgstr "Provides utilities for interacting with plugins" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:45 +msgid "Rename a context from the kubeconfig file" +msgstr "Rename a context from the kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:115 +msgid "Replace a resource by file name or stdin" +msgstr "Replace a resource by file name or stdin" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:87 +msgid "Restart a resource" +msgstr "Restart a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Resume a paused resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role this RoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Run a particular image on the cluster" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Run a proxy to the Kubernetes API server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Server location for Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_cluster.go:73 +msgid "Set a cluster entry in kubeconfig" +msgstr "Set a cluster entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_context.go:61 +msgid "Set a context entry in kubeconfig" +msgstr "Set a context entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:114 +msgid "Set a new size for a deployment, replica set, or replication controller" +msgstr "" +"Set a new size for a deployment, replica set, or replication controller" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_authinfo.go:152 +msgid "Set a user entry in kubeconfig" +msgstr "Set a user entry in kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:74 +msgid "Set an individual value in a kubeconfig file" +msgstr "Set an individual value in a kubeconfig file" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Set specific features on objects" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/use_context.go:52 +msgid "Set the current-context in a kubeconfig file" +msgstr "Set the current-context in a kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:101 +msgid "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" +msgstr "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81 
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Set the selector on a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80 +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Show details of a specific resource or group of resources" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Show the status of the rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Synonym for --target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:134 +msgid "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" +msgstr "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "The image for the container to run." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:172 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112 +msgid "" +"The maximum number or percentage of unavailable pods this budget requires." +msgstr "" +"The maximum number or percentage of unavailable pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"The minimum number or percentage of available pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "The name for the newly created object." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "The network protocol for the service to be created. Default is 'TCP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:182 +msgid "The port that this container exposes." +msgstr "The port that this container exposes." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:190 +msgid "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." +msgstr "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "The type of secret to create" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:33 +msgid "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." +msgstr "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:150 +msgid "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." +msgstr "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Undo a previous rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:59 +msgid "Unset an individual value in a kubeconfig file" +msgstr "Unset an individual value in a kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:154 +msgid "Update environment variables on a pod template" +msgstr "Update environment variables on a pod template" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:115 +msgid "Update fields of a resource" +msgstr "Update fields of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Update resource requests/limits on objects with pod templates" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Update the annotations on a resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:110 +msgid "Update the image of a pod template" +msgstr "Update the image of a pod template" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Update the labels on a resource" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:102 +msgid "Update the service account of a resource" +msgstr "Update the service account of a resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88 +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Update the taints on one or more nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:99 +msgid "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" +msgstr "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Username for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "View rollout history" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:77 +msgid "" +"View the latest last-applied-configuration annotations of a resource/object" +msgstr "" +"View the latest last-applied-configuration annotations of a resource/object" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:171 +msgid "" +"When used with '--copy-to', a list of name=image pairs for changing " +"container images, similar to how 'kubectl set image' works." 
+msgstr "" +"When used with '--copy-to', a list of name=image pairs for changing " +"container images, similar to how 'kubectl set image' works." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:168 +msgid "When used with '--copy-to', delete the original Pod." +msgstr "When used with '--copy-to', delete the original Pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:176 +msgid "" +"When used with '--copy-to', enable process namespace sharing in the copy." +msgstr "" +"When used with '--copy-to', enable process namespace sharing in the copy." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:175 +msgid "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." +msgstr "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:177 +msgid "" +"When using an ephemeral container, target processes in this container name." +msgstr "" +"When using an ephemeral container, target processes in this container name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108 +msgid "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." +msgstr "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "dummy restart flag)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107 +msgid "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." +msgstr "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217 +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl controls the Kubernetes cluster manager" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:45 +msgid "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" +msgstr "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109 +msgid "" +"preemption-policy is the policy for preempting pods with lower priority." +msgstr "" +"preemption-policy is the policy for preempting pods with lower priority." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:41 +msgid "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" +msgstr "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:106 +msgid "the value of this priority class." +msgstr "the value of this priority class." 
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..9c0c708fcd Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..d07da117b2 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/en_US/LC_MESSAGES/k8s.po @@ -0,0 +1,5077 @@ +# Test translations for unit tests. +# Copyright (C) 2016 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR brendan.d.burns@gmail.com, 2016. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2017-03-14 21:33-0800\n" +"Last-Translator: Brendan Burns \n" +"Language-Team: \n" +"Language: en\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.6.10\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:138 +msgid "" +"\n" +"\t\t\t# Approve CSR 'csr-sqgzp'\n" +"\t\t\tkubectl certificate approve csr-sqgzp\n" +"\t\t" +msgstr "" +"\n" +"\t\t\t# Approve CSR 'csr-sqgzp'\n" +"\t\t\tkubectl certificate approve csr-sqgzp\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:182 +msgid "" +"\n" +"\t\t\t# Deny CSR 'csr-sqgzp'\n" +"\t\t\tkubectl certificate deny csr-sqgzp\n" +"\t\t" +msgstr "" +"\n" +"\t\t\t# Deny CSR 'csr-sqgzp'\n" +"\t\t\tkubectl certificate deny csr-sqgzp\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:43 +msgid "" +"\n" +"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set " +"current-context my-context\"\n" +"\n" +"\t\t\tThe loading order follows these rules:\n" +"\n" +"\t\t\t1. If the --" +msgstr "" +"\n" +"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set " +"current-context my-context\"\n" +"\n" +"\t\t\tThe loading order follows these rules:\n" +"\n" +"\t\t\t1. If the --" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:44 +msgid "" +"\n" +"\t\t # Create a cluster role binding for user1, user2, and group1 using the " +"cluster-admin cluster role\n" +"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-" +"admin --user=user1 --user=user2 --group=group1" +msgstr "" +"\n" +"\t\t # Create a cluster role binding for user1, user2, and group1 using the " +"cluster-admin cluster role\n" +"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-" +"admin --user=user1 --user=user2 --group=group1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:58 +msgid "" +"\n" +"\t\t # Create a new config map named my-config based on folder bar\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config with specified keys instead " +"of file basenames on disk\n" +"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1." 
+"txt --from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new config map named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-" +"literal=key2=config2\n" +"\n" +"\t\t # Create a new config map named my-config from the key=value pairs in " +"the file\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config from an env file\n" +"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env" +msgstr "" +"\n" +"\t\t # Create a new config map named my-config based on folder bar\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config with specified keys instead " +"of file basenames on disk\n" +"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1." +"txt --from-file=key2=/path/to/bar/file2.txt\n" +"\n" +"\t\t # Create a new config map named my-config with key1=config1 and " +"key2=config2\n" +"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-" +"literal=key2=config2\n" +"\n" +"\t\t # Create a new config map named my-config from the key=value pairs in " +"the file\n" +"\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +"\n" +"\t\t # Create a new config map named my-config from an env file\n" +"\t\t kubectl create configmap my-config --from-env-file=path/to/bar.env" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43 +msgid "" +"\n" +"\t\t # Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1" +msgstr "" +"\n" +"\t\t # Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:56 +msgid "" +"\n" +"\t\t # If you don't already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n" +"\n" +"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n" +"\t\t kubectl create secret docker-registry my-secret --from-file=." +"dockerconfigjson=path/to/.docker/config.json" +msgstr "" +"\n" +"\t\t # If you don't already have a .dockercfg file, you can create a " +"dockercfg secret directly by using:\n" +"\t\t kubectl create secret docker-registry my-secret --docker-" +"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n" +"\n" +"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n" +"\t\t kubectl create secret docker-registry my-secret --from-file=." 
+"dockerconfigjson=path/to/.docker/config.json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:45 +msgid "" +"\n" +"\t\t# !!!Important Note!!!\n" +"\t\t# Requires that the 'tar' binary is present in your container\n" +"\t\t# image. If 'tar' is not present, 'kubectl cp' will fail.\n" +"\t\t#\n" +"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n" +"\t\t# file mode preservation, consider using 'kubectl exec'.\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"\n" +"\t\ttar cf - /tmp/foo | kubectl exec -i -n -- " +"tar xf - -C /tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl exec -n -- tar cf - /tmp/foo | tar " +"xf - -C /tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in " +"the default namespace\n" +"\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific " +"container\n" +"\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"\n" +"\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl cp /:/tmp/foo /tmp/bar" +msgstr "" +"\n" +"\t\t# !!!Important Note!!!\n" +"\t\t# Requires that the 'tar' binary is present in your container\n" +"\t\t# image. If 'tar' is not present, 'kubectl cp' will fail.\n" +"\t\t#\n" +"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n" +"\t\t# file mode preservation, consider using 'kubectl exec'.\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"\n" +"\t\ttar cf - /tmp/foo | kubectl exec -i -n -- " +"tar xf - -C /tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl exec -n -- tar cf - /tmp/foo | tar " +"xf - -C /tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in " +"the default namespace\n" +"\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific " +"container\n" +"\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +"\n" +"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +"\n" +"\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +"\n" +"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +"\t\tkubectl cp /:/tmp/foo /tmp/bar" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:119 +msgid "" +"\n" +"\t\t# Apply the configuration in pod.json to a pod\n" +"\t\tkubectl apply -f ./pod.json\n" +"\n" +"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. 
" +"dir/kustomization.yaml\n" +"\t\tkubectl apply -k dir/\n" +"\n" +"\t\t# Apply the JSON passed into stdin to a pod\n" +"\t\tcat pod.json | kubectl apply -f -\n" +"\n" +"\t\t# Note: --prune is still in Alpha\n" +"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx " +"and delete all other resources that are not in the file and match label " +"app=nginx\n" +"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +"\n" +"\t\t# Apply the configuration in manifest.yaml and delete all the other " +"config maps that are not in the file\n" +"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +"ConfigMap" +msgstr "" +"\n" +"\t\t# Apply the configuration in pod.json to a pod\n" +"\t\tkubectl apply -f ./pod.json\n" +"\n" +"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl apply -k dir/\n" +"\n" +"\t\t# Apply the JSON passed into stdin to a pod\n" +"\t\tcat pod.json | kubectl apply -f -\n" +"\n" +"\t\t# Note: --prune is still in Alpha\n" +"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx " +"and delete all other resources that are not in the file and match label " +"app=nginx\n" +"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +"\n" +"\t\t# Apply the configuration in manifest.yaml and delete all the other " +"config maps that are not in the file\n" +"\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +"ConfigMap" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:48 +#, c-format +msgid "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +msgstr "" +"\n" +"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and " +"10, no target CPU utilization specified so a default autoscaling policy will " +"be used\n" +"\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +"\n" +"\t\t# Auto scale a replication controller \"foo\", with the number of pods " +"between 1 and 5, target CPU utilization at 80%\n" +"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#: pkg/kubectl/cmd/convert/convert.go:51 +msgid "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in JSON format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . | kubectl create -f -" +msgstr "" +"\n" +"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +"\t\tkubectl convert -f pod.yaml\n" +"\n" +"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the " +"latest version\n" +"\t\t# and print to stdout in JSON format.\n" +"\t\tkubectl convert -f pod.yaml --local -o json\n" +"\n" +"\t\t# Convert all files under current directory to latest version and create " +"them all.\n" +"\t\tkubectl convert -f . 
| kubectl create -f -" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:41 +msgid "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs." +"extensions\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." +"com/aggregate-to-monitoring=true\"" +msgstr "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs." +"extensions\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." 
+"com/aggregate-to-monitoring=true\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:43 +msgid "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" +msgstr "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:44 +msgid "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +msgstr "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:44 +#, c-format +msgid "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +msgstr "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:76 +msgid "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON then create the resource " +"using the edited data\n" +"\t\tkubectl create -f docker-registry.yaml --edit -o json" +msgstr "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in docker-registry.yaml in JSON then 
create the resource " +"using the edited data\n" +"\t\tkubectl create -f docker-registry.yaml --edit -o json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:43 +msgid "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\"\n" +"\n" +"\t\t# Create a priority class named default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\" --preemption-policy=\"Never\"" +msgstr "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\"\n" +"\n" +"\t\t# Create a priority class named default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --description=" +"\"high priority\" --preemption-policy=\"Never\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:46 +msgid "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get" +"\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" +msgstr "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform \"get" +"\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.extensions\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:61 +msgid "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f 
nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +msgstr "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:64 +msgid "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." 
+"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." +"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." +"com/bar to svc\n" +"\t\t# svc1:8080 with a tls secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-cert" +"\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\n\t\t\t--annotation ingress.annotation1=foo \\n\t\t\t--" +"annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\n\t\t\t--rule=\"foo." +"com/=svc:port\" \\n\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\n\t\t\t--rule=\"foo." +"com/path*=svc:8080\" \\n\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\n\t\t --rule=\"foo.com/" +"=svc:https,tls\" \\n\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\n\t\t --rule=\"foo." 
+"com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\n\t\t --default-" +"backend=defaultsvc:http \\n\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:74 +msgid "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" +msgstr "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:74 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. 
" +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:51 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" +msgstr "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe po -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller (rc-" +"created pods\n" +"\t\t# get the name of the rc as a prefix in the pod the name)\n" +"\t\tkubectl describe pods frontend" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:76 +msgid "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" +msgstr "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:138 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " 
+"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" +msgstr "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication controller, replica set, job, daemon set or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:55 +msgid "" +"\n" +"\t\t# Edit the service named 'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +msgstr "" +"\n" +"\t\t# Edit the service named 'docker-registry'\n" +"\t\tkubectl edit svc/docker-registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:44 +msgid "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:48 +msgid "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl 
exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. -i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. 
-i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:65 +msgid "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" +"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." 
+"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" +msgstr "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" +"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." +"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # Kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:105 +msgid "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." 
+"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" +msgstr "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." +"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:72 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" +msgstr "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod 
selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:87 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:58 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" +msgstr "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:83 +msgid "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +msgstr "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all 
commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:44 +msgid "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:56 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' | " +"kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:53 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a 
deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:49 +msgid "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" +msgstr "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:75 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" +msgstr "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a 
file\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml\n" +"\n" +"\t\t# Execute set-last-applied against each configuration file in a " +"directory\n" +"\t\tkubectl apply set-last-applied -f path/\n" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file; will create the annotation if it does not already exist\n" +"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:62 +msgid "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... 
" +msgstr "" +"\n" +"\t\t# Start a nginx pod\n" +"\t\tkubectl run nginx --image=nginx\n" +"\n" +"\t\t# Start a hazelcast pod and let the container expose port 5701\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n" +"\n" +"\t\t# Start a hazelcast pod and set environment variables " +"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --env=" +"\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n" +"\n" +"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and \"env=prod" +"\" in the container\n" +"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --labels=" +"\"app=hazelcast,env=prod\"\n" +"\n" +"\t\t# Dry run; print the corresponding API objects without creating them\n" +"\t\tkubectl run nginx --image=nginx --dry-run=client\n" +"\n" +"\t\t# Start a nginx pod, but overload the spec with a partial set of values " +"parsed from JSON\n" +"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +"\"spec\": { ... } }'\n" +"\n" +"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if " +"it exits\n" +"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +"\n" +"\t\t# Start the nginx pod using the default command, but use custom " +"arguments (arg1 .. argN) for that command\n" +"\t\tkubectl run nginx --image=nginx -- ... \n" +"\n" +"\t\t# Start the nginx pod using a different command and custom arguments\n" +"\t\tkubectl run nginx --image=nginx --command -- ... " + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:73 +msgid "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. 
the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" +msgstr "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:80 +msgid "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label mylabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" +msgstr "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label mylabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:95 +msgid "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type 
and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" +msgstr "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:53 +msgid "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +msgstr "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:61 +msgid "" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can set it to " +"false\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" +msgstr "" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can set it to " +"false\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:110 +msgid "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. 
This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." +msgstr "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:126 +msgid "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requestor with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" +msgstr "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requestor with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:28 +msgid "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." +msgstr "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. 
If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:41 +msgid "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." +msgstr "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:41 +msgid "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." +msgstr "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:38 +msgid "" +"\n" +"\t\tCreate a cluster role." +msgstr "" +"\n" +"\t\tCreate a cluster role." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:46 +msgid "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:40 +msgid "" +"\n" +"\t\tCreate a cron job with the specified name." +msgstr "" +"\n" +"\t\tCreate a cron job with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:40 +msgid "" +"\n" +"\t\tCreate a job with the specified name." +msgstr "" +"\n" +"\t\tCreate a job with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCreate a namespace with the specified name." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:41 +msgid "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." +msgstr "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:41 +msgid "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." +msgstr "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:40 +msgid "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." +msgstr "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:71 +msgid "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:41 +msgid "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." +msgstr "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:40 +msgid "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." +msgstr "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCreate a role with single rule." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:61 +msgid "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to chose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to chose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCreate a service account with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:67 +msgid "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." +msgstr "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:42 +msgid "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." 
+msgstr "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:57 +msgid "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" +msgstr "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:45 +msgid "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of argument may be " +"specified: file names,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource, you must specify the " +"--force flag.\n" +"\t\tNote: only a subset of resources support graceful deletion. 
In absence " +"of the support,\n" +"\t\tthe --grace-period flag is ignored.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods, the scheduler may place new pods on " +"those nodes before the node\n" +"\t\thas released those resources and causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone submits an\n" +"\t\tupdate to a resource right when you submit a delete, their update will " +"be lost along with the\n" +"\t\trest of the resource." +msgstr "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. Only one type of argument may be " +"specified: file names,\n" +"\t\tresources and names, or resources and label selector.\n" +"\n" +"\t\tSome resources, such as pods, support graceful deletion. These resources " +"define a default period\n" +"\t\tbefore they are forcibly terminated (the grace period) but you may " +"override that value with\n" +"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +"Because these resources often\n" +"\t\trepresent entities in the cluster, deletion may not be acknowledged " +"immediately. If the node\n" +"\t\thosting a pod is down or cannot reach the API server, termination may " +"take significantly longer\n" +"\t\tthan the grace period. To force delete a resource, you must specify the " +"--force flag.\n" +"\t\tNote: only a subset of resources support graceful deletion. In absence " +"of the support,\n" +"\t\tthe --grace-period flag is ignored.\n" +"\n" +"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +"pod's processes have been\n" +"\t\tterminated, which can leave those processes running until the node " +"detects the deletion and\n" +"\t\tcompletes graceful deletion. If your processes use shared storage or " +"talk to a remote API and\n" +"\t\tdepend on the name of the pod to identify themselves, force deleting " +"those pods may result in\n" +"\t\tmultiple processes running on different machines using the same " +"identification which may lead\n" +"\t\tto data corruption or inconsistency. 
Only force delete pods when you are " +"sure the pod is\n" +"\t\tterminated, or if your application can tolerate multiple copies of the " +"same pod running at once.\n" +"\t\tAlso, if you force delete pods, the scheduler may place new pods on " +"those nodes before the node\n" +"\t\thas released those resources and causing those pods to be evicted " +"immediately.\n" +"\n" +"\t\tNote that the delete command does NOT do resource version checks, so if " +"someone submits an\n" +"\t\tupdate to a resource right when you submit a delete, their update will " +"be lost along with the\n" +"\t\trest of the resource." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:175 +msgid "" +"\n" +"\t\tDeny a certificate signing request.\n" +"\n" +"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tnot to issue a certificate to the requestor.\n" +"\t\t" +msgstr "" +"\n" +"\t\tDeny a certificate signing request.\n" +"\n" +"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tnot to issue a certificate to the requestor.\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:53 +msgid "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with params too, example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." +msgstr "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with params too, example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:39 +msgid "" +"\n" +"\t\tDisplay Resource (CPU/Memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server. 
" +msgstr "" +"\n" +"\t\tDisplay Resource (CPU/Memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server. " + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:54 +msgid "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." +msgstr "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:92 +msgid "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tUninitialized objects are not shown unless --include-uninitialized is " +"passed.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." +msgstr "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tUninitialized objects are not shown unless --include-uninitialized is " +"passed.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:57 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." +msgstr "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:67 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." +msgstr "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:37 +msgid "" +"\n" +"\t\tDisplay the current-context." +msgstr "" +"\n" +"\t\tDisplay the current-context." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:113 +msgid "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" +msgstr "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:31 +msgid "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. 
It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:31 +msgid "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. 
The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:49 +msgid "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use -o option to change to output " +"destination." +msgstr "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use -o option to change to output " +"destination." 
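+
+# Illustrative invocation for the expose entry below; the deployment name and
+# ports are placeholders:
+#   kubectl expose deployment nginx --port=80 --target-port=8000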
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:47 +msgid "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" +msgstr "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:46 +msgid "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" +msgstr "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:35 +msgid "" +"\n" +"\t\tList the fields for supported resources.\n" +"\n" +"\t\tThis command describes the fields associated with each supported API " +"resource.\n" +"\t\tFields are identified via a simple JSONPath identifier:\n" +"\n" +"\t\t\t.[.]\n" +"\n" +"\t\tAdd the --recursive flag to display all of the fields at once without " +"descriptions.\n" +"\t\tInformation about each field is retrieved from the server in OpenAPI " +"format." +msgstr "" +"\n" +"\t\tList the fields for supported resources.\n" +"\n" +"\t\tThis command describes the fields associated with each supported API " +"resource.\n" +"\t\tFields are identified via a simple JSONPath identifier:\n" +"\n" +"\t\t\t.[.]\n" +"\n" +"\t\tAdd the --recursive flag to display all of the fields at once without " +"descriptions.\n" +"\t\tInformation about each field is retrieved from the server in OpenAPI " +"format." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:30 +msgid "" +"\n" +"\t\tManage the rollout of a resource." +msgstr "" +"\n" +"\t\tManage the rollout of a resource." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMark node as schedulable." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tMark node as unschedulable." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:57 +msgid "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." +msgstr "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:46 +msgid "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." +msgstr "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:49 +msgid "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." +msgstr "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." 
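+
+# Illustrative invocation for the plugin entry below:
+#   kubectl plugin list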
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:37
+msgid ""
+"\n"
+"\t\tProvides utilities for interacting with plugins.\n"
+"\n"
+"\t\tPlugins provide extended functionality that is not part of the major "
+"command-line distribution.\n"
+"\t\tPlease refer to the documentation and examples for more information "
+"about how to write your own plugins.\n"
+"\n"
+"\t\tThe easiest way to discover and install plugins is via the kubernetes "
+"sub-project krew.\n"
+"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/"
+"user-guide/setup/install/)"
+msgstr ""
+"\n"
+"\t\tProvides utilities for interacting with plugins.\n"
+"\n"
+"\t\tPlugins provide extended functionality that is not part of the major "
+"command-line distribution.\n"
+"\t\tPlease refer to the documentation and examples for more information "
+"about how to write your own plugins.\n"
+"\n"
+"\t\tThe easiest way to discover and install plugins is via the kubernetes "
+"sub-project krew.\n"
+"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/"
+"user-guide/setup/install/)"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:47
+msgid ""
+"\n"
+"\t\tRenames a context from the kubeconfig file.\n"
+"\n"
+"\t\tCONTEXT_NAME is the context name that you want to change.\n"
+"\n"
+"\t\tNEW_NAME is the new name you want to set.\n"
+"\n"
+"\t\tNote: If the context being renamed is the 'current-context', this field "
+"will also be updated."
+msgstr ""
+"\n"
+"\t\tRenames a context from the kubeconfig file.\n"
+"\n"
+"\t\tCONTEXT_NAME is the context name that you want to change.\n"
+"\n"
+"\t\tNEW_NAME is the new name you want to set.\n"
+"\n"
+"\t\tNote: If the context being renamed is the 'current-context', this field "
+"will also be updated."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:48
+msgid ""
+"\n"
+"\t\tReplace a resource by file name or stdin.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted. If replacing an existing resource, "
+"the\n"
+"\t\tcomplete resource spec must be provided. This can be obtained by\n"
+"\n"
+"\t\t $ kubectl get TYPE NAME -o yaml"
+msgstr ""
+"\n"
+"\t\tReplace a resource by file name or stdin.\n"
+"\n"
+"\t\tJSON and YAML formats are accepted. If replacing an existing resource, "
+"the\n"
+"\t\tcomplete resource spec must be provided. This can be obtained by\n"
+"\n"
+"\t\t $ kubectl get TYPE NAME -o yaml"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:57
+msgid ""
+"\n"
+"\t\tRestart a resource.\n"
+"\n"
+"\t Resource rollout will be restarted."
+msgstr ""
+"\n"
+"\t\tRestart a resource.\n"
+"\n"
+"\t Resource rollout will be restarted."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:58
+msgid ""
+"\n"
+"\t\tResume a paused resource.\n"
+"\n"
+"\t\tPaused resources will not be reconciled by a controller. By resuming a\n"
+"\t\tresource, we allow it to be reconciled again.\n"
+"\t\tCurrently only deployments support being resumed."
+msgstr ""
+"\n"
+"\t\tResume a paused resource.\n"
+"\n"
+"\t\tPaused resources will not be reconciled by a controller. By resuming a\n"
+"\t\tresource, we allow it to be reconciled again.\n"
+"\t\tCurrently only deployments support being resumed."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:55
+msgid ""
+"\n"
+"\t\tRoll back to a previous rollout."
+msgstr ""
+"\n"
+"\t\tRoll back to a previous rollout."
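+
+# Illustrative invocation for the set-cluster entry below; the cluster name
+# and server address are placeholders:
+#   kubectl config set-cluster my-cluster --server=https://1.2.3.4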
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_cluster.go:47 +msgid "" +"\n" +"\t\tSet a cluster entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" +"\n" +"\t\tSet a cluster entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_context.go:44 +msgid "" +"\n" +"\t\tSet a context entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" +"\n" +"\t\tSet a context entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:40 +msgid "" +"\n" +"\t\tSet a new size for a deployment, replica set, replication controller, or " +"stateful set.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is " +"validated before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." +msgstr "" +"\n" +"\t\tSet a new size for a deployment, replica set, replication controller, or " +"stateful set.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is " +"validated before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_authinfo.go:70 +#, c-format +msgid "" +"\n" +"\t\tSet a user entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values.\n" +"\n" +"\t\t Client-certificate flags:\n" +"\t\t --%v=certfile --%v=keyfile\n" +"\n" +"\t\t Bearer token flags:\n" +"\t\t\t --%v=bearer_token\n" +"\n" +"\t\t Basic auth flags:\n" +"\t\t\t --%v=basic_user --%v=basic_password\n" +"\n" +"\t\tBearer token and basic auth are mutually exclusive." +msgstr "" +"\n" +"\t\tSet a user entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values.\n" +"\n" +"\t\t Client-certificate flags:\n" +"\t\t --%v=certfile --%v=keyfile\n" +"\n" +"\t\t Bearer token flags:\n" +"\t\t\t --%v=bearer_token\n" +"\n" +"\t\t Basic auth flags:\n" +"\t\t\t --%v=basic_user --%v=basic_password\n" +"\n" +"\t\tBearer token and basic auth are mutually exclusive." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." 
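+
+# Illustrative invocation for the set selector entry below; the service name
+# and selector are placeholders:
+#   kubectl set selector service my-svc app=frontend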
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:67 +#, c-format +msgid "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." +msgstr "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:39 +msgid "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX. If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." +msgstr "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX. If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:47 +msgid "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." +msgstr "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. 
If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:41 +#, c-format +msgid "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." +msgstr "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:50 +msgid "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:71 +msgid "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:78 +msgid "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted." 
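+
+# Illustrative invocation for the annotate entry below; the pod name and
+# annotation value are placeholders:
+#   kubectl annotate --overwrite pods my-pod description='my frontend'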
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:83 +msgid "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." +msgstr "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:87 +#, c-format +msgid "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." +msgstr "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:70 +#, c-format +msgid "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. 
If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." +msgstr "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:36 +msgid "" +"\n" +"\t\tView previous rollout revisions and configurations." +msgstr "" +"\n" +"\t\tView previous rollout revisions and configurations." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:47 +msgid "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." +msgstr "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:47 +msgid "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" +msgstr "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:74 +msgid "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" +msgstr "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from an env file\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/bar.env" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:45 +msgid "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx 
image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" +msgstr "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:351 +msgid "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" +msgstr "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:50 +msgid "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" +msgstr "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:42 +msgid "" +"\n" +"\tCreate a deployment with the specified name." +msgstr "" +"\n" +"\tCreate a deployment with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:61 +msgid "" +"\n" +"\tCreate an ingress with the specified name." +msgstr "" +"\n" +"\tCreate an ingress with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." 
+msgstr "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:44 +msgid "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." +msgstr "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:39 +msgid "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." +msgstr "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:43 +msgid "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" +msgstr "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:40 +msgid "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." +msgstr "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:68 +msgid "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" +msgstr "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:63 +msgid "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." +msgstr "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. 
The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:233 +msgid "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:274 +msgid "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:95 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " 
+"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" +msgstr "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:230 +msgid "" +"\n" +" Create a ClusterIP service with the specified name." +msgstr "" +"\n" +" Create a ClusterIP service with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Create a LoadBalancer service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:271 +msgid "" +"\n" +" Create a NodePort service with the specified name." +msgstr "" +"\n" +" Create a NodePort service with the specified name." + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:93 +msgid "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." +msgstr "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." 
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:40
+msgid ""
+"\n"
+"  Display addresses of the control plane and services with label kubernetes."
+"io/cluster-service=true.\n"
+"  To further debug and diagnose cluster problems, use 'kubectl cluster-info "
+"dump'."
+msgstr ""
+"\n"
+"  Display addresses of the control plane and services with label kubernetes."
+"io/cluster-service=true.\n"
+"  To further debug and diagnose cluster problems, use 'kubectl cluster-info "
+"dump'."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:49
+msgid ""
+" environment variable is set, then it is used as a list of paths (normal "
+"path delimiting rules for your system). These paths are merged. When a value "
+"is modified, it is modified in the file that defines the stanza. When a "
+"value is created, it is created in the first file that exists. If no files "
+"in the chain exist, then it creates the last file in the list.\n"
+"\t\t\t3. Otherwise, "
+msgstr ""
+" environment variable is set, then it is used as a list of paths (normal "
+"path delimiting rules for your system). These paths are merged. When a value "
+"is modified, it is modified in the file that defines the stanza. When a "
+"value is created, it is created in the first file that exists. If no files "
+"in the chain exist, then it creates the last file in the list.\n"
+"\t\t\t3. Otherwise, "
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:48
+msgid ""
+" flag is set, then only that file is loaded. The flag may only be set once "
+"and no merging takes place.\n"
+"\t\t\t2. If $"
+msgstr ""
+" flag is set, then only that file is loaded. The flag may only be set once "
+"and no merging takes place.\n"
+"\t\t\t2. If $"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:50
+msgid " is used and no merging takes place."
+msgstr " is used and no merging takes place."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107
+msgid ""
+"A comma-delimited set of quota scopes that must all match each object "
+"tracked by the quota."
+msgstr ""
+"A comma-delimited set of quota scopes that must all match each object "
+"tracked by the quota."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106
+msgid ""
+"A comma-delimited set of resource=quantity pairs that define a hard limit."
+msgstr ""
+"A comma-delimited set of resource=quantity pairs that define a hard limit."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113
+msgid ""
+"A label selector to use for this budget. Only equality-based selector "
+"requirements are supported."
+msgstr ""
+"A label selector to use for this budget. Only equality-based selector "
+"requirements are supported."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152
+msgid ""
+"A label selector to use for this service. Only equality-based selector "
+"requirements are supported. If empty (the default) infer the selector from "
+"the replication controller or replica set.)"
+msgstr ""
+"A label selector to use for this service. Only equality-based selector "
+"requirements are supported. If empty (the default) infer the selector from "
+"the replication controller or replica set.)"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157
+msgid ""
+"Additional external IP address (not managed by Kubernetes) to accept for the "
+"service. If this IP is routed to a node, the service can be accessed by this "
+"IP in addition to its generated service IP."
+msgstr ""
+"Additional external IP address (not managed by Kubernetes) to accept for the "
+"service. If this IP is routed to a node, the service can be accessed by this "
+"IP in addition to its generated service IP."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:178
+msgid "Allocate a TTY for the debugging container."
+msgstr "Allocate a TTY for the debugging container."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178
+msgid ""
+"An inline JSON override for the generated object. If this is non-empty, it "
+"is used to override the generated object. Requires that the object supply a "
+"valid apiVersion field."
+msgstr ""
+"An inline JSON override for the generated object. If this is non-empty, it "
+"is used to override the generated object. Requires that the object supply a "
+"valid apiVersion field."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:173
+msgid "Annotations to apply to the pod."
+msgstr "Annotations to apply to the pod."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:173
+msgid "Apply a configuration to a resource by file name or stdin"
+msgstr "Apply a configuration to a resource by file name or stdin"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125
+msgid "Approve a certificate signing request"
+msgstr "Approve a certificate signing request"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263
+msgid ""
+"Assign your own ClusterIP or set to 'None' for a 'headless' service (no "
+"loadbalancing)."
+msgstr ""
+"Assign your own ClusterIP or set to 'None' for a 'headless' service (no "
+"loadbalancing)."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:106
+msgid ""
+"Attach to a process that is already running inside an existing container."
+msgstr ""
+"Attach to a process that is already running inside an existing container."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105
+msgid "Attach to a running container"
+msgstr "Attach to a running container"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:107
+msgid ""
+"Auto-scale a deployment, replica set, stateful set, or replication controller"
+msgstr ""
+"Auto-scale a deployment, replica set, stateful set, or replication controller"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L115
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161
+msgid ""
+"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or "
+"set to 'None' to create a headless service."
+msgstr ""
+"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or "
+"set to 'None' to create a headless service."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101
+msgid "ClusterRole this ClusterRoleBinding should reference"
+msgstr "ClusterRole this ClusterRoleBinding should reference"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104
+msgid "ClusterRole this RoleBinding should reference"
+msgstr "ClusterRole this RoleBinding should reference"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:32
+msgid "Commands for features in alpha"
+msgstr "Commands for features in alpha"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:170
+msgid "Container image to use for debug container."
+msgstr "Container image to use for debug container."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:166
+msgid "Container name to use for debug container."
+msgstr "Container name to use for debug container."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67
+#: pkg/kubectl/cmd/convert/convert.go:95
+msgid "Convert config files between different API versions"
+msgstr "Convert config files between different API versions"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:105
+msgid "Copy files and directories to and from containers"
+msgstr "Copy files and directories to and from containers"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106
+msgid "Copy files and directories to and from containers."
+msgstr "Copy files and directories to and from containers."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:248
+msgid "Create a ClusterIP service"
+msgstr "Create a ClusterIP service"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:323
+msgid "Create a LoadBalancer service"
+msgstr "Create a LoadBalancer service"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:286
+msgid "Create a NodePort service"
+msgstr "Create a NodePort service"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94
+msgid "Create a TLS secret"
+msgstr "Create a TLS secret"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:81
+msgid "Create a cluster role"
+msgstr "Create a cluster role"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:87
+msgid "Create a cluster role binding for a particular cluster role"
+msgstr "Create a cluster role binding for a particular cluster role"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:124
+msgid "Create a config map from a local file, directory or literal value"
+msgstr "Create a config map from a local file, directory or literal value"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:167
+msgid "Create a copy of the target Pod with this name."
+msgstr "Create a copy of the target Pod with this name."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:90
+msgid "Create a cron job with the specified name"
+msgstr "Create a cron job with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:100
+msgid "Create a deployment with the specified name"
+msgstr "Create a deployment with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:91
+msgid "Create a job with the specified name"
+msgstr "Create a job with the specified name"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83
+msgid "Create a namespace with the specified name"
+msgstr "Create a namespace with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:95
+msgid "Create a pod disruption budget with the specified name"
+msgstr "Create a pod disruption budget with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:92
+msgid "Create a priority class with the specified name"
+msgstr "Create a priority class with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:91
+msgid "Create a quota with the specified name"
+msgstr "Create a quota with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:106
+msgid "Create a resource from a file or from stdin"
+msgstr "Create a resource from a file or from stdin"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:89
+msgid "Create a role binding for a particular role or cluster role"
+msgstr "Create a role binding for a particular role or cluster role"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:161
+msgid "Create a role with single rule"
+msgstr "Create a role with single rule"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134
+msgid "Create a secret for use with a Docker registry"
+msgstr "Create a secret for use with a Docker registry"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:137
+msgid "Create a secret from a local file, directory, or literal value"
+msgstr "Create a secret from a local file, directory, or literal value"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49
+msgid "Create a secret using specified subcommand"
+msgstr "Create a secret using specified subcommand"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:50
+msgid "Create a secret using specified subcommand."
+msgstr "Create a secret using specified subcommand."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85
+msgid "Create a service account with the specified name"
+msgstr "Create a service account with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:48
+msgid "Create a service using a specified subcommand"
+msgstr "Create a service using a specified subcommand"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:49
+msgid "Create a service using a specified subcommand."
+msgstr "Create a service using a specified subcommand."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:363
+msgid "Create an ExternalName service"
+msgstr "Create an ExternalName service"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:145
+msgid "Create an ingress with the specified name"
+msgstr "Create an ingress with the specified name"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:60
+msgid "Create and run a particular image in a pod."
+msgstr "Create and run a particular image in a pod."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:149
+msgid "Create debugging sessions for troubleshooting workloads and nodes"
+msgstr "Create debugging sessions for troubleshooting workloads and nodes"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:137
+msgid ""
+"Delete resources by file names, stdin, resources and names, or by resources "
+"and label selector"
+msgstr ""
+"Delete resources by file names, stdin, resources and names, or by resources "
+"and label selector"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42
+msgid "Delete the specified cluster from the kubeconfig"
+msgstr "Delete the specified cluster from the kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:43
+msgid "Delete the specified cluster from the kubeconfig."
+msgstr "Delete the specified cluster from the kubeconfig."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42
+msgid "Delete the specified context from the kubeconfig"
+msgstr "Delete the specified context from the kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:43
+msgid "Delete the specified context from the kubeconfig."
+msgstr "Delete the specified context from the kubeconfig."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:64
+msgid "Delete the specified user from the kubeconfig"
+msgstr "Delete the specified user from the kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:65
+msgid "Delete the specified user from the kubeconfig."
+msgstr "Delete the specified user from the kubeconfig."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174
+msgid "Deny a certificate signing request"
+msgstr "Deny a certificate signing request"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72
+msgid "Describe one or many contexts"
+msgstr "Describe one or many contexts"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:142
+msgid "Diff the live version against a would-be applied version"
+msgstr "Diff the live version against a would-be applied version"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:65
+msgid "Display cluster information"
+msgstr "Display cluster information"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41
+msgid "Display clusters defined in the kubeconfig"
+msgstr "Display clusters defined in the kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:42
+msgid "Display clusters defined in the kubeconfig."
+msgstr "Display clusters defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "Display merged kubeconfig settings or a specified kubeconfig file" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:50 +msgid "Display one or many contexts from the kubeconfig file." +msgstr "Display one or many contexts from the kubeconfig file." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Display one or many resources" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:50 +msgid "Display resource (CPU/memory) usage" +msgstr "Display resource (CPU/memory) usage" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:81 +msgid "Display resource (CPU/memory) usage of nodes" +msgstr "Display resource (CPU/memory) usage of nodes" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:100 +msgid "Display resource (CPU/memory) usage of pods" +msgstr "Display resource (CPU/memory) usage of pods" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:51 +msgid "Display the current-context" +msgstr "Display the current-context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:60 +msgid "Display users defined in the kubeconfig" +msgstr "Display users defined in the kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:61 +msgid "Display users defined in the kubeconfig." +msgstr "Display users defined in the kubeconfig." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparation for maintenance" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:74 +msgid "Dump relevant information for debugging and diagnosis" +msgstr "Dump relevant information for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Edit a resource on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:67 +msgid "Edit latest last-applied-configuration annotations of a resource/object" +msgstr "" +"Edit latest last-applied-configuration annotations of a resource/object" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email for Docker registry" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:169 +msgid "Environment variables to set in the container." +msgstr "Environment variables to set in the container." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:90 +msgid "Execute a command in a container." +msgstr "Execute a command in a container." 
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:115
+msgid "Experimental: Wait for a specific condition on one or many resources"
+msgstr "Experimental: Wait for a specific condition on one or many resources"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:378
+msgid "External name of service"
+msgstr "External name of service"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75
+#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109
+msgid "Forward one or more local ports to a pod"
+msgstr "Forward one or more local ports to a pod"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:79
+msgid "Get documentation for a resource"
+msgstr "Get documentation for a resource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36
+#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37
+msgid "Help about any command"
+msgstr "Help about any command"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:151
+msgid ""
+"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created "
+"and used (cloud-provider specific)."
+msgstr ""
+"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created "
+"and used (cloud-provider specific)."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160
+msgid ""
+"If non-empty, set the session affinity for the service to this; legal "
+"values: 'None', 'ClientIP'"
+msgstr ""
+"If non-empty, set the session affinity for the service to this; legal "
+"values: 'None', 'ClientIP'"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157
+msgid ""
+"If non-empty, the annotation update will only succeed if this is the current "
+"resource-version for the object. Only valid when specifying a single "
+"resource."
+msgstr ""
+"If non-empty, the annotation update will only succeed if this is the current "
+"resource-version for the object. Only valid when specifying a single "
+"resource."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132
+#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154
+msgid ""
+"If non-empty, the labels update will only succeed if this is the current "
+"resource-version for the object. Only valid when specifying a single "
+"resource."
+msgstr ""
+"If non-empty, the labels update will only succeed if this is the current "
+"resource-version for the object. Only valid when specifying a single "
+"resource."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:164
+msgid ""
+"If specified, everything after -- will be passed to the new container as "
+"Args instead of Command."
+msgstr ""
+"If specified, everything after -- will be passed to the new container as "
+"Args instead of Command."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:198
+msgid "If true, run the container in privileged mode."
+msgstr "If true, run the container in privileged mode."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:174
+msgid "If true, suppress informational messages."
+msgstr "If true, suppress informational messages."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:165
+msgid ""
+"If true, wait for the container to start running, and then attach as if "
+"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is "
+"set, in which case the default is true."
+msgstr ""
+"If true, wait for the container to start running, and then attach as if "
+"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is "
+"set, in which case the default is true."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:173
+msgid ""
+"Keep stdin open on the container(s) in the pod, even if nothing is attached."
+msgstr ""
+"Keep stdin open on the container(s) in the pod, even if nothing is attached."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:90
+msgid "List all visible plugin executables on a user's PATH"
+msgstr "List all visible plugin executables on a user's PATH"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:54
+msgid "Manage the rollout of a resource"
+msgstr "Manage the rollout of a resource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98
+msgid "Mark node as schedulable"
+msgstr "Mark node as schedulable"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69
+msgid "Mark node as unschedulable"
+msgstr "Mark node as unschedulable"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83
+msgid "Mark the provided resource as paused"
+msgstr "Mark the provided resource as paused"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50
+msgid "Modify certificate resources."
+msgstr "Modify certificate resources."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42
+msgid "Modify kubeconfig files"
+msgstr "Modify kubeconfig files"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156
+msgid ""
+"Name or number for the port on the container that the service should direct "
+"traffic to. Optional."
+msgstr ""
+"Name or number for the port on the container that the service should direct "
+"traffic to. Optional."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:43
+msgid "No alpha commands are available in this version of kubectl"
+msgstr "No alpha commands are available in this version of kubectl"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108
+#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174
+msgid ""
+"Only return logs after a specific date (RFC3339). Defaults to all logs. Only "
+"one of since-time / since may be used."
+msgstr ""
+"Only return logs after a specific date (RFC3339). Defaults to all logs. Only "
+"one of since-time / since may be used."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97
+#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112
+msgid "Output shell completion code for the specified shell (bash or zsh)"
+msgstr "Output shell completion code for the specified shell (bash or zsh)"
+
+#: pkg/kubectl/cmd/convert/convert.go:105
+msgid ""
+"Output the formatted object with the given group version (for ex: "
+"'extensions/v1beta1')."
+msgstr "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Path to PEM encoded public key certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Path to private key associated with given certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Print the client and server version information" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:74 +msgid "" +"Print the client and server version information for the current context." +msgstr "" +"Print the client and server version information for the current context." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Print the list of flags inherited by all commands" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Print the logs for a container in a pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:97 +msgid "Print the supported API resources on the server" +msgstr "Print the supported API resources on the server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:98 +msgid "Print the supported API resources on the server." +msgstr "Print the supported API resources on the server." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:58 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." +msgstr "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." 
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:62
+msgid "Provides utilities for interacting with plugins"
+msgstr "Provides utilities for interacting with plugins"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:45
+msgid "Rename a context from the kubeconfig file"
+msgstr "Rename a context from the kubeconfig file"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:115
+msgid "Replace a resource by file name or stdin"
+msgstr "Replace a resource by file name or stdin"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:87
+msgid "Restart a resource"
+msgstr "Restart a resource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87
+msgid "Resume a paused resource"
+msgstr "Resume a paused resource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105
+msgid "Role this RoleBinding should reference"
+msgstr "Role this RoleBinding should reference"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152
+msgid "Run a particular image on the cluster"
+msgstr "Run a particular image on the cluster"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68
+#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119
+msgid "Run a proxy to the Kubernetes API server"
+msgstr "Run a proxy to the Kubernetes API server"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153
+msgid "Server location for Docker registry"
+msgstr "Server location for Docker registry"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_cluster.go:73
+msgid "Set a cluster entry in kubeconfig"
+msgstr "Set a cluster entry in kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_context.go:61
+msgid "Set a context entry in kubeconfig"
+msgstr "Set a context entry in kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:114
+msgid "Set a new size for a deployment, replica set, or replication controller"
+msgstr ""
+"Set a new size for a deployment, replica set, or replication controller"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/create_authinfo.go:152
+msgid "Set a user entry in kubeconfig"
+msgstr "Set a user entry in kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:74
+msgid "Set an individual value in a kubeconfig file"
+msgstr "Set an individual value in a kubeconfig file"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39
+msgid "Set specific features on objects"
+msgstr "Set specific features on objects"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/use_context.go:52
+msgid "Set the current-context in a kubeconfig file"
+msgstr "Set the current-context in a kubeconfig file"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:101
+msgid ""
+"Set the last-applied-configuration annotation on a live object to match the "
+"contents of a file"
+msgstr ""
+"Set the last-applied-configuration annotation on a live object to match the "
+"contents of a file"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104
+msgid "Set the selector on a resource"
+msgstr "Set the selector on a resource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80
+#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107
+msgid "Show details of a specific resource or group of resources"
+msgstr "Show details of a specific resource or group of resources"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102
+msgid "Show the status of the rollout"
+msgstr "Show the status of the rollout"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154
+msgid "Synonym for --target-port"
+msgstr "Synonym for --target-port"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:134
+msgid ""
+"Take a replication controller, service, deployment or pod and expose it as a "
+"new Kubernetes service"
+msgstr ""
+"Take a replication controller, service, deployment or pod and expose it as a "
+"new Kubernetes service"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174
+msgid "The image for the container to run."
+msgstr "The image for the container to run."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176
+msgid ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server"
+msgstr ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:172
+msgid ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server."
+msgstr ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112
+msgid ""
+"The maximum number or percentage of unavailable pods this budget requires."
+msgstr ""
+"The maximum number or percentage of unavailable pods this budget requires."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111
+msgid ""
+"The minimum number or percentage of available pods this budget requires."
+msgstr ""
+"The minimum number or percentage of available pods this budget requires."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159
+msgid "The name for the newly created object."
+msgstr "The name for the newly created object."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125
+msgid ""
+"The name for the newly created object. If not specified, the name of the "
+"input resource will be used."
+msgstr ""
+"The name for the newly created object. If not specified, the name of the "
+"input resource will be used."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147
+msgid ""
+"The name of the API generator to use. There are 2 generators: 'service/v1' "
+"and 'service/v2'. The only difference between them is that service port in "
+"v1 is named 'default', while it is left unnamed in v2. Default is 'service/"
+"v2'."
+msgstr ""
+"The name of the API generator to use. There are 2 generators: 'service/v1' "
+"and 'service/v2'. The only difference between them is that service port in "
+"v1 is named 'default', while it is left unnamed in v2. Default is 'service/"
+"v2'."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148
+msgid "The network protocol for the service to be created. Default is 'TCP'."
+msgstr "The network protocol for the service to be created. Default is 'TCP'."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149
+msgid ""
+"The port that the service should serve on. Copied from the resource being "
+"exposed, if unspecified"
+msgstr ""
+"The port that the service should serve on. Copied from the resource being "
+"exposed, if unspecified"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:182
+msgid "The port that this container exposes."
+msgstr "The port that this container exposes."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194
+msgid ""
+"The resource requirement limits for this container. For example, 'cpu=200m,"
+"memory=512Mi'. Note that server side components may assign limits depending "
+"on the server configuration, such as limit ranges."
+msgstr ""
+"The resource requirement limits for this container. For example, 'cpu=200m,"
+"memory=512Mi'. Note that server side components may assign limits depending "
+"on the server configuration, such as limit ranges."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192
+msgid ""
+"The resource requirement requests for this container. For example, "
+"'cpu=100m,memory=256Mi'. Note that server side components may assign "
+"requests depending on the server configuration, such as limit ranges."
+msgstr ""
+"The resource requirement requests for this container. For example, "
+"'cpu=100m,memory=256Mi'. Note that server side components may assign "
+"requests depending on the server configuration, such as limit ranges."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:190
+msgid ""
+"The restart policy for this Pod. Legal values [Always, OnFailure, Never]."
+msgstr ""
+"The restart policy for this Pod. Legal values [Always, OnFailure, Never]."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155
+msgid "The type of secret to create"
+msgstr "The type of secret to create"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:33
+msgid ""
+"These commands correspond to alpha features that are not enabled in "
+"Kubernetes clusters by default."
+msgstr ""
+"These commands correspond to alpha features that are not enabled in "
+"Kubernetes clusters by default."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:150
+msgid ""
+"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. "
+"Default is 'ClusterIP'."
+msgstr ""
+"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. "
+"Default is 'ClusterIP'."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87
+msgid "Undo a previous rollout"
+msgstr "Undo a previous rollout"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:59
+msgid "Unset an individual value in a kubeconfig file"
+msgstr "Unset an individual value in a kubeconfig file"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:154
+msgid "Update environment variables on a pod template"
+msgstr "Update environment variables on a pod template"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:115
+msgid "Update fields of a resource"
+msgstr "Update fields of a resource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116
+msgid "Update resource requests/limits on objects with pod templates"
+msgstr "Update resource requests/limits on objects with pod templates"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135
+msgid "Update the annotations on a resource"
+msgstr "Update the annotations on a resource"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:110
+msgid "Update the image of a pod template"
+msgstr "Update the image of a pod template"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109
+#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133
+msgid "Update the labels on a resource"
+msgstr "Update the labels on a resource"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:102
+msgid "Update the service account of a resource"
+msgstr "Update the service account of a resource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88
+#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109
+msgid "Update the taints on one or more nodes"
+msgstr "Update the taints on one or more nodes"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:99
+msgid ""
+"Update the user, group, or service account in a role binding or cluster role "
+"binding"
+msgstr ""
+"Update the user, group, or service account in a role binding or cluster role "
+"binding"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150
+msgid "Username for Docker registry authentication"
+msgstr "Username for Docker registry authentication"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83
+msgid "View rollout history"
+msgstr "View rollout history"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:77
+msgid ""
+"View the latest last-applied-configuration annotations of a resource/object"
+msgstr ""
+"View the latest last-applied-configuration annotations of a resource/object"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:171
+msgid ""
+"When used with '--copy-to', a list of name=image pairs for changing "
+"container images, similar to how 'kubectl set image' works."
+msgstr "" +"When used with '--copy-to', a list of name=image pairs for changing " +"container images, similar to how 'kubectl set image' works." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:168 +msgid "When used with '--copy-to', delete the original Pod." +msgstr "When used with '--copy-to', delete the original Pod." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:176 +msgid "" +"When used with '--copy-to', enable process namespace sharing in the copy." +msgstr "" +"When used with '--copy-to', enable process namespace sharing in the copy." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:175 +msgid "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." +msgstr "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:177 +msgid "" +"When using an ephemeral container, target processes in this container name." +msgstr "" +"When using an ephemeral container, target processes in this container name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108 +msgid "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." +msgstr "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "dummy restart flag)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107 +msgid "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." +msgstr "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217 +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl controls the Kubernetes cluster manager" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:45 +msgid "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" +msgstr "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109 +msgid "" +"preemption-policy is the policy for preempting pods with lower priority." +msgstr "" +"preemption-policy is the policy for preempting pods with lower priority." + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:41 +msgid "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" +msgstr "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:106 +msgid "the value of this priority class." +msgstr "the value of this priority class." 
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.mo
new file mode 100644
index 0000000000..5a22e42887
Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.mo differ
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.po
new file mode 100644
index 0000000000..78b601a372
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/fr_FR/LC_MESSAGES/k8s.po
@@ -0,0 +1,103 @@
+# Test translations for unit tests.
+# Copyright (C) 2016
+# This file is distributed under the same license as the Kubernetes package.
+# FIRST AUTHOR brendan.d.burns@gmail.com, 2016.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: gettext-go-examples-hello\n"
+"Report-Msgid-Bugs-To: EMAIL\n"
+"POT-Creation-Date: 2021-07-07 20:15+0200\n"
+"PO-Revision-Date: 2017-01-29 22:54-0800\n"
+"Last-Translator: Brendan Burns <brendan.d.burns@gmail.com>\n"
+"Language-Team: \n"
+"Language: fr\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Generator: Poedit 1.6.10\n"
+"X-Poedit-SourceCharset: UTF-8\n"
+"Plural-Forms: nplurals=2; plural=(n > 1);\n"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42
+msgid "Delete the specified cluster from the kubeconfig"
+msgstr "Supprimer le cluster spécifié du kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42
+msgid "Delete the specified context from the kubeconfig"
+msgstr "Supprimer le contexte spécifié du kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72
+msgid "Describe one or many contexts"
+msgstr "Décrire un ou plusieurs contextes"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41
+msgid "Display clusters defined in the kubeconfig"
+msgstr "Afficher les cluster définis dans kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81
+msgid "Display merged kubeconfig settings or a specified kubeconfig file"
+msgstr ""
+"Afficher les paramètres fusionnés de kubeconfig ou d'un fichier kubeconfig "
+"spécifié"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42
+msgid "Modify kubeconfig files"
+msgstr "Modifier des fichiers kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135
+msgid "Update the annotations on a resource"
+msgstr "Mettre à jour les annotations d'une ressource"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/apply.go#L98
+#~ msgid "Apply a configuration to a resource by filename or stdin"
+#~ msgstr ""
+#~ "Appliquer une configuration à une ressource par nom de fichier ou depuis "
+#~ "stdin"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48
+#~ msgid "Displays the current-context"
+#~ msgstr "Affiche le contexte actuel"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67
+#~ msgid "Sets a cluster entry in kubeconfig"
+#~ msgstr "Définit un cluster dans kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57
+#~ msgid "Sets a context entry in kubeconfig"
+#~ msgstr "Définit un contexte dans kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103
+#~ msgid "Sets a user entry in kubeconfig"
+#~ msgstr "Définit un utilisateur dans kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59
+#~ msgid "Sets an individual value in a kubeconfig file"
+#~ msgstr "Définit une valeur individuelle dans un fichier kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48
+#~ msgid "Sets the current-context in a kubeconfig file"
+#~ msgstr "Définit le contexte courant dans un fichier kubeconfig"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47
+#~ msgid "Unsets an individual value in a kubeconfig file"
+#~ msgstr "Supprime une valeur individuelle dans un fichier kubeconfig"
+
+#~ msgid ""
+#~ "watch is only supported on individual resources and resource collections "
+#~ "- %d resources were found"
+#~ msgid_plural ""
+#~ "watch is only supported on individual resources and resource collections "
+#~ "- %d resources were found"
+#~ msgstr[0] ""
+#~ "watch n'est compatible qu'avec les ressources individuelles et les "
+#~ "collections de ressources. - %d ressource a été trouvée. "
+#~ msgstr[1] ""
+#~ "watch n'est compatible qu'avec les ressources individuelles et les "
+#~ "collections de ressources. - %d ressources ont été trouvées. "
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.mo
new file mode 100644
index 0000000000..afe881717e
Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.mo differ
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po
new file mode 100644
index 0000000000..ca119f64f4
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/it_IT/LC_MESSAGES/k8s.po
@@ -0,0 +1,3249 @@
+# Italian translation.
+# Copyright (C) 2017
+# This file is distributed under the same license as the Kubernetes package.
+# FIRST AUTHOR evolution85@gmail.com, 2017.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: kubernetes\n"
+"Report-Msgid-Bugs-To: EMAIL\n"
+"POT-Creation-Date: 2021-07-07 20:15+0200\n"
+"PO-Revision-Date: 2017-08-28 15:20+0200\n"
+"Last-Translator: Luca Berton <evolution85@gmail.com>\n"
+"Language-Team: Luca Berton <evolution85@gmail.com>\n"
+"Language: it_IT\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Plural-Forms: nplurals=2; plural=(n != 1);\n"
+"X-Generator: Poedit 1.8.7.1\n"
+"X-Poedit-SourceCharset: UTF-8\n"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62
+msgid ""
+"\n"
+"\t\t # Show metrics for all nodes\n"
+"\t\t kubectl top node\n"
+"\n"
+"\t\t # Show metrics for a given node\n"
+"\t\t kubectl top node NODE_NAME"
+msgstr ""
+"\n"
+"\t\t # Mostra metriche per tutti i nodi\n"
+"\t\t kubectl top node\n"
+"\n"
+"\t\t # Mostra metriche per un determinato nodo\n"
+"\t\t kubectl top node NODE_NAME"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46
+msgid ""
+"\n"
+"\t\t# Get the documentation of the resource and its fields\n"
+"\t\tkubectl explain pods\n"
+"\n"
+"\t\t# Get the documentation of a specific field of a resource\n"
+"\t\tkubectl explain pods.spec.containers"
+msgstr ""
+"\n"
+"\t\t# Ottieni la documentazione della risorsa e i relativi campi\n"
+"\t\tkubectl explain pods\n"
+"\n"
+"\t\t# Ottieni la documentazione di un campo specifico di una risorsa\n"
+"\t\tkubectl explain pods.spec.containers"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29
+msgid ""
+"\n"
+"\t\t# Print flags inherited by all commands\n"
+"\t\tkubectl options"
+msgstr ""
+"\n"
+"\t\t# Stampa i flag ereditati da tutti i comandi\n"
+"\t\tkubectl options"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44
+msgid ""
+"\n"
+"\t\t# Print the client and server versions for the current context\n"
+"\t\tkubectl version"
+msgstr ""
+"\n"
+"\t\t# Stampa le versioni client e server per il current context\n"
+"\t\tkubectl version"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34
+msgid ""
+"\n"
+"\t\t# Print the supported API versions\n"
+"\t\tkubectl api-versions"
+msgstr ""
+"\n"
+"\t\t# Stampa le versioni API supportate\n"
+"\t\tkubectl api-versions"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75
+msgid ""
+"\n"
+"\t\t# Show metrics for all pods in the default namespace\n"
+"\t\tkubectl top pod\n"
+"\n"
+"\t\t# Show metrics for all pods in the given namespace\n"
+"\t\tkubectl top pod --namespace=NAMESPACE\n"
+"\n"
+"\t\t# Show metrics for a given pod and its containers\n"
+"\t\tkubectl top pod POD_NAME --containers\n"
+"\n"
+"\t\t# Show metrics for the pods defined by label name=myLabel\n"
+"\t\tkubectl top pod -l name=myLabel"
+msgstr ""
+"\n"
+"\t\t# Mostra metriche di tutti i pod nello spazio dei nomi predefinito\n"
+"\t\tkubectl top pod\n"
+"\n"
+"\t\t# Mostra metriche di tutti i pod nello spazio dei nomi specificato\n"
+"\t\tkubectl top pod --namespace=NAMESPACE\n"
+"\n"
+"\t\t# Mostra metriche per un pod e i suoi relativi container\n"
+"\t\tkubectl top pod POD_NAME --containers\n"
+"\n"
+"\t\t# Mostra metriche per i pod definiti da label name = myLabel\n"
+"\t\tkubectl top pod -l name=myLabel"
+
+#: pkg/kubectl/cmd/convert/convert.go:40
+msgid ""
+"\n"
+"\t\tConvert config files between different API versions. Both YAML\n"
+"\t\tand JSON formats are accepted.\n"
+"\n"
+"\t\tThe command takes filename, directory, or URL as input, and convert it "
+"into format\n"
+"\t\tof version specified by --output-version flag. If target version is not "
+"specified or\n"
+"\t\tnot supported, convert to latest version.\n"
+"\n"
+"\t\tThe default output will be printed to stdout in YAML format. One can use "
+"-o option\n"
+"\t\tto change to output destination."
+msgstr ""
+"\n"
+"\t\tConvertire i file di configurazione tra diverse versioni API. Sono\n"
+"\t\taccettati i formati YAML e JSON.\n"
+"\n"
+"\t\tIl comando prende il nome di file, la directory o l'URL come input e lo "
+"converte nel formato\n"
+"\t\tdi versione specificata dal flag -output-version. Se la versione di "
+"destinazione non è specificata o\n"
+"\t\tnon supportata, viene convertita nella versione più recente.\n"
+"\n"
+"\t\tL'output predefinito verrà stampato su stdout nel formato YAML. Si può "
+"usare l'opzione -o\n"
+"\t\tper cambiare la destinazione di output."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39
+msgid ""
+"\n"
+"\t\tCreate a namespace with the specified name."
+msgstr ""
+"\n"
+"\t\tCreare un namespace con il nome specificato."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43
+msgid ""
+"\n"
+"\t\tCreate a role with single rule."
+msgstr ""
+"\n"
+"\t\tCrea un ruolo con una singola regola."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40
+msgid ""
+"\n"
+"\t\tCreate a service account with the specified name."
+msgstr ""
+"\n"
+"\t\tCreare un service account con il nome specificato."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84
+msgid ""
+"\n"
+"\t\tMark node as schedulable."
+msgstr ""
+"\n"
+"\t\tContrassegna il nodo come programmabile."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55
+msgid ""
+"\n"
+"\t\tMark node as unschedulable."
+msgstr ""
+"\n"
+"\t\tContrassegnare il nodo come non programmabile."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70
+msgid ""
+"\n"
+"\t\tSet the latest last-applied-configuration annotations by setting it to "
+"match the contents of a file.\n"
+"\t\tThis results in the last-applied-configuration being updated as though "
+"'kubectl apply -f <file>' was run,\n"
+"\t\twithout updating any other parts of the object."
+msgstr ""
+"\n"
+"\t\tImposta le annotazioni dell'ultima-configurazione-applicata impostandola "
+"in modo che corrisponda al contenuto di un file.\n"
+"\t\tCiò determina l'aggiornamento dell'ultima-configurazione-applicata come "
+"se 'kubectl apply -f <file>' fosse stato eseguito,\n"
+"\t\tsenza aggiornare altre parti dell'oggetto."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42
+msgid ""
+"\n"
+"\t # Create a new namespace named my-namespace\n"
+"\t kubectl create namespace my-namespace"
+msgstr ""
+"\n"
+"\t # Crea un nuovo namespace denominato my-namespace\n"
+"\t kubectl create namespace my-namespace"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43
+msgid ""
+"\n"
+"\t # Create a new service account named my-service-account\n"
+"\t kubectl create serviceaccount my-service-account"
+msgstr ""
+"\n"
+"\t # Crea un nuovo service account denominato my-service-account\n"
+"\t kubectl create serviceaccount my-service-account"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344
+msgid ""
+"\n"
+"\tCreate an ExternalName service with the specified name.\n"
+"\n"
+"\tExternalName service references to an external DNS address instead of\n"
+"\tonly pods, which will allow application authors to reference services\n"
+"\tthat exist off platform, on other clusters, or locally."
+msgstr "" +"\n" +"\tCrea un servizio ExternalName con il nome specificato.\n" +"\n" +"\tIl servizio ExternalName fa riferimento a un indirizzo DNS esterno \n" +"\tsolo pod, che permetteranno agli autori delle applicazioni di utilizzare i " +"servizi di riferimento\n" +"\tche esistono fuori dalla piattaforma, su altri cluster, o localmente.." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp fornisce assistenza per qualsiasi comando nell'applicazione.\n" +"\tBasta digitare kubectl help [path to command] per i dettagli completi." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Creare un nuovo servizio LoadBalancer denominato my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump dello stato corrente del cluster verso stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump dello stato corrente del cluster verso /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump di tutti i namespaces verso stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump di un set di namespace verso /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Crea un servizio LoadBalancer con il nome specificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"Un insieme delimitato-da-virgole di quota scopes che devono corrispondere a " +"ciascun oggetto gestito dalla quota." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"Un insieme delimitato-da-virgola di coppie risorsa = quantità che " +"definiscono un hard limit." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"Un label selector da utilizzare per questo budget. Sono supportati solo i " +"selettori equality-based selector." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. 
If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"Un selettore di label da utilizzare per questo servizio. Sono supportati " +"solo equality-based selector. Se vuota (default) dedurre il selettore dal " +"replication controller o replica set.)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Indirizzo IP esterno aggiuntivo (non gestito da Kubernetes) da accettare per " +"il servizio. Se questo IP viene indirizzato a un nodo, è possibile accedere " +"da questo IP in aggiunta al service IP generato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"Un override JSON inline per l'oggetto generato. Se questo non è vuoto, viene " +"utilizzato per ignorare l'oggetto generato. Richiede che l'oggetto fornisca " +"un campo valido apiVersion." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Approva una richiesta di firma del certificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assegnare il proprio ClusterIP o impostare su 'None' per un servizio " +"'headless' (nessun bilanciamento del carico)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Collega a un container in esecuzione" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP da assegnare al servizio. Lasciare vuoto per allocare " +"automaticamente o impostare su 'None' per creare un servizio headless." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole a cui questo ClusterRoleBinding fa riferimento" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole a cui questo RoleBinding fa riferimento" + +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Convertire i file di configurazione tra diverse versioni APIs" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Copiare file e directory da e verso i container." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Crea un secret TLS" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Crea un namespace con il nome specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Crea un secret da utilizzare con un registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Crea un secret utilizzando un subcommand specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Creare un account di servizio con il nome specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "Elimina il cluster specificato dal kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "Elimina il context specificato dal kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Nega una richiesta di firma del certificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "Descrive uno o più context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "Mostra i cluster definiti nel kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" +"Visualizza le impostazioni merged di kubeconfig o un file kubeconfig " +"specificato" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "Visualizza una o più risorse" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparazione alla manutenzione" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Modificare una risorsa sul server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email per il registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Esegui un comando in un contenitore" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Inoltra una o più porte locali a un pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Aiuto per qualsiasi comando" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"Se non è vuoto, impostare l'affinità di sessione per il servizio; Valori " +"validi: 'None', 'ClientIP'" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." 
+msgstr "" +"Se non è vuoto, l'aggiornamento delle annotazioni avrà successo solo se " +"questa è la resource-version corrente per l'oggetto. Valido solo quando si " +"specifica una singola risorsa." + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"Se non vuoto, l'aggiornamento delle label avrà successo solo se questa è la " +"resource-version corrente per l'oggetto. Valido solo quando si specifica una " +"singola risorsa." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Contrassegnare il nodo come programmabile" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Contrassegnare il nodo come non programmabile" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Imposta la risorsa indicata in pausa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Modificare le risorse del certificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "Modifica i file kubeconfig" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Nome o numero di porta nel container verso il quale il servizio deve " +"dirigere il traffico. Opzionale." + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Restituisce solo i log dopo una data specificata (RFC3339). Predefinito " +"tutti i log. È possibile utilizzare solo uno tra data-inizio/a-partire-da." + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "" +"Codice di completamento shell di output per la shell specificata (bash o zsh)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password per l'autenticazione al registro di Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Percorso certificato di chiave pubblica codificato PEM." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Percorso alla chiave privata associata a un certificato specificato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Prerequisito per la versione delle risorse. Richiede che la versione " +"corrente delle risorse corrisponda a questo valore per scalare." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Stampa per client e server le informazioni sulla versione" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Stampa l'elenco flag ereditati da tutti i comandi" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Stampa i log per container in un pod" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Riprendere una risorsa in pausa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Ruolo di riferimento per RoleBinding" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Esegui una particolare immagine nel cluster" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Eseguire un proxy al server Kubernetes API" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Posizione del server per il Registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Imposta caratteristiche specifiche sugli oggetti" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "Impostare il selettore di una risorsa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Mostra i dettagli di una specifica risorsa o un gruppo di risorse" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Mostra lo stato del rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Sinonimo di --target-port" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "L'immagine per il container da eseguire." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"La politica di pull dell'immagine per il container. Se lasciato vuoto, " +"questo valore non verrà specificato dal client e predefinito dal server" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"Il numero minimo o la percentuale di pod disponibili che questo budget " +"richiede." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "Il nome dell'oggetto appena creato." + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"Il nome dell'oggetto appena creato. Se non specificato, verrà utilizzato il " +"nome della risorsa di input." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. 
There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"Il nome del generatore API da utilizzare. Ci sono 2 generatori: 'service/v1' " +"e 'service/v2'. L'unica differenza tra loro è che la porta di servizio in v1 " +"è denominata \"predefinita\", mentre viene lasciata unnamed in v2. Il valore " +"predefinito è 'service/v2'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "" +"Il protocollo di rete per il servizio da creare. Il valore predefinito è " +"'TCP'." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"La porta che il servizio deve servire. Copiato dalla risorsa esposta, se non " +"specificata" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"I limiti delle richieste di risorse per questo contenitore. Ad esempio, " +"'cpu=200m,memory=512Mi'. Si noti che i componenti lato server possono " +"assegnare i limiti a seconda della configurazione del server, ad esempio " +"intervalli di limiti." + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"La risorsa necessita di richieste di requisiti per questo pod. Ad esempio, " +"'cpu = 100m, memoria = 256Mi'. Si noti che i componenti lato server possono " +"assegnare i requisiti a seconda della configurazione del server, ad esempio " +"intervalli di limiti." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "Tipo di segreto da creare" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "Annulla un precedente rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Aggiorna richieste di risorse/limiti sugli oggetti con pod template" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "Aggiorna annotazioni di risorsa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "Aggiorna label di una risorsa" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Aggiorna i taints su uno o più nodi" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Nome utente per l'autenticazione nel registro Docker" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "Visualizza la storia del rollout" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. 
If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Dove eseguire l'output dei file. Se vuota o '-' utilizza lo stdout, " +"altrimenti crea una gerarchia di directory in quella directory" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "flag di riavvio finto)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "Kubectl controlla il gestore cluster di Kubernetes" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using " +#~ "the cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Creare un ClusterRoleBinding per user1, user2 e group1 utilizzando " +#~ "il cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Crea un RoleBinding per user1, user2, and group1 utilizzando " +#~ "l'admin ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys " +#~ "instead of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Crea un nuovo configmap denominato my-config in base alla " +#~ "cartella bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Crea un nuovo configmap denominato my-config con le chiavi " +#~ "specificate anziché i nomi dei file su disco\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Crea un nuovo configmap denominato my-config con key1 = config1 e " +#~ "key2 = config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Se non si dispone ancora di un file .dockercfg, è possibile " +#~ "creare un secret dockercfg direttamente utilizzando:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ 
"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Applica la configurazione pod.json a un pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Applicare il JSON passato in stdin a un pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Nota: --prune è ancora in in Alpha\n" +#~ "\t\t# Applica la configurazione manifest.yaml che corrisponde alla label " +#~ "app = nginx ed elimina tutte le altre risorse che non sono nel file e " +#~ "nella label corrispondente app = nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Applica la configurazione manifest.yaml ed elimina tutti gli altri " +#~ "configmaps non presenti nel file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, no target CPU utilization specified so a default autoscaling " +#~ "policy will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of " +#~ "pods between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Auto scale un deployment \"foo\", con il numero di pod compresi " +#~ "tra 2 e 10, utilizzo della CPU target specificato in modo da utilizzare " +#~ "una politica di autoscaling predefinita:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale un controller di replica \"foo\", con il numero di pod " +#~ "compresi tra 1 e 5, utilizzo dell'utilizzo della CPU a 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . 
| kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Converte 'pod.yaml' alla versione più recente e stampa in stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Converte lo stato live della risorsa specificata da 'pod.yaml' " +#~ "nella versione più recente.\n" +#~ "\t\t# e stampa in stdout nel formato json.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Converte tutti i file nella directory corrente alla versione più " +#~ "recente e li crea tutti.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un ClusterRole denominato \"pod-reader\" che consente " +#~ "all'utente di eseguire \"get\", \"watch\" e \"list\" sui pod\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Crea un ClusterRole denominato \"pod-reader\" con ResourceName " +#~ "specificato\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un ruolo denominato \"pod-reader\" che consente all'utente di " +#~ "eseguire \"get\", \"watch\" e \"list\" sui pod\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Crea un ruolo denominato \"pod-reader\" con ResourceName " +#~ "specificato\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verg=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea una nuova resourcequota chiamata my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Creare una nuova resourcequota denominata best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl 
create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un pod disruption budget chiamato my-pdb che seleziona tutti i " +#~ "pod con label app = rail\n" +#~ "\t\t# e richiede che almeno uno di essi sia disponibile in qualsiasi " +#~ "momento.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Crea un pod disruption budget con nome my-pdb che seleziona tutti i " +#~ "pod con label app = nginx \n" +#~ "\t\t# e richiede che almeno la metà dei pod selezionati sia disponibile " +#~ "in qualsiasi momento.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un pod utilizzando i dati in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Crea un pod basato sul JSON passato in stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Modifica i dati in docker-registry.yaml in JSON utilizzando il " +#~ "formato API v1 quindi creare la risorsa utilizzando i dati modificati.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers 
on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Crea un servizio per un nginx replicato, che serve nella porta 80 e " +#~ "si collega ai container sulla porta 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un controller di replica identificato per tipo " +#~ "e nome specificato in \"nginx-controller.yaml\", che serve nella porta 80 " +#~ "e si collega ai container sulla porta 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un pod valid-pod, che serve nella porta 444 " +#~ "con il nome \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Crea un secondo servizio basato sul servizio sopra, esponendo la " +#~ "porta container 8443 come porta 443 con il nome \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un'applicazione di replica in porta 4100 che " +#~ "bilanci il traffico UDP e denominato \"video stream\".\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Crea un servizio per un nginx replicato utilizzando l'insieme di " +#~ "replica, che serve nella porta 80 e si collega ai contenitori sulla porta " +#~ "8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Crea un servizio per una distribuzione di nginx, che serve nella " +#~ "porta 80 e si collega ai contenitori della porta 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Elimina un pod utilizzando il tipo e il nome specificati in pod." 
+#~ "json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Elimina un pod in base al tipo e al nome del JSON passato in " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Elimina i baccelli ei servizi con gli stessi nomi \"baz\" e \"foo" +#~ "\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Elimina i baccelli ei servizi con il nome dell'etichetta = " +#~ "myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Eliminare un pod con un ritardo minimo\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Forza elimina un pod in un nodo morto\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Elimina tutti i pod\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Descrive un nodo\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Descrive un pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Descrive un pod identificato da tipo e nome in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Descrive tutti i pod\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Descrive i pod con label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Descrivere tutti i pod gestiti dal controller di replica \"frontend" +#~ "\" (rc-created pods\n" +#~ "\t\t# ottiene il nome del rc come un prefisso del nome pod).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", anche se ci sono i baccelli non gestiti da " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet o StatefulSet su di " +#~ "esso.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# Come sopra, ma interrompere se ci sono i baccelli non gestiti da " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, e " +#~ "utilizzare un periodo di grazia di 15 minuti.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API 
format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Modifica il servizio denominato 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Usa un editor alternativo\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Modifica il lavoro 'myjob' in JSON utilizzando il formato API v1:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Modifica la distribuzione 'mydeployment' in YAML e salvare la " +#~ "configurazione modificata nella sua annotazione:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ottieni l'output dalla 'data' di esecuzione del pod 123456-7890, " +#~ "utilizzando il primo contenitore per impostazione predefinita\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Ottieni l'output dalla data di esecuzione in ruby-container del pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Passare alla modalità raw terminal, invia stdin a 'bash' in ruby-" +#~ "container del pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ottieni l'output dal pod 123456-7890 in esecuzione, utilizzando il " +#~ "primo contenitore per impostazione predefinita\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Ottieni l'output dal ruby-container del pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Passa alla modalità raw terminal, invia stdin a 'bash' in ruby-" +#~ "container del pod 123456-7890\n" +#~ "\t\t# e invia stdout/stderr da 'bash' al client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Ottieni l'output dal primo pod di una ReplicaSet denominata nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac 
using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source if from ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Installa il completamento di bash su un Mac utilizzando homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Carica il codice di completamento kubectl per bash nella shell " +#~ "corrente\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Scrive il codice di completamento bash in un file e lo carica da ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Carica il codice di completamento kubectl per zsh [1] nella shell " +#~ "corrente\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Elenca tutti i pod in formato output ps.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# Elenca tutti i pod in formato output ps con maggiori informazioni " +#~ "(ad esempio il nome del nodo).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# Elenca un controller di replica singolo con NAME specificato nel " +#~ "formato di output ps.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# Elenca un singolo pod nel formato di uscita JSON.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Elenca un pod identificato per tipo e nome specificato in \"pod.yaml" +#~ "\" nel formato di uscita JSON.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Restituisce solo il valore di fase del pod specificato.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# Elenca tutti i controller e servizi di replica insieme in formato " +#~ "output ps.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# Elenca una o più risorse per il tipo e per i nomi.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# Elenca tutte le risorse con tipi diversi.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Ascolta localmente le porte 5000 e 6000, inoltrando i dati da/verso " +#~ "le porte 5000 e 6000 nel pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Ascolta localmente la porta 8888, inoltra a 5000 nel pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Ascolta localmente una porta casuale, inoltra a 5000 nel pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Ascolta localmente una porta casuale, inoltra a 5000 nel pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Segna il nodo \"foo\" come programmabile.\n" +#~ "\t\t$ Kubectl uncordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Segna il nodo \"foo\" come non programmabile.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using 
strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna parzialmente un nodo utilizzando merge patch strategica\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Aggiorna parzialmente un nodo identificato dal tipo e dal nome " +#~ "specificato in \"node.json\" utilizzando merge patch strategica\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Aggiorna l'immagine di un contenitore; spec.containers [*]. name è " +#~ "richiesto perché è una chiave di fusione\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Aggiorna l'immagine di un contenitore utilizzando una patch json " +#~ "con array posizionali\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Stampa l'indirizzo dei servizi master e cluster\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Sostituire un pod utilizzando i dati in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Sostituire un pod usando il JSON passato da stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Aggiorna la versione dell'immagine (tag) di un singolo container di " +#~ "pod a v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Forza la sostituzione, cancellazione e quindi ricreare la risorsa\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# 
Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Restituisce snapshot log dal pod nginx con un solo container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot log dei pod definiti dalla label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot log del container ruby terminato nel pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Iniziare a trasmettere i log del contenitore ruby nel pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Visualizza solo le ultime 20 righe di output del pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Mostra tutti i log del pod nginx scritti nell'ultima ora\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot log dal primo contenitore di un lavoro " +#~ "chiamato hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Restituisce snapshot logs del container nginx-1 del deployment " +#~ "chiamato nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. 
the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Esegui un proxy verso kubernetes apiserver sulla porta 8011, che " +#~ "fornisce contenuti statici da ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Esegui un proxy verso kubernetes apiserver su una porta locale " +#~ "arbitraria.\n" +#~ "\t\t# La porta selezionata per il server verrà inviata a stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Esegui un proxy verso kubernetes apiserver, cambiando il prefisso " +#~ "api in k8s-api\n" +#~ "\t\t# Questo comporta, ad es., pod api disponibili presso localhost:8001/" +#~ "k8s-api/v1/pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Scala un replicaset denominato 'foo' a 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scala una risorsa identificata per tipo e nome specificato in \"foo." +#~ "yaml\" a 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# Se la distribuzione corrente di mysql è 2, scala mysql a 3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scalare più controllori di replica.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scala il lavoro denominato 'cron' a 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Imposta l'ultima-configurazione-applicata di una risorsa che " +#~ "corrisponda al contenuto di un file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Esegue set-last-applied per ogni file di configurazione in una " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Imposta la configurazione dell'ultima applicazione di una risorsa " +#~ "che corrisponda al contenuto di un file, creerà l'annotazione se non " +#~ "esiste già.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services 
with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Spegni foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop di tutti i pod e servizi con label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Spegnere il servizio definito in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Spegnere tutte le resources in path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Avviare un'unica istanza di nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Avviare un'unica istanza di hazelcast e lasciare che il container " +#~ "esponga la porta 5701.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Avviare una singola istanza di hazelcast ed imposta le variabili " +#~ "ambiente \"DNS_DOMAIN=cluster\" e \"POD_NAMESPACE=default\" nel " +#~ "container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Avviare un'istanza replicata di nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Stampare gli oggetti API corrispondenti senza crearli.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Avviare un'unica istanza di nginx, ma overload le spec del " +#~ "deployment con un insieme parziale di valori analizzati da JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Avviare un pod di busybox e tenerlo in primo piano, non riavviarlo " +#~ "se esce.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Avviare il container nginx utilizzando il comando predefinito, ma " +#~ "utilizzare argomenti personalizzati (arg1 .. argN) per quel comando.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Avviare il container nginx utilizzando un diverso comando e " +#~ "argomenti personalizzati.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Avviare il contenitore perl per calcolare π a 2000 posti e " +#~ "stamparlo.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Avviare il cron job per calcolare π a 2000 posti e stampare ogni 5 " +#~ "minuti.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna il nodo \"foo\" con un marcatore con il tasto 'dedicated' " +#~ "e il valore 'special-user' ed effettua 'NoSchedule'.\n" +#~ "\t\t# Se un marcatore con quel tasto e l'effetto già esiste, il suo " +#~ "valore viene sostituito come specificato.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Rimuove dal nodo 'foo' il marcatore con il tasto 'dedicated' ed " +#~ "effettua 'NoSchedule' se esiste.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Rimuovi dal nodo 'foo' tutti i marcatori con chiave 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna il pod 'foo' con l'etichetta 'unhealthy' e il valore " +#~ "'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Aggiorna il pod 'foo' con l'etichetta 'status' e il valore " +#~ "'unhealthy', sovrascrivendo qualsiasi valore esistente.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aggiorna tutti i pod nello spazio dei nomi\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aggiorna un pod identificato dal tipo e dal nome in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Aggiorna il pod 'foo' solo se la risorsa è invariata dalla versione " +#~ "1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Aggiorna il pod 'foo' 
rimuovendo un'etichetta denominata 'bar' se " +#~ "esiste.\n" +#~ "\t\t# Non richiede il flag --overwrite.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend-v1 usando i nuovi dati del replication " +#~ "controller in frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend-v1 usando i dati JSON passati da stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend-v1 in frontend-v2 solo cambiando " +#~ "l'immagine e modificando\n" +#~ "\t\t# il nome del replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Aggiorna i pod di frontend solo cambiando l'immagine e mantenendo il " +#~ "vecchio nome.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Interrompe e inverte un rollout esistente in corso (da " +#~ "frontend-v1 a frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Visualizza le annotazioni dell'ultima-configurazione-applicata per " +#~ "tipo/nome in YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# Visualizza le annotazioni dell'ultima-configurazione-applicata " +#~ "per file in JSON.\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tApplicare una configurazione a una risorsa per nomefile o stdin.\n" +#~ "\t\tQuesta risorsa verrà creata se non esiste ancora.\n" +#~ "\t\tPer utilizzare 'apply', creare sempre la risorsa inizialmente con " +#~ "'apply' o 'create --save-config'.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML.\n" +#~ "\n" +#~ "\t\tDisclaimer Alpha: la funzionalità --prune non è ancora completa. Non " +#~ "utilizzare a meno che non si sia a conoscenza di quale sia lo stato " +#~ "attuale. Vedi https://issues.k8s.io/34274." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\n" +#~ "Crea un ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un ClusterRoleBinding per un ClusterRole particolare." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un RoleBinding per un particolare Ruolo o ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un TLS secret dalla coppia di chiavi pubblica/privata.\n" +#~ "\n" +#~ "\t\tLa coppia di chiavi pubblica/privata deve esistere prima. Il " +#~ "certificato chiave pubblica deve essere .PEM codificato e corrispondere " +#~ "alla chiave privata data." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreare un configmap basato su un file, una directory o un valore " +#~ "literal specificato.\n" +#~ "\n" +#~ "\t\tUn singolo configmap può includere una o più coppie chiave/valore.\n" +#~ "\n" +#~ "\t\tQuando si crea una configmap basata su un file, il valore predefinito " +#~ "sarà il nome di base del file e il valore sarà\n" +#~ "\t\tpredefinito per il contenuto del file. Se il nome di base è una " +#~ "chiave non valida, è possibile specificare un tasto alternativo.\n" +#~ "\n" +#~ "\t\tQuando si crea un configmap basato su una directory, ogni file il cui " +#~ "nome di base è una chiave valida nella directory verrà\n" +#~ "\t\tpacchettizzata nel configmap. Le voci di directory tranne i file " +#~ "regolari vengono ignorati (ad esempio sottodirectory,\n" +#~ "\t\tsymlinks, devices, pipes, ecc)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreare un nuovo secret per l'utilizzo con i registri Docker.\n" +#~ "\n" +#~ "\t\tDockercfg secrets vengono utilizzati per autenticare i registri " +#~ "Docker.\n" +#~ "\n" +#~ "\t\tQuando utilizzi la riga di comando Docker per il push delle immagini, " +#~ "è possibile eseguire l'autenticazione eseguendo correttamente un " +#~ "determinato registry\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " Questo produce un file ~ / .dockercfg che viene utilizzato dai " +#~ "successivi comandi \"docker push\" e \"docker pull\"\n" +#~ "\t\tper autenticarsi nel registry. L'indirizzo email è facoltativo.\n" +#~ "\n" +#~ "\t\tDurante la creazione di applicazioni, è possibile avere un Docker " +#~ "registry che richiede l'autenticazione. Affinché i \n" +#~ "\t\tnodi eseguano pull di immagini per vostro conto, devono avere le " +#~ "credenziali. È possibile fornire queste informazioni \n" +#~ "\t\tcreando un dockercfg secret e collegandolo al tuo account di servizio." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un pod disruption budget con il nome specificato, selector e il " +#~ "numero minimo di pod disponibili" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea una risorsa per nome file o stdin.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea una resourcequota con il nome specificato, hard limits e gli " +#~ "scope opzionali" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. 
Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un secret basato su un file, una directory o un valore literal " +#~ "specificato.\n" +#~ "\n" +#~ "\t\tUn singolo secret può includere una o più coppie chiave/valore.\n" +#~ "\n" +#~ "\t\tQuando si crea un secret basato su un file, la chiave per " +#~ "impostazione predefinita sarà il nome di base del file e il valore sarà\n" +#~ "\t\tpredefinito al contenuto del file. Se il nome di base è una chiave " +#~ "non valida, è possibile specificare una chiave alternativa.\n" +#~ "\n" +#~ "\t\tQuando si crea un secret basato su una directory, ogni file il cui " +#~ "nome di base è una chiave valida nella directory verrà\n" +#~ "\t\tpacchettizzato in un secret. Le voci di directory tranne i file " +#~ "regolari vengono ignorate (ad esempio sottodirectory,\n" +#~ "\t\tsymlinks, devices, pipes, ecc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea ed esegue un'immagine particolare, eventualmente replicata.\n" +#~ "\n" +#~ "\t\tCrea un deployment o un job per gestire i container creati." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCrea un autoscaler che automaticamente sceglie e imposta il numero di " +#~ "pod che vengono eseguiti in un cluster di kubernetes.\n" +#~ "\n" +#~ "\t\tEsegue una ricerca di un Deployment, ReplicaSet o " +#~ "ReplicationController per nome e crea un autoscaler che utilizza la " +#~ "risorsa indicata come riferimento.\n" +#~ "\t\tUn autoscaler può aumentare o diminuire automaticamente il numero di " +#~ "pod distribuiti all'interno del sistema se necessario." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. 
To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tCancella risorse secondo nomi di file, stdin, risorse e nomi, o per " +#~ "selettori di risorse e etichette.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML. È possibile specificare un solo " +#~ "tipo di argomenti: nome file,\n" +#~ "\t\trisorse e nomi, o risorse e selettore di etichette.\n" +#~ "\n" +#~ "\t\tAlcune risorse, come i pod, supportano la cancellazione corretta. Queste " +#~ "risorse definiscono un periodo di default\n" +#~ "\t\tprima che siano forzatamente terminate (il grace period) ma si può " +#~ "sostituire quel valore con\n" +#~ "\t\til flag --grace-period, o passare --now per impostare il grace-period " +#~ "a 1. Poiché queste risorse spesso\n" +#~ "\t\trappresentano entità del cluster, la cancellazione non può essere " +#~ "presa in carico immediatamente. Se il nodo\n" +#~ "\t\tche ospita un pod è spento o non raggiungibile da API server, " +#~ "termination può richiedere molto più tempo\n" +#~ "\t\tdel grace period. Per forzare la cancellazione di una resource,\tdevi " +#~ "obbligatoriamente indicare un grace\tperiod di 0 e specificare\n" +#~ "\t\til flag --force.\n" +#~ "\n" +#~ "\t\tIMPORTANTE: Forzare la cancellazione dei pod non attende conferma che " +#~ "i processi del pod siano\n" +#~ "\t\tterminati, il che può lasciare questi processi in esecuzione fino a " +#~ "quando il nodo rileva la cancellazione e\n" +#~ "\t\tla completa correttamente. Se i tuoi processi utilizzano " +#~ "l'archiviazione condivisa o parlano con un'API remota e\n" +#~ "\t\tdipendono dal nome del pod per identificarsi, la forzata eliminazione " +#~ "di questi pod può comportare\n" +#~ "\t\tpiù processi in esecuzione su macchine diverse che utilizzano la " +#~ "stessa identificazione, il che può portare a\n" +#~ "\t\tcorruzione o inconsistenza dei dati. 
Forza l'eliminazione dei pod solo quando si è " +#~ "sicuri che il pod sia\n" +#~ "\t\tterminato, o se la tua applicazione può tollerare più copie dello " +#~ "stesso pod in esecuzione contemporaneamente.\n" +#~ "\t\tInoltre, se forzate l'eliminazione dei pod, lo scheduler può " +#~ "creare nuovi pod su questi nodi prima che il nodo\n" +#~ "\t\tabbia liberato quelle risorse, provocando l'evict immediato di " +#~ "tali pod.\n" +#~ "\n" +#~ "\t\tNotare che il comando di eliminazione NON verifica la versione " +#~ "delle risorse, quindi se qualcuno\n" +#~ "\t\tinvia un aggiornamento a una risorsa proprio quando invii un'eliminazione, " +#~ "il suo aggiornamento\n" +#~ "\t\tandrà perso insieme al resto della risorsa." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: chiudere correttamente una risorsa per nome o nome file.\n" +#~ "\n" +#~ "\t\tIl comando stop è deprecato, tutte le sue funzionalità sono coperte " +#~ "dal comando delete.\n" +#~ "\t\tVedere 'kubectl delete --help' per ulteriori dettagli.\n" +#~ "\n" +#~ "\t\tTenta di arrestare ed eliminare una risorsa che supporta la corretta " +#~ "terminazione.\n" +#~ "\t\tSe la risorsa è scalabile, verrà scalata a 0 prima dell'eliminazione." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza l'utilizzo di risorse (CPU/Memoria/Storage) dei nodi.\n" +#~ "\n" +#~ "\t\tIl comando top-node consente di visualizzare il consumo di risorse " +#~ "dei nodi." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza l'utilizzo di risorse (CPU/Memoria/Storage) dei pod.\n" +#~ "\n" +#~ "\t\tIl comando \"top pod\" consente di visualizzare il consumo delle " +#~ "risorse dei pod.\n" +#~ "\n" +#~ "\t\tA causa del ritardo della pipeline metrica, potrebbero non essere " +#~ "disponibili per alcuni minuti\n" +#~ "\t\tdal momento della creazione dei pod." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza l'utilizzo di risorse (CPU/Memoria/Storage).\n" +#~ "\n" +#~ "\t\tIl comando top consente di visualizzare il consumo di risorse per " +#~ "nodi o pod.\n" +#~ "\n" +#~ "\t\tQuesto comando richiede che Heapster sia configurato correttamente e " +#~ "che funzioni sul server." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tDrain node in preparazione alla manutenzione.\n" +#~ "\n" +#~ "\t\tIl nodo indicato verrà contrassegnato unschedulable per impedire che " +#~ "nuovi pod arrivino.\n" +#~ "\t\t'drain' evict i pod se l'APIServer supporta eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Altrimenti, usa il " +#~ "normale DELETE\n" +#~ "\t\tper eliminare i pod.\n" +#~ "\t\tIl 'drain' evicts o la cancellazione di tutti all pod tranne mirror " +#~ "pods (che non possono essere eliminati\n" +#~ "\t\tattraverso API server). Se ci sono i pod gestiti da DaemonSet, " +#~ "drain non procederà\n" +#~ "\t\tsenza --ignore-daemonsets e, a prescindere da ciò, non cancellerà " +#~ "alcun\n" +#~ "\t\tpod gestitto da DaemonSet,poiché questi pods verrebbero " +#~ "immediatamente sostituiti dal\n" +#~ "\t\tDaemonSet controller, che ignora le marcature unschedulable. Se ci " +#~ "sono\n" +#~ "\t\tpod che non sono né mirror pod né gestiti dal ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet o Job, allora drain non cancellerà " +#~ "alcun pod finché non\n" +#~ "\t\tuserai --force. --force permetterà alla cancellazione di procedere " +#~ "se la risorsa gestita da uno\n" +#~ "\t\to più pod è mancante.\n" +#~ "\n" +#~ "\t\t'drain' attende il termine corretto. Non devi operare sulla macchina " +#~ "finché\n" +#~ "\t\til comando non viene completato.\n" +#~ "\n" +#~ "\t\tQuando sei pronto per riportare il nodo al servizio, utilizza kubectl " +#~ "uncordon, per\n" +#~ "\t\trimettere il nodo schedulable nuovamente.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. 
It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tModificare una risorsa dall'editor predefinito.\n" +#~ "\n" +#~ "\t\tIl comando di modifica consente di modificare direttamente qualsiasi " +#~ "risorsa API che è possibile recuperare tramite gli\n" +#~ "\t\tstrumenti di riga di comando. Apre l'editor definito dalle variabili " +#~ "d'ambiente\n" +#~ "\t\tKUBE_EDITOR o EDITOR, o ripiega su 'vi' per Linux o 'notepad' per " +#~ "Windows.\n" +#~ "\t\tÈ possibile modificare più oggetti, anche se le modifiche vengono " +#~ "applicate una alla volta. Il comando\n" +#~ "\t\taccetta sia nomi di file che argomenti da riga di comando, anche se i " +#~ "file a cui si fa riferimento devono\n" +#~ "\t\tessere versioni delle risorse salvate in precedenza.\n" +#~ "\n" +#~ "\t\tLa modifica viene eseguita con la versione API utilizzata per " +#~ "recuperare la risorsa.\n" +#~ "\t\tPer modificare utilizzando una specifica versione API, fully-qualify " +#~ "la risorsa, la versione e il gruppo.\n" +#~ "\n" +#~ "\t\tIl formato predefinito è YAML. Per modificare in JSON, specificare \"-" +#~ "o json\".\n" +#~ "\n" +#~ "\t\tIl flag --windows-line-endings può essere utilizzato per forzare i " +#~ "fine linea Windows,\n" +#~ "\t\taltrimenti verrà utilizzato il default per il sistema operativo.\n" +#~ "\n" +#~ "\t\tNel caso in cui si verifichi un errore durante l'aggiornamento, verrà " +#~ "creato un file temporaneo sul disco\n" +#~ "\t\tche contiene le modifiche non applicate. L'errore più comune durante " +#~ "l'aggiornamento di una risorsa\n" +#~ "\t\tè una modifica da parte di un altro editor della risorsa sul server. " +#~ "Quando questo si verifica, dovrai\n" +#~ "\t\tapplicare le modifiche alla versione più recente della risorsa o " +#~ "aggiornare la tua copia\n" +#~ "\t\ttemporanea salvata per includere l'ultima versione delle risorse." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. 
This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tProduce in output il codice di completamento shell per la shell " +#~ "specificata (bash o zsh).\n" +#~ "\t\tIl codice di shell deve essere valutato per fornire il completamento\n" +#~ "\t\tinterattivo dei comandi kubectl. Questo può essere eseguito " +#~ "richiamandolo\n" +#~ "\t\tda .bash_profile.\n" +#~ "\n" +#~ "\t\tNota: questo richiede il framework di completamento bash, che non è " +#~ "installato\n" +#~ "\t\tper impostazione predefinita su Mac. Questo può essere installato " +#~ "utilizzando homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tUna volta installato, bash_completion deve essere valutato. Ciò può " +#~ "essere fatto aggiungendo la\n" +#~ "\t\tseguente riga al file .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNota per gli utenti zsh: [1] i completamenti zsh sono supportati solo " +#~ "nelle versioni di zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tEseguire un rolling update del ReplicationController specificato.\n" +#~ "\n" +#~ "\t\tSostituisce il replication controller specificato con un nuovo " +#~ "replication controller aggiornando un pod alla volta per usare il\n" +#~ "\t\tnuovo PodTemplate. Il new-controller.json deve specificare lo stesso " +#~ "namespace del\n" +#~ "\t\tcontroller di replica esistente e sovrascrivere almeno una etichetta " +#~ "(comune) nella sua replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tSostituire una risorsa per nomefile o stdin.\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML. Se si sostituisce una risorsa " +#~ "esistente, \n" +#~ "\t\tè necessario fornire la specifica completa delle risorse. 
Questo può " +#~ "essere ottenuta da\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tFare riferimento ai modelli https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html per trovare se un campo è mutevole." + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tImposta una nuova dimensione per Deployment, ReplicaSet, Replication " +#~ "Controller, o Job.\n" +#~ "\n" +#~ "\t\tScala consente anche agli utenti di specificare una o più condizioni " +#~ "preliminari per l'azione della scala.\n" +#~ "\n" +#~ "\t\tSe --current-replicas o --resource-version sono specificate, viene " +#~ "convalidata prima di\n" +#~ "\t\ttentare scale, ed è garantito che la precondizione vale quando\n" +#~ "\t\tscale viene inviata al server.." + +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tPer proxy tutti i kubernetes api e nient'altro, utilizzare:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tPer proxy solo una parte dei kubernetes api e anche alcuni file " +#~ "static\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tQuanto sopra consente 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tPer eseguire il proxy tutti i kubernetes api in una radice diversa, " +#~ "utilizzare:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tQuanto sopra ti permette 'curl localhost:8001/custom/api/v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tAggiorna i campi di una risorsa utilizzando la merge patch " +#~ "strategica\n" +#~ "\n" +#~ "\t\tSono accettati i formati JSON e YAML.\n" +#~ "\n" +#~ "\t\tSi prega di fare riferimento ai modelli in https://htmlpreview.github." +#~ "io/?https://github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/" +#~ "v1/definitions.html per trovare se un campo è mutevole." 
+ +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." +#~ msgstr "" +#~ "\n" +#~ "\t\tAggiorna le label di una risorsa.\n" +#~ "\n" +#~ "\t\t* Una label deve iniziare con una lettera o un numero e può contenere " +#~ "lettere, numeri, trattini, punti e sottolineature, fino a %[1]d " +#~ "caratteri.\n" +#~ "\t\t* Se --overwrite è true, le label esistenti possono essere " +#~ "sovrascritte, altrimenti il tentativo di sovrascrivere una label " +#~ "genererà un errore.\n" +#~ "\t\t* Se --resource-version è specificata, gli aggiornamenti " +#~ "utilizzeranno questa resource version, altrimenti verrà utilizzata la " +#~ "resource-version esistente." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\tAggiorna i marcatori su uno o più nodi.\n" +#~ "\n" +#~ "\t\t* Un marcatore è costituito da una chiave, un valore e un effetto. " +#~ "Come argomento qui, viene espresso come chiave=valore:effetto.\n" +#~ "\t\t* La chiave deve iniziare con una lettera o un numero e può contenere " +#~ "lettere, numeri, trattini, punti e sottolineature, fino a %[1]d " +#~ "caratteri.\n" +#~ "\t\t* Il valore deve iniziare con una lettera o un numero e può contenere " +#~ "lettere, numeri, trattini, punti e sottolineature, fino a %[2]d " +#~ "caratteri.\n" +#~ "\t\t* L'effetto deve essere NoSchedule, PreferNoSchedule o NoExecute.\n" +#~ "\t\t* Attualmente il marcatore può essere applicato solo al nodo." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tVisualizza le annotazioni dell'ultima-configurazione-applicata per " +#~ "tipo/nome o file.\n" +#~ "\n" +#~ "\t\tL'output predefinito verrà stampato su stdout nel formato YAML. Si " +#~ "può usare l'opzione -o\n" +#~ "\t\tper cambiare il formato di output." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. 
If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "<some-namespace>\n" +#~ "\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!Nota importante!!!\n" +#~ "\t # Richiede che il binario 'tar' sia presente nell'immagine del tuo\n" +#~ "\t # container. Se 'tar' non è presente, 'kubectl cp' fallirà.\n" +#~ "\n" +#~ "\t # Copia /tmp/foo_dir directory locale in /tmp/bar_dir in un pod " +#~ "remoto nello spazio dei nomi predefinito\n" +#~ "\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +#~ "\n" +#~ " # Copia /tmp/foo file locale in /tmp/bar in un pod remoto in un " +#~ "container specifico\n" +#~ "\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +#~ "\n" +#~ "\t\t# Copia /tmp/foo file locale in /tmp/bar in un pod remoto nello " +#~ "spazio dei nomi <some-namespace>\n" +#~ "\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copia /tmp/foo da un pod remoto in /tmp/bar localmente\n" +#~ "\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Crea un nuovo secret TLS denominato tls-secret con la coppia di " +#~ "chiavi fornita:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Crea un nuovo secret denominato my-secret con una chiave per ogni file " +#~ "nella cartella bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Crea un nuovo secret denominato my-secret con le chiavi specificate " +#~ "anziché i nomi sul disco\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Crea un nuovo secret denominato my-secret con key1 = supersecret e " +#~ "key2 = topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Crea un nuovo servizio ExternalName denominato my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Creare un nuovo servizio clusterIP denominato my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Creare un nuovo servizio clusterIP denominato my-cs (in modalità " +#~ "headless)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Crea una nuovo deployment chiamato my-dep che esegue l'immagine " +#~ "busybox.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Creare un nuovo servizio nodeport denominato my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Aggiorna il pod 'foo' con annotazione 'description'e il valore 'my " +#~ "frontend'.\n" +#~ " # Se la stessa annotazione è impostata più volte, verrà applicato " +#~ "solo l'ultimo valore\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Aggiorna un pod identificato per tipo e nome in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Aggiorna pod 'foo' con la annotazione 'description' e il valore 'my " +#~ "frontend 
running nginx', sovrascrivendo qualsiasi valore esistente.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Aggiorna tutti i baccelli nel namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Aggiorna il pod 'foo' solo se la risorsa è invariata dalla versione " +#~ "1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Aggiorna il pod 'foo' rimuovendo un'annotazione denominata " +#~ "'descrizione' se esiste.\n" +#~ " # Non richiede flag -overwrite.\n" +#~ " kubectl annotate pods foo description-" + +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Crea un servizio clusterIP con il nome specificato." + +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Creare un deployment con il nome specificato." + +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Creare un servizio nodeport con il nome specificato." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Dump delle informazioni di cluster idonee per il debug e la " +#~ "diagnostica di problemi di cluster. Per impostazione predefinita, tutto\n" +#~ "    verso stdout. È possibile specificare opzionalmente una directory con " +#~ "--output-directory. Se si specifica una directory, kubernetes \n" +#~ " creearà un insieme di file in quella directory. Per impostazione " +#~ "predefinita, dumps solo i dati del namespace \"kube-system\", ma è\n" +#~ " possibile passare ad namespace diverso con il flag --namespaces o " +#~ "specificare --all-namespaces per il dump di tutti i namespace.\n" +#~ "\n" +#~ "     Il comando esegue dump anche dei log di tutti i pod del cluster, " +#~ "questi log vengono scaricati in directory differenti\n" +#~ "     basati sul namespace e sul nome del pod." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Visualizza gli indirizzi del master e dei servizi con label kubernetes." +#~ "io/cluster-service=true\n" +#~ "  Per ulteriore debug e diagnosticare i problemi di cluster, utilizzare " +#~ "'kubectl cluster-info dump'." + +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "Un calendario in formato Cron del lavoro che deve essere eseguito." + +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. 
Only used if --expose is true." +#~ msgstr "" +#~ "Un override JSON inline per l'oggetto di servizio generato. Se questo non " +#~ "è vuoto, viene utilizzato per ignorare l'oggetto generato. Richiede che " +#~ "l'oggetto fornisca un campo valido apiVersion. Utilizzato solo se --" +#~ "expose è true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "Applica una configurazione risorsa per nomefile o stdin" + +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-scale a Deployment, ReplicaSet, o ReplicationController" + +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Nome container che avrà la sua immagine aggiornata. Soltanto rilevante " +#~ "quando --image è specificato, altrimenti ignorato. Necessario quando si " +#~ "utilizza --image su un contenitore a più contenitori" + +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Crea un ClusterRoleBinding per un ClusterRole particolare" + +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Creare un servizio LoadBalancer." + +#~ msgid "Create a NodePort service." +#~ msgstr "Crea un servizio NodePort." + +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Crea un RoleBinding per un particolare Role o ClusterRole" + +#~ msgid "Create a clusterIP service." +#~ msgstr "Crea un servizio clusterIP." + +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "" +#~ "Crea un configmap da un file locale, una directory o un valore letterale" + +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Creare un deployment con il nome specificato." + +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Crea un pod disruption budget con il nome specificato." + +#~ msgid "Create a quota with the specified name." +#~ msgstr "Crea una quota con il nome specificato." + +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "Crea una risorsa per nome file o stdin" + +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "" +#~ "Crea un secret da un file locale, una directory o un valore letterale" + +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Crea un servizio utilizzando il subcommand specificato." + +#~ msgid "Create an ExternalName service." +#~ msgstr "Crea un servizio ExternalName." + +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Elimina risorse selezionate per nomi di file, stdin, risorse e nomi, o " +#~ "per risorsa e selettore di label" + +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Deprecated: spegne correttamente una risorsa per nome o nome file" + +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Visualizza l'utilizzo di risorse (CPU/Memoria) per nodo" + +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Visualizza l'utilizzo di risorse (CPU/Memoria) per pod." + +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Visualizza l'utilizzo di risorse (CPU/Memoria)." 
+ +#~ msgid "Display cluster info" +#~ msgstr "Visualizza informazioni sul cluster" + +#~ msgid "Displays the current-context" +#~ msgstr "Visualizza il current-context" + +#~ msgid "Documentation of resources" +#~ msgstr "Documentazione delle risorse" + +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "" +#~ "Dump di un sacco di informazioni pertinenti per il debug e la diagnosi" + +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Politica esplicita per il pull delle immagini container. Richiesto quando " +#~ "--image è uguale all'immagine esistente, altrimenti ignorata." + +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP da assegnare al Load Balancer. Se vuota, un IP effimero verrà creato e " +#~ "utilizzato (specifico per provider cloud)." + +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Immagine da utilizzare per aggiornare il replication controller. Deve " +#~ "essere diversa dall'immagine esistente (nuova immagine o nuovo tag " +#~ "immagine). Non può essere utilizzata con --filename/-f" + +#~ msgid "Manage a deployment rollout" +#~ msgstr "Gestisci un deployment rollout" + +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Output dell'oggetto formattato con la versione del gruppo fornito (per " +#~ "esempio: 'extensions/v1beta1').)" + +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Eseguire un rolling update del ReplicationController specificato" + +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Sostituire una risorsa per nomefile o stdin" + +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Imposta una nuova dimensione per Deployment, ReplicaSet, Replication " +#~ "Controller, o Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Imposta l'annotazione dell'ultima-configurazione-applicata ad un oggetto " +#~ "live per abbinare il contenuto di un file." + +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "Imposta una voce cluster in kubeconfig" + +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "Imposta una voce context in kubeconfig" + +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "Imposta una voce utente in kubeconfig" + +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "Imposta un singolo valore in un file kubeconfig" + +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "Imposta il current-context in un file kubeconfig" + +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Prende un replication controller, service, deployment o un pod e lo " +#~ "espone come nuovo servizio Kubernetes" + +#~ msgid "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'. 
Only relevant when --image is specified, ignored " +#~ "otherwise" +#~ msgstr "" +#~ "La chiave da utilizzare per distinguere tra due controller diversi, " +#~ "predefinito \"deployment\". Rilevante soltanto quando --image è " +#~ "specificato, altrimenti ignorato" + +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "Il nome del generatore API da utilizzare, si veda http://kubernetes.io/" +#~ "docs/user-guide/kubectl-conventions/#generators per un elenco." + +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "Il nome del generatore API da utilizzare. Attualmente c'è solo 1 " +#~ "generatore." + +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "Il nome del generatore da utilizzare per la creazione di un servizio. " +#~ "Utilizzato solo se --expose è true" + +#~ msgid "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." +#~ msgstr "" +#~ "La porta che questo contenitore espone. Se --expose è true, questa è " +#~ "anche la porta utilizzata dal servizio creato." + +#~ msgid "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." +#~ msgstr "" +#~ "La politica di riavvio per questo Pod. Valori accettati [Always, " +#~ "OnFailure, Never]. Se impostato su 'Always' viene creato un deployment, " +#~ "se impostato su 'OnFailure' viene creato un job, se impostato su 'Never', " +#~ "viene creato un pod. Per questi ultimi due le - repliche devono essere 1. " +#~ "Predefinito 'Always', per CronJobs `Never`." + +#~ msgid "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." +#~ msgstr "" +#~ "Digitare per questo servizio: ClusterIP, NodePort o LoadBalancer. " +#~ "Ppredefinito è 'ClusterIP'." 
+ +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "Annulla singolo valore in un file kubeconfig" + +#~ msgid "Update field(s) of a resource using strategic merge patch" +#~ msgstr "Aggiornare campo/i risorsa utilizzando merge patch strategici" + +#~ msgid "Update image of a pod template" +#~ msgstr "Aggiorna immagine di un pod template" + +#~ msgid "" +#~ "View latest last-applied-configuration annotations of a resource/object" +#~ msgstr "" +#~ "Visualizza ultime annotazioni dell'ultima configurazione applicata per " +#~ "risorsa/oggetto" + +#~ msgid "external name of service" +#~ msgstr "nome esterno del servizio" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..f917b6c5bf Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..d6f4aa2c4b --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ja_JP/LC_MESSAGES/k8s.po @@ -0,0 +1,3365 @@ +# Test translations for unit tests. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR girikuncoro@gmail.com, 2017. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2020-01-05 09:55+0900\n" +"Last-Translator: Kohei Ota \n" +"Language-Team: \n" +"Language: ja\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.2.4\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Print the client 
and server versions for the current context\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCreate a namespace with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCreate a role with single rule." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCreate a service account with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tMark node as schedulable." 
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55
+msgid ""
+"\n"
+"\t\tMark node as unschedulable."
+msgstr ""
+"\n"
+"\t\tMark node as unschedulable."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70
+msgid ""
+"\n"
+"\t\tSet the latest last-applied-configuration annotations by setting it to "
+"match the contents of a file.\n"
+"\t\tThis results in the last-applied-configuration being updated as though "
+"'kubectl apply -f <file>' was run,\n"
+"\t\twithout updating any other parts of the object."
+msgstr ""
+"\n"
+"\t\tSet the latest last-applied-configuration annotations by setting it to "
+"match the contents of a file.\n"
+"\t\tThis results in the last-applied-configuration being updated as though "
+"'kubectl apply -f <file>' was run,\n"
+"\t\twithout updating any other parts of the object."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42
+msgid ""
+"\n"
+"\t # Create a new namespace named my-namespace\n"
+"\t kubectl create namespace my-namespace"
+msgstr ""
+"\n"
+"\t # Create a new namespace named my-namespace\n"
+"\t kubectl create namespace my-namespace"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43
+msgid ""
+"\n"
+"\t # Create a new service account named my-service-account\n"
+"\t kubectl create serviceaccount my-service-account"
+msgstr ""
+"\n"
+"\t # Create a new service account named my-service-account\n"
+"\t kubectl create serviceaccount my-service-account"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344
+msgid ""
+"\n"
+"\tCreate an ExternalName service with the specified name.\n"
+"\n"
+"\tExternalName service references to an external DNS address instead of\n"
+"\tonly pods, which will allow application authors to reference services\n"
+"\tthat exist off platform, on other clusters, or locally."
+msgstr ""
+"\n"
+"\tCreate an ExternalName service with the specified name.\n"
+"\n"
+"\tExternalName service references to an external DNS address instead of\n"
+"\tonly pods, which will allow application authors to reference services\n"
+"\tthat exist off platform, on other clusters, or locally."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28
+msgid ""
+"\n"
+"\tHelp provides help for any command in the application.\n"
+"\tSimply type kubectl help [path to command] for full details."
+msgstr ""
+"\n"
+"\tHelp provides help for any command in the application.\n"
+"\tSimply type kubectl help [path to command] for full details."
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Create a LoadBalancer service with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. 
If empty (the default) infer the selector from " +"the replication controller or replica set.)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Approve a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Attach to a running container" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L115 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRole this ClusterRoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "ClusterRole this RoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67 +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "Convert config files between different API versions" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." +msgstr "Copy files and directories to and from containers." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "Create a TLS secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "Create a namespace with the specified name" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "Create a secret for use with a Docker registry" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "Create a secret using specified subcommand" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "Create a service account with the specified name" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "指定したコンテキストをkubeconfigから削除する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "指定したコンテキストをkubeconfigから削除する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "Deny a certificate signing request" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "1つまたは複数のコンテキストを記述する" + +# 
https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "kubeconfigで定義されたクラスターを表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "マージされたkubeconfigの設定または指定されたkubeconfigを表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "1つまたは複数のリソースを表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "Drain node in preparation for maintenance" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "Edit a resource on the server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "Email for Docker registry" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "Execute a command in a container" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75 +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "Forward one or more local ports to a pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36 +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "Help about any command" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135 +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "Mark node as schedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "Mark node as unschedulable" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "Mark the provided resource as paused" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "Modify certificate resources." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "kubeconfigを変更する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97 +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "Output shell completion code for the specified shell (bash or zsh)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "Password for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "Path to PEM encoded public key certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "Path to private key associated with given certificate." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" +"Precondition for resource version. 
Requires that the current resource " +"version match this value in order to scale." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "Print the client and server version information" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "Print the list of flags inherited by all commands" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "Print the logs for a container in a pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "Resume a paused resource" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "Role this RoleBinding should reference" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "Run a particular image on the cluster" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "Run a proxy to the Kubernetes API server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Server location for Docker registry" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific features on objects" +msgstr "Set specific features on objects" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "リソースのセレクターを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80 +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "Show details of a specific resource or group of resources" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "Show the status of the rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "Synonym for --target-port" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the 
container to run." +msgstr "The image for the container to run." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" +"The minimum number or percentage of available pods this budget requires." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "The name for the newly created object." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." +msgstr "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in " +"v1 is named 'default', while it is left unnamed in v2. Default is 'service/" +"v2'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "The network protocol for the service to be created. Default is 'TCP'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." +msgstr "" +"The resource requirement requests for this container. For example, " +"'cpu=100m,memory=256Mi'. Note that server side components may assign " +"requests depending on the server configuration, such as limit ranges." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "The type of secret to create" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "現在のロールアウトを取り消す" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "Update resource requests/limits on objects with pod templates" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "リソースのアノテーションを更新する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "リソースのラベルを更新する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88 +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "Update the taints on one or more nodes" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "Username for Docker registry authentication" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "ロールアウトの履歴を表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"Where to output the files. 
If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "dummy restart flag)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217 +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl controls the Kubernetes cluster manager" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using " +#~ "the cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using " +#~ "the cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys " +#~ "instead of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys " +#~ "instead of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ 
"\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/" +#~ "v1/ConfigMap" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, no target CPU utilization specified so a default autoscaling " +#~ "policy will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of " +#~ "pods between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, no target CPU utilization specified so a default autoscaling " +#~ "policy will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of " +#~ "pods between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . 
| kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get" +#~ "\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n" +#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any
point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ 
"\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ 
"\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use " +#~ "a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get 
output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source it from ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source it from ."
+#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\t\tkubectl port-forward pod/mypod 5000 " +#~ "6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the deployment\t\tkubectl port-" +#~ "forward deployment/mydeployment 5000 6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the service\t\tkubectl port-" +#~ "forward service/myservice 5000 6000\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod 8888:5000\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod :5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in the pod\t\tkubectl port-forward pod/mypod 5000 " +#~ "6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the deployment\t\tkubectl port-" +#~ "forward deployment/mydeployment 5000 6000\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from " +#~ "ports 5000 and 6000 in a pod selected by the service\t\tkubectl port-" +#~ "forward service/myservice 5000 6000\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod 8888:5000\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\t" +#~ "\tkubectl port-forward pod/mypod :5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Partially update a node using 
strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified " +#~ "in \"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of 
previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. 
the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml" +#~ "\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the 
path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... \n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" " +#~ "--env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating " +#~ "them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": " +#~ "\"v1\", \"spec\": { ... 
} }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... \n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo 
status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha 
Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." +#~ msgstr "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist beforehand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist beforehand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)."
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. 
If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. 
To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. 
Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). 
Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. 
The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. 
This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." + +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. 
If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." 
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl 
annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Create a deployment with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Create a nodeport service with the specified name." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L136 +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "A schedule in the Cron format the job should be run with." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L134 +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." +#~ msgstr "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "ファイル名または標準入力でリソースにコンフィグを適用する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L55 +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-scale a Deployment, ReplicaSet, or ReplicationController" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L101 +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Create a ClusterRoleBinding for a particular ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L181 +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Create a LoadBalancer service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L124 +#~ msgid "Create a NodePort service." +#~ msgstr "Create a NodePort service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Create a RoleBinding for a particular Role or ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "Create a clusterIP service." +#~ msgstr "Create a clusterIP service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_configmap.go#L59 +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "Create a configmap from a local file, directory or literal value" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Create a deployment with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Create a pod disruption budget with the specified name." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "Create a quota with the specified name." +#~ msgstr "Create a quota with the specified name." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "ファイル名または標準入力でリソースを作成する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L73 +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "Create a secret from a local file, directory or literal value" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L36 +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Create a service using specified subcommand." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L240 +#~ msgid "Create an ExternalName service." +#~ msgstr "Create an ExternalName service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/delete.go#L130 +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/stop.go#L58 +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Deprecated: Gracefully shut down a resource by name or filename" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_node.go#L77 +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Display Resource (CPU/Memory) usage of nodes" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_pod.go#L79 +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Display Resource (CPU/Memory) usage of pods" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top.go#L43 +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Display Resource (CPU/Memory) usage." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo.go#L49 +#~ msgid "Display cluster info" +#~ msgstr "クラスターの情報を表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#~ msgid "Displays the current-context" +#~ msgstr "current-contextを表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/explain.go#L50 +#~ msgid "Documentation of resources" +#~ msgstr "リソースの説明を表示する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L37 +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "Dump lots of relevant info for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L102 +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L105 +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L98 +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout.go#L46 +#~ msgid "Manage a deployment rollout" +#~ msgstr "Manage a deployment rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L115 +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L84 +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Perform a rolling update of the given ReplicationController" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/replace.go#L70 +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Replace a resource by filename or stdin" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L71 +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67 +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "kubeconfigにクラスターエントリを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57 +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "kubeconfigにコンテキストエントリを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103 +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "kubeconfigにユーザーエントリを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59 +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "kubeconfigに個別の変数を設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48 +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "kubeconfigにcurrent-contextを設定する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L87 +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L100 +#~ msgid "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'. 
Only relevant when --image is specified, ignored " +#~ "otherwise" +#~ msgstr "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'. Only relevant when --image is specified, ignored " +#~ "otherwise" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L113 +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L66 +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "The name of the API generator to use. Currently there is only 1 generator." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L133 +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L121 +#~ msgid "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." +#~ msgstr "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L128 +#~ msgid "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." +#~ msgstr "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L101 +#~ msgid "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." +#~ msgstr "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47 +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "kubeconfigから変数を個別に削除する" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/patch.go#L91 +#~ msgid "Update field(s) of a resource using strategic merge patch" +#~ msgstr "Update field(s) of a resource using strategic merge patch" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_image.go#L94 +#~ msgid "Update image of a pod template" +#~ msgstr "Update image of a pod template" + +#~ msgid "" +#~ "View latest last-applied-configuration annotations of a resource/object" +#~ msgstr "" +#~ "View latest last-applied-configuration annotations of a resource/object" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L253 +#~ msgid "external name of service" +#~ msgstr "external name of service" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgstr[0] "" +#~ "watchは単一リソース及びリソースコレクションのみサポートしています- %d個の" +#~ "リソースが見つかりました" +#~ msgstr[1] "" +#~ "watchは単一リソース及びリソースコレクションのみサポートしています- %d個の" +#~ "リソースが見つかりました" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..70398367b1 Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..b6804ef908 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/ko_KR/LC_MESSAGES/k8s.po @@ -0,0 +1,96 @@ +# Test translations for unit tests. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR ianyrchoi@gmail.com, 2018. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2018-04-03 06:05+0900\n" +"Last-Translator: Ian Y. 
Choi \n" +"Language-Team: \n" +"Language: ko_KR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.0.6\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Plural-Forms: nplurals=1; plural=0;\n" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "kubeconfig에서 지정된 클러스터를 삭제합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "kubeconfig에서 지정된 컨텍스트를 삭제합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "하나 또는 여러 컨텍스트를 설명합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "kubeconfig에 정의된 클러스터를 표시합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "병합된 kubeconfig 설정 또는 지정된 kubeconfig 파일을 표시합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "kubeconfig 파일을 수정합니다" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "자원에 대한 주석을 업데이트합니다" + +# https://github.com/kubernetes/kubernetes/blob/masterpkg/kubectl/cmd/apply.go#L98 +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "구성을 파일 이름 또는 stdin에 의한 자원에 적용합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#~ msgid "Displays the current-context" +#~ msgstr "현재-컨텍스트를 표시합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67 +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "kubeconfig에서 클러스터 항목을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57 +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "kubeconfig에서 컨텍스트 항목을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103 +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "kubeconfig에서 사용자 항목을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59 +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "kubeconfig 파일에서 단일값을 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48 +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "kubeconfig 파일에서 현재-컨텍스트를 설정합니다" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47 +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "kubeconfig 파일에서 단일값 설정을 해제합니다" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource 
collections " +#~ "- %d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgstr[0] "" +#~ "watch는 단일 리소스와 리소스 모음만을 지원합니다 - %d 개 자원을 발견하였습" +#~ "니다" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..e3c52ec8a6 Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..dad451281f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/pt_BR/LC_MESSAGES/k8s.po @@ -0,0 +1,3250 @@ +# Brazilian Portuguese translation. +# Copyright (C) 2020 +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR ctadeu@gmail.com, 2020. +# +msgid "" +msgstr "" +"Project-Id-Version: \n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2020-12-11 17:03+0100\n" +"Last-Translator: Carlos Panato \n" +"Language-Team: \n" +"Language: pt_BR\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.4.2\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" +"X-Poedit-KeywordsList: \n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # Mostra as métricas para todos os nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Mostra as métricas para um node específico\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# Mostra a documentação do recurso e seus campos\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Mostra a documentação de um campo específico de um recurso\n" +"\t\tkubectl explain pods.spec.containers" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# Mostra as opções herdadas por todos os comandos\n" +"\t\tkubectl options" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# Imprime a versão do cliente e do servidor para o contexto atual\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# Mostra as versões de API suportadas\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" 
+"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# Mostra as métricas para todos os pods no namespace default\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Mostra as métricas para todos os pods em um dado namespace\n" +"\t\tkubectl top pod —namespace=NAMESPACE\n" +"\n" +"\t\t# Mostra as métricas para um dado pod e seus containers\n" +"\t\tkubectl top pod POD_NAME —containers\n" +"\n" +"\t\t# Mostra as métricas para os pods definidos pelo label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\tConvert os arquivos de configuração para diferentes versões de API. " +"Ambos formatos YAML\n" +"\t\\e JSON são aceitos.\n" +"\n" +"\t\tO command recebe o nome do arquivo, diretório ou URL como entrada, e " +"converteno formato\n" +"\t\tpara a versão especificada pelo parametro —output-version. Se a versão " +"desejada não é especificada ou \n" +"\t\tnão é suportada, converte para a última versã disponível.\n" +"\n" +"\t\tA saída padrão é no formato YAML. Pode ser utilizadoa opção -o\n" +"\t\tpara mudar o formato de saída." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\tCria um namespace com um nome especificado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\tCria uma role com uma única regra." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\tCria uma conta de serviço com um nome especificado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\tRemove a restrição de execução de workloads no node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\tAplica a restrição de execução de workloads no node." + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\tDefine a annotation last-applied-configuration configurando para ser " +"igual ao conteúdo do arquivo.\n" +"\t\tIsto resulta no last-applied-configuration ser atualizado quando o " +"'kubectl apply -f ' executa,\n" +"\t\tnão atualizando as outras partes do objeto." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # Cria um novo namespace chamado my-namespace\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # Cria um novo service account chamado my-service-account\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\tCria um serviço do tipo ExternalName com o nome especificado.\n" +"\n" +"\tServiço ExternalName referencia um endereço externo de DNS ao invés de\n" +"\tapenas pods, o que permite aos desenvolvedores de aplicações referenciar " +"serviços\n" +"\tque existem fora da plataforma, em outros clusters ou localmente." + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" +"\n" +"\tHelp provê ajuda para qualquer comando na aplicação.\n" +"\tDigite simplesmente kubectl help [caminho do comando] para detalhes " +"completos." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # Cria um novo serviço do tipo LoadBalancer chamado my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # Coleta o estado corrente do cluster e exibe no stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Coleta o estado corrente do cluster para /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Coleta informação de todos os namespaces para stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Coleta o conjunto especificado de namespaces para /path/to/cluster-" +"state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" Cria um serviço do tipo LoadBalancer com o nome especificado." 
+ +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" +"Lista de valores delimitados por vírgulas para um conjunto de escopos de " +"quota que devem corresponder para cada objeto rastreado pela quota." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" +"Lista de valores delimitados por vírgulas ajusta os pares resource=quantity " +"que define um limite rígido." + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" +"Um seletor de label a ser usado para o PDB. Apenas seletores baseados em " +"igualdade são suportados." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"Um seletor de label para ser utilizado neste serviço. Apenas seletores " +"baseados em igualdade são suportados. Se vazio (por padrão) o seletor do " +"replication controller ou replica set será utilizado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" +"Um IP externo adicional (não gerenciado pelo Kubernetes) para ser usado no " +"serviço. Se este IP for roteado para um nó, o serviço pode ser acessado por " +"este IP além de seu IP de serviço gerado." + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"Uma substituição inline JSON para o objeto gerado. Se não estiver vazio, ele " +"será usado para substituir o objeto gerado. Requer que o objeto forneça um " +"campo apiVersion válido." + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "Aprova uma solicitação de assinatura de certificado" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" +"Atribuir o seu próprio ClusterIP ou configura para 'None' para um serviço " +"'headless' (sem loadbalancing)." + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "Se conecta a um container em execução" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" +"ClusterIP que será atribuído ao serviço. Deixe vazio para auto atribuição, " +"ou configure para 'None' para criar um serviço headless." 
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101
+msgid "ClusterRole this ClusterRoleBinding should reference"
+msgstr "ClusterRole que esse ClusterRoleBinding deve referenciar"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104
+msgid "ClusterRole this RoleBinding should reference"
+msgstr "ClusterRole que esse RoleBinding deve referenciar"
+
+#: pkg/kubectl/cmd/convert/convert.go:95
+msgid "Convert config files between different API versions"
+msgstr "Converte arquivos de configuração entre versões de API diferentes"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106
+msgid "Copy files and directories to and from containers."
+msgstr "Copia arquivos e diretórios de e para containers."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94
+msgid "Create a TLS secret"
+msgstr "Cria um secret do tipo TLS"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83
+msgid "Create a namespace with the specified name"
+msgstr "Cria um namespace com o nome especificado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134
+msgid "Create a secret for use with a Docker registry"
+msgstr "Cria um secret para ser utilizado com o Docker registry"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49
+msgid "Create a secret using specified subcommand"
+msgstr "Cria um secret utilizando o subcomando especificado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85
+msgid "Create a service account with the specified name"
+msgstr "Cria uma conta de serviço com o nome especificado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42
+msgid "Delete the specified cluster from the kubeconfig"
+msgstr "Apaga o cluster especificado do kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42
+msgid "Delete the specified context from the kubeconfig"
+msgstr "Apaga o contexto especificado do kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174
+msgid "Deny a certificate signing request"
+msgstr "Rejeita uma solicitação de assinatura de certificado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72
+msgid "Describe one or many contexts"
+msgstr "Descreve um ou mais contextos"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41
+msgid "Display clusters defined in the kubeconfig"
+msgstr "Mostra os clusters definidos no kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81
+msgid "Display merged kubeconfig settings or a specified kubeconfig file"
+msgstr ""
+"Mostra as configurações do kubeconfig mescladas ou um arquivo kubeconfig "
+"especificado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165
+msgid "Display one or many resources"
+msgstr "Mostra um ou mais recursos"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184
+msgid "Drain node in preparation for maintenance"
+msgstr "Drena o node em preparação para manutenção"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77
+msgid "Edit a resource on the server"
+msgstr "Edita um recurso no servidor"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152
+msgid "Email for Docker registry"
+msgstr "Email para o Docker registry"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89
+msgid "Execute a command in a container"
+msgstr "Executa um comando em um container"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109
+msgid "Forward one or more local ports to a pod"
+msgstr "Encaminha uma ou mais portas locais para um pod"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37
+msgid "Help about any command"
+msgstr "Ajuda sobre qualquer comando"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160
+msgid ""
+"If non-empty, set the session affinity for the service to this; legal "
+"values: 'None', 'ClientIP'"
+msgstr ""
+"Se não estiver vazio, configura a afinidade de sessão para o serviço; "
+"valores válidos: 'None', 'ClientIP'"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157
+msgid ""
+"If non-empty, the annotation update will only succeed if this is the current "
+"resource-version for the object. Only valid when specifying a single "
+"resource."
+msgstr ""
+"Se não estiver vazio, a atualização das annotations só terá êxito se esta "
+"for a versão atual do recurso para o objeto. Válido apenas ao especificar um "
+"único recurso."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154
+msgid ""
+"If non-empty, the labels update will only succeed if this is the current "
+"resource-version for the object. Only valid when specifying a single "
+"resource."
+msgstr ""
+"Se não estiver vazio, a atualização dos labels só terá êxito se esta for a "
+"versão atual do recurso para o objeto. Válido apenas ao especificar um único "
+"recurso."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98
+msgid "Mark node as schedulable"
+msgstr "Marca o node como agendável"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69
+msgid "Mark node as unschedulable"
+msgstr "Marca o node como não agendável"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83
+msgid "Mark the provided resource as paused"
+msgstr "Marca o recurso fornecido como pausado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50
+msgid "Modify certificate resources."
+msgstr "Modifica recursos de certificado."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42
+msgid "Modify kubeconfig files"
+msgstr "Modifica arquivos kubeconfig"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156
+msgid ""
+"Name or number for the port on the container that the service should direct "
+"traffic to. Optional."
+msgstr ""
+"Nome ou número da porta no container para onde o serviço deve direcionar o "
+"tráfego. Opcional."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174
+msgid ""
+"Only return logs after a specific date (RFC3339). Defaults to all logs. Only "
+"one of since-time / since may be used."
+msgstr ""
+"Retorna apenas os logs posteriores a uma data específica (RFC3339). O padrão "
+"é retornar todos os logs. Apenas um de since-time / since pode ser "
+"utilizado."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112
+msgid "Output shell completion code for the specified shell (bash or zsh)"
+msgstr "Gera o código de autocompletar para o shell especificado (bash ou zsh)"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151
+msgid "Password for Docker registry authentication"
+msgstr "Senha para a autenticação no Docker registry"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110
+msgid "Path to PEM encoded public key certificate."
+msgstr "Caminho para o certificado de chave pública codificado em PEM."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111
+msgid "Path to private key associated with given certificate."
+msgstr "Caminho para a chave privada associada ao certificado fornecido."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130
+msgid ""
+"Precondition for resource version. Requires that the current resource "
+"version match this value in order to scale."
+msgstr ""
+"Pré-condição para a versão do recurso. Requer que a versão atual do recurso "
+"corresponda a este valor para escalar."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73
+msgid "Print the client and server version information"
+msgstr "Mostra a informação de versão do cliente e do servidor"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38
+#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39
+msgid "Print the list of flags inherited by all commands"
+msgstr "Mostra a lista de opções herdadas por todos os comandos"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152
+msgid "Print the logs for a container in a pod"
+msgstr "Mostra os logs de um container em um pod"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87
+msgid "Resume a paused resource"
+msgstr "Retoma um recurso pausado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105
+msgid "Role this RoleBinding should reference"
+msgstr "Role que a RoleBinding deve referenciar"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152
+msgid "Run a particular image on the cluster"
+msgstr "Executa uma imagem específica no cluster"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119
+msgid "Run a proxy to the Kubernetes API server"
+msgstr "Executa um proxy para o servidor de API do Kubernetes"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153
+msgid "Server location for Docker registry"
+msgstr "Localização do servidor para o Docker registry"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39
+msgid "Set specific features on objects"
+msgstr "Define funcionalidades específicas em objetos"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104
+msgid "Set the selector on a resource"
+msgstr "Define um seletor em um recurso"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107
+msgid "Show details of a specific resource or group of resources"
+msgstr "Mostra os detalhes de um recurso específico ou de um grupo de recursos"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102
+msgid "Show the status of the rollout"
+msgstr "Mostra o status do rollout"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154
+msgid "Synonym for --target-port"
+msgstr "Sinônimo para --target-port"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174
+msgid "The image for the container to run."
+msgstr "A imagem a ser executada pelo container."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176
+msgid ""
+"The image pull policy for the container. If left empty, this value will not "
+"be specified by the client and defaulted by the server"
+msgstr ""
+"A política de obtenção de imagens para o container. Se deixada em branco, "
+"este valor não será especificado pelo cliente e o padrão do servidor será "
+"utilizado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111
+msgid ""
+"The minimum number or percentage of available pods this budget requires."
+msgstr ""
+"O número ou porcentagem mínima de pods disponíveis que este budget requer."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159
+msgid "The name for the newly created object."
+msgstr "O nome para o objeto recém-criado."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125
+msgid ""
+"The name for the newly created object. If not specified, the name of the "
+"input resource will be used."
+msgstr ""
+"O nome para o objeto recém-criado. Se não especificado, o nome do recurso "
+"de entrada será utilizado."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147
+msgid ""
+"The name of the API generator to use. There are 2 generators: 'service/v1' "
+"and 'service/v2'. The only difference between them is that service port in "
+"v1 is named 'default', while it is left unnamed in v2. Default is 'service/"
+"v2'."
+msgstr ""
+"O nome do gerador de API a ser usado. Existem 2 geradores: 'service/v1' e "
+"'service/v2'. A única diferença entre eles é que a porta de serviço na v1 é "
+"chamada de 'default', enquanto ela é deixada sem nome na v2. O padrão é "
+"'service/v2'."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148
+msgid "The network protocol for the service to be created. Default is 'TCP'."
+msgstr "O protocolo de rede para o serviço a ser criado. O padrão é 'TCP'."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149
+msgid ""
+"The port that the service should serve on. Copied from the resource being "
+"exposed, if unspecified"
+msgstr ""
+"A porta em que o serviço deve servir. Copiada do recurso sendo exposto, se "
+"não especificada"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194
+msgid ""
+"The resource requirement limits for this container. For example, 'cpu=200m,"
+"memory=512Mi'. Note that server side components may assign limits depending "
+"on the server configuration, such as limit ranges."
+msgstr ""
+"Os limites de recursos requeridos para este container. Por exemplo, "
+"'cpu=200m,memory=512Mi'. Observe que os componentes do lado do servidor "
+"podem atribuir limites, dependendo da configuração do servidor, como "
+"intervalos de limite."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192
+msgid ""
+"The resource requirement requests for this container. For example, "
+"'cpu=100m,memory=256Mi'. Note that server side components may assign "
+"requests depending on the server configuration, such as limit ranges."
+msgstr ""
+"As requisições de recursos para este container. Por exemplo, 'cpu=100m,"
+"memory=256Mi'. Observe que os componentes do lado do servidor podem "
+"atribuir requests, dependendo da configuração do servidor, como intervalos "
+"de limite."
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155
+msgid "The type of secret to create"
+msgstr "O tipo de secret a ser criado"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87
+msgid "Undo a previous rollout"
+msgstr "Desfaz um rollout anterior"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116
+msgid "Update resource requests/limits on objects with pod templates"
+msgstr ""
+"Atualiza os requests/limites de recursos em objetos com templates de pod"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135
+msgid "Update the annotations on a resource"
+msgstr "Atualiza as anotações de um recurso"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133
+msgid "Update the labels on a resource"
+msgstr "Atualiza os labels de um recurso"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109
+msgid "Update the taints on one or more nodes"
+msgstr "Atualiza os taints de um ou mais nodes"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150
+msgid "Username for Docker registry authentication"
+msgstr "Nome de usuário para a autenticação no Docker registry"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83
+msgid "View rollout history"
+msgstr "Visualiza o histórico de rollout"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85
+msgid ""
+"Where to output the files. If empty or '-' uses stdout, otherwise creates a "
+"directory hierarchy in that directory"
+msgstr ""
+"Onde colocar os arquivos de saída. Se vazio ou '-', usa o stdout; caso "
+"contrário, cria uma hierarquia de diretórios nesse diretório"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88
+msgid "dummy restart flag)"
+msgstr "dummy restart flag)"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227
+msgid "kubectl controls the Kubernetes cluster manager"
+msgstr "kubectl controla o gerenciador de cluster do Kubernetes"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using "
+#~ "the cluster-admin ClusterRole\n"
+#~ "\t\t kubectl create clusterrolebinding cluster-admin --"
+#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # Cria o ClusterRoleBinding para user1, user2, e group1 utilizando "
+#~ "o ClusterRole cluster-admin\n"
+#~ "\t\t kubectl create clusterrolebinding cluster-admin --"
+#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin "
+#~ "ClusterRole\n"
+#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+#~ "user=user2 --group=group1"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # Cria uma RoleBinding para user1, user2, e group1 utilizando o "
+#~ "admin ClusterRole\n"
+#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+#~ "user=user2 --group=group1"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config based on folder bar\n"
+#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config with specified keys "
+#~ "instead of file basenames on disk\n"
+#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/"
+#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n"
+#~ "\n"
+#~ "\t\t # Create a new configmap named my-config with key1=config1 and "
+#~ "key2=config2\n"
+#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --"
+#~ "from-literal=key2=config2"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # Cria um novo configmap com o nome de my-config baseado na pasta "
+#~ "bar\n"
+#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+#~ "\n"
+#~ "\t\t # Cria um novo configmap com o nome my-config, onde cada chave "
+#~ "possui o valor especificado em um arquivo distinto no disco\n"
+#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/"
+#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n"
+#~ "\n"
+#~ "\t\t # Cria um novo configmap com o nome de my-config com key1=config1 "
+#~ "e key2=config2\n"
+#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 "
+#~ "--from-literal=key2=config2"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t # If you don't already have a .dockercfg file, you can create a "
+#~ "dockercfg secret directly by using:\n"
+#~ "\t\t kubectl create secret docker-registry my-secret --docker-"
+#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t # Se você ainda não tem o arquivo .dockercfg, você pode gerar "
+#~ "diretamente o dockercfg secret utilizando o comando:\n"
+#~ "\t\t kubectl create secret docker-registry my-secret --docker-"
+#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Apply the configuration in pod.json to a pod.\n"
+#~ "\t\tkubectl apply -f ./pod.json\n"
+#~ "\n"
+#~ "\t\t# Apply the JSON passed into stdin to a pod.\n"
+#~ "\t\tcat pod.json | kubectl apply -f -\n"
+#~ "\n"
+#~ "\t\t# Note: --prune is still in Alpha\n"
+#~ "\t\t# Apply the configuration in manifest.yaml that matches label "
+#~ "app=nginx and delete all the other resources that are not in the file and "
+#~ "match label app=nginx.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+#~ "\n"
+#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other "
+#~ "configmaps that are not in the file.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "v1/ConfigMap"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Aplica a configuração do arquivo pod.json a um pod.\n"
+#~ "\t\tkubectl apply -f ./pod.json\n"
+#~ "\n"
+#~ "\t\t# Aplica o JSON recebido via stdin para um pod.\n"
+#~ "\t\tcat pod.json | kubectl apply -f -\n"
+#~ "\n"
+#~ "\t\t# Nota: --prune ainda está em Alpha\n"
+#~ "\t\t# Aplica a configuração do manifest.yaml que contém o label "
+#~ "app=nginx e remove todos os outros recursos que não estejam no arquivo e "
+#~ "não contenham o label app=nginx.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+#~ "\n"
+#~ "\t\t# Aplica a configuração do manifest.yaml e remove todos os outros "
+#~ "configmaps que não estão no arquivo.\n"
+#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/"
+#~ "v1/ConfigMap"
+
+#, c-format
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 "
+#~ "and 10, no target CPU utilization specified so a default autoscaling "
+#~ "policy will be used:\n"
+#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+#~ "\n"
+#~ "\t\t# Auto scale a replication controller \"foo\", with the number of "
+#~ "pods between 1 and 5, target CPU utilization at 80%:\n"
+#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Escala automaticamente um deployment \"foo\", com o número de pods "
+#~ "entre 2 e 10, sem especificar a utilização de CPU, o padrão da política "
+#~ "de autoscaling será utilizado:\n"
+#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+#~ "\n"
+#~ "\t\t# Escala automaticamente um replication controller \"foo\", com o "
+#~ "número de pods entre 1 e 5, e definindo a utilização da CPU em 80%:\n"
+#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n"
+#~ "\t\tkubectl convert -f pod.yaml\n"
+#~ "\n"
+#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to "
+#~ "the latest version\n"
+#~ "\t\t# and print to stdout in json format.\n"
+#~ "\t\tkubectl convert -f pod.yaml --local -o json\n"
+#~ "\n"
+#~ "\t\t# Convert all files under current directory to latest version and "
+#~ "create them all.\n"
+#~ "\t\tkubectl convert -f . | kubectl create -f -"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Converte o arquivo 'pod.yaml' para a versão mais atual e imprime a "
+#~ "saída para o stdout.\n"
+#~ "\t\tkubectl convert -f pod.yaml\n"
+#~ "\n"
+#~ "\t\t# Converte o estado atual do recurso especificado pelo 'pod.yaml' "
+#~ "para a versão mais atual\n"
+#~ "\t\t# e imprime a saída para o stdout no formato json.\n"
+#~ "\t\tkubectl convert -f pod.yaml --local -o json\n"
+#~ "\n"
+#~ "\t\t# Converte todos os arquivos dentro do diretório atual para a versão "
+#~ "mais recente e cria todos.\n"
+#~ "\t\tkubectl convert -f . | kubectl create -f -"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to "
+#~ "perform \"get\", \"watch\" and \"list\" on pods\n"
+#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+#~ "resource=pods\n"
+#~ "\n"
+#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName "
+#~ "specified\n"
+#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+#~ "resource=pods --resource-name=readablepod"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Cria um ClusterRole com o nome de \"pod-reader\" que permite o "
+#~ "usuário realizar \"get\", \"watch\" e \"list\" em pods\n"
+#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+#~ "resource=pods\n"
+#~ "\n"
+#~ "\t\t# Cria a ClusterRole com o nome de \"pod-reader\" com um ResourceName "
+#~ "especificado\n"
+#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --"
+#~ "resource=pods --resource-name=readablepod"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Create a Role named \"pod-reader\" that allows user to perform \"get"
+#~ "\", \"watch\" and \"list\" on pods\n"
+#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --"
+#~ "resource=pods\n"
+#~ "\n"
+#~ "\t\t# Create a Role named \"pod-reader\" with ResourceName specified\n"
+#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --"
+#~ "resource=pods --resource-name=readablepod"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Cria uma Role com o nome de \"pod-reader\" que permite o usuário "
+#~ "realizar \"get\", \"watch\" e \"list\" em pods\n"
+#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --"
+#~ "resource=pods\n"
+#~ "\n"
+#~ "\t\t# Cria uma Role com o nome de \"pod-reader\" com um ResourceName "
+#~ "especificado\n"
+#~ "\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --"
+#~ "resource=pods --resource-name=readablepod"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Create a new resourcequota named my-quota\n"
+#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,"
"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2," +#~ "services=3,replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um novo resourcequota com o nome de my-quota\n" +#~ "\t\tkubectl create quota my-quota —hard=cpu=1,memory=1G,pods=2,services=3," +#~ "replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Cria um novo resourcequota com o nome de best-effort\n" +#~ "\t\tkubectl create quota best-effort —hard=pods=100 —scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um pod disruption budget com o nome de my-pdb que irá " +#~ "selecionar todos os pods com o label app=rails\n" +#~ "\t\t# e requer que pelo menos um deles esteja disponível a qualquer " +#~ "momento.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb —selector=app=rails —min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Cria um pod disruption budget com o nome de my-pdb que irá " +#~ "selecionar todos os pods com o label app=nginx\n" +#~ "\t\t# e requer pelo menos que metade dos pods selecionados estejam " +#~ "disponíveis em qualquer momento.\n" +#~ "\t\tkubectl create pdb my-pdb —selector=app=nginx —min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um pod utilizando o arquivo pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Cria um pod utilizando o JSON recebido via stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edita o conteúdo do arquivo docker-registry.yaml em JSON utilizando " +#~ "o formato da API v1, criando o recurso com o conteúdo editado.\n" +#~ "\t\tkubectl create -f docker-registry.yaml —edit —output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type " +#~ "and name specified in \"nginx-controller.yaml\", which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ 
"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port " +#~ "4100 balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Cria um serviço para um nginx replicado, que escuta na porta 80 e " +#~ "conecta na porta 8000 dos containers.\n" +#~ "\\t\tkubectl expose rc nginx —port=80 —target-port=8000\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um replication controller identificado por " +#~ "tipo e com o nome especificado em \"nginx-controller.yaml\", que escuta " +#~ "na porta 80 e conecta na porta 8000 dos containers.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml —port=80 —target-port=8000\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um pod valid-pod, que escuta na porta 444 com " +#~ "o nome \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod —port=444 —name=frontend\n" +#~ "\n" +#~ "\t\t# Cria um segundo serviço baseado no serviço acima, expondo a porta " +#~ "8443 do container como porta 443 e com nome \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx —port=443 —target-port=8443 —name=nginx-" +#~ "https\n" +#~ "\n" +#~ "\t\t# Cria um serviço para uma aplicação streaming replicada na porta " +#~ "4100 com trafico balanceado UDP e nome 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer —port=4100 —protocol=udp —name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um nginx replicado usando o replica set, que " +#~ "escuta na porta 80 e conecta na porta 8000 dos containers.\n" +#~ "\t\tkubectl expose rs nginx —port=80 —target-port=8000\n" +#~ "\n" +#~ "\t\t# Cria um serviço para um deployment nginx, que escuta na porta 80 e " +#~ "conecta na porta 8000 dos containers.\n" +#~ "\t\tkubectl expose deployment nginx —port=80 —target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ 
+#~ "\n"
+#~ "\t\t# Remove um pod usando o tipo e nome especificado no arquivo pod."
+#~ "json.\n"
+#~ "\t\tkubectl delete -f ./pod.json\n"
+#~ "\n"
+#~ "\t\t# Remove um pod baseado no tipo e nome no JSON passado via entrada "
+#~ "padrão (stdin).\n"
+#~ "\t\tcat pod.json | kubectl delete -f -\n"
+#~ "\n"
+#~ "\t\t# Remove pods e serviços com os nomes \"baz\" e \"foo\"\n"
+#~ "\t\tkubectl delete pod,service baz foo\n"
+#~ "\n"
+#~ "\t\t# Remove pods e serviços com label name=myLabel.\n"
+#~ "\t\tkubectl delete pods,services -l name=myLabel\n"
+#~ "\n"
+#~ "\t\t# Remove um pod com um mínimo de delay\n"
+#~ "\t\tkubectl delete pod foo --now\n"
+#~ "\n"
+#~ "\t\t# Força a remoção de um pod em um node morto\n"
+#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n"
+#~ "\n"
+#~ "\t\t# Remove todos os pods\n"
+#~ "\t\tkubectl delete pods --all"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Describe a node\n"
+#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n"
+#~ "\n"
+#~ "\t\t# Describe a pod\n"
+#~ "\t\tkubectl describe pods/nginx\n"
+#~ "\n"
+#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n"
+#~ "\t\tkubectl describe -f pod.json\n"
+#~ "\n"
+#~ "\t\t# Describe all pods\n"
+#~ "\t\tkubectl describe pods\n"
+#~ "\n"
+#~ "\t\t# Describe pods by label name=myLabel\n"
+#~ "\t\tkubectl describe po -l name=myLabel\n"
+#~ "\n"
+#~ "\t\t# Describe all pods managed by the 'frontend' replication controller "
+#~ "(rc-created pods\n"
+#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n"
+#~ "\t\tkubectl describe pods frontend"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Descreve um node\n"
+#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n"
+#~ "\n"
+#~ "\t\t# Descreve um pod\n"
+#~ "\t\tkubectl describe pods/nginx\n"
+#~ "\n"
+#~ "\t\t# Descreve um pod identificado pelo tipo e nome no arquivo \"pod.json"
+#~ "\"\n"
+#~ "\t\tkubectl describe -f pod.json\n"
+#~ "\n"
+#~ "\t\t# Descreve todos os pods\n"
+#~ "\t\tkubectl describe pods\n"
+#~ "\n"
+#~ "\t\t# Descreve os pods com label name=myLabel\n"
+#~ "\t\tkubectl describe po -l name=myLabel\n"
+#~ "\n"
+#~ "\t\t# Descreve todos os pods gerenciados pelo replication controller "
+#~ "'frontend' (pods criados pelo rc\n"
+#~ "\t\t# têm o nome do rc como prefixo no nome do pod).\n"
+#~ "\t\tkubectl describe pods frontend"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Drain node \"foo\", even if there are pods not managed by a "
+#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n"
+#~ "\t\t$ kubectl drain foo --force\n"
+#~ "\n"
+#~ "\t\t# As above, but abort if there are pods not managed by a "
+#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet, and use "
+#~ "a grace period of 15 minutes.\n"
+#~ "\t\t$ kubectl drain foo --grace-period=900"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Drena o node \"foo\", mesmo se houver pods não gerenciados por um "
+#~ "ReplicationController, ReplicaSet, Job, DaemonSet ou StatefulSet.\n"
+#~ "\t\t$ kubectl drain foo --force\n"
+#~ "\n"
+#~ "\t\t# Como acima, mas interrompe se houver pods não gerenciados por um "
+#~ "ReplicationController, ReplicaSet, Job, DaemonSet ou StatefulSet, e usa "
+#~ "um período de espera de 15 minutos.\n"
+#~ "\t\t$ kubectl drain foo --grace-period=900"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Edit the service named 'docker-registry':\n"
+#~ "\t\tkubectl edit svc/docker-registry\n"
+#~ "\n"
+#~ "\t\t# Use an alternative editor\n"
+#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n"
+#~ "\n"
+#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n"
"\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Edita o serviço chamado 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Usa um editor alternativo\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edita o Job 'myjob' em JSON utilizando o format da API v1:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edita o deployment 'mydeployment' em YAML e salva a configuração " +#~ "modificada em sua annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml —save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the " +#~ "first container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Pega a saída de execução do comando 'date' do pod 123456-7890, " +#~ "usando o primeiro container por padrão\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Pega a saída de execução do comando 'date' no ruby-container do pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Troca para raw terminal mode, envia stdin para o 'bash' no ruby-" +#~ "container do pod 123456-7890\n" +#~ "\t\t# e envia stdout/stderr do 'bash' de volta para o cliente\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t — bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-" +#~ "container from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Pega a saída do pod em execução 123456-7890, utilizando o primeiro " +#~ "container por padrão\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Pega a saída do ruby-container do pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Troca para raw terminal mode, envia stdin para o 'bash' no ruby-" +#~ "container do pod 123456-7890\n" +#~ "\t\t# e envia stdout/stderr do 'bash' de volta para o cliente\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Pega a saída do primeiro pod de um ReplicaSet chamado nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew 
+#~ "\t\tprintf \"\n"
+#~ "# Bash completion support\n"
+#~ "source $(brew --prefix)/etc/bash_completion\n"
+#~ "\" >> $HOME/.bash_profile\n"
+#~ "\t\tsource $HOME/.bash_profile\n"
+#~ "\n"
+#~ "\t\t# Load the kubectl completion code for bash into the current shell\n"
+#~ "\t\tsource <(kubectl completion bash)\n"
+#~ "\n"
+#~ "\t\t# Write bash completion code to a file and source if from ."
+#~ "bash_profile\n"
+#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n"
+#~ "\t\tprintf \"\n"
+#~ "# Kubectl shell completion\n"
+#~ "source '$HOME/.kube/completion.bash.inc'\n"
+#~ "\" >> $HOME/.bash_profile\n"
+#~ "\t\tsource $HOME/.bash_profile\n"
+#~ "\n"
+#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n"
+#~ "\t\tsource <(kubectl completion zsh)"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Instala o autocompletar do bash no Mac utilizando homebrew\n"
+#~ "\t\tbrew install bash-completion\n"
+#~ "\t\tprintf \"\n"
+#~ "# Bash completion support\n"
+#~ "source $(brew --prefix)/etc/bash_completion\n"
+#~ "\" >> $HOME/.bash_profile\n"
+#~ "\t\tsource $HOME/.bash_profile\n"
+#~ "\n"
+#~ "\t\t# Carrega o código de autocompletar do kubectl para o bash no shell "
+#~ "corrente\n"
+#~ "\t\tsource <(kubectl completion bash)\n"
+#~ "\n"
+#~ "\t\t# Escreve o código de autocompletar do bash em um arquivo e faz o "
+#~ "source a partir do .bash_profile\n"
+#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n"
+#~ "\t\tprintf \"\n"
+#~ "# Kubectl shell completion\n"
+#~ "source '$HOME/.kube/completion.bash.inc'\n"
+#~ "\" >> $HOME/.bash_profile\n"
+#~ "\t\tsource $HOME/.bash_profile\n"
+#~ "\n"
+#~ "\t\t# Carrega o código de autocompletar do kubectl para zsh[1] no shell "
+#~ "em utilização\n"
+#~ "\t\tsource <(kubectl completion zsh)"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# List all pods in ps output format.\n"
+#~ "\t\tkubectl get pods\n"
+#~ "\n"
+#~ "\t\t# List all pods in ps output format with more information (such as "
+#~ "node name).\n"
+#~ "\t\tkubectl get pods -o wide\n"
+#~ "\n"
+#~ "\t\t# List a single replication controller with specified NAME in ps "
+#~ "output format.\n"
+#~ "\t\tkubectl get replicationcontroller web\n"
+#~ "\n"
+#~ "\t\t# List a single pod in JSON output format.\n"
+#~ "\t\tkubectl get -o json pod web-pod-13je7\n"
+#~ "\n"
+#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in "
+#~ "JSON output format.\n"
+#~ "\t\tkubectl get -f pod.yaml -o json\n"
+#~ "\n"
+#~ "\t\t# Return only the phase value of the specified pod.\n"
+#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status."
+#~ "phase}}\n"
+#~ "\n"
+#~ "\t\t# List all replication controllers and services together in ps output "
+#~ "format.\n"
+#~ "\t\tkubectl get rc,services\n"
+#~ "\n"
+#~ "\t\t# List one or more resources by their type and names.\n"
+#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n"
+#~ "\n"
+#~ "\t\t# List all resources with different types.\n"
+#~ "\t\tkubectl get all"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Lista todos os pods no formato de saída ps.\n"
+#~ "\t\tkubectl get pods\n"
+#~ "\n"
+#~ "\t\t# Lista todos os pods no formato de saída ps com mais informações "
+#~ "(como o nome do node).\n"
+#~ "\t\tkubectl get pods -o wide\n"
+#~ "\n"
+#~ "\t\t# Lista um único replication controller com o nome especificado no "
+#~ "formato de saída ps\n"
+#~ "\t\tkubectl get replicationcontroller web\n"
+#~ "\n"
+#~ "\t\t# Lista um único pod e usa o formato de saída JSON.\n"
+#~ "\t\tkubectl get -o json pod web-pod-13je7\n"
+#~ "\n"
+#~ "\t\t# Lista o pod identificado com o tipo e nome especificado no \"pod."
+#~ "yaml\" e usa o formato de saída JSON.\n"
+#~ "\t\tkubectl get -f pod.yaml -o json\n"
+#~ "\n"
+#~ "\t\t# Retorna apenas o valor de phase do pod especificado.\n"
+#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status."
+#~ "phase}}\n"
+#~ "\n"
+#~ "\t\t# Lista todos os replication controllers e services juntos no formato "
+#~ "de saída ps.\n"
+#~ "\t\tkubectl get rc,services\n"
+#~ "\n"
+#~ "\t\t# Lista um ou mais recursos pelo seu tipo e nomes.\n"
+#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n"
+#~ "\n"
+#~ "\t\t# Lista todos os recursos de tipos diferentes.\n"
+#~ "\t\tkubectl get all"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from "
+#~ "ports 5000 and 6000 in the pod\n"
+#~ "\t\tkubectl port-forward mypod 5000 6000\n"
+#~ "\n"
+#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n"
+#~ "\t\tkubectl port-forward mypod 8888:5000\n"
+#~ "\n"
+#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n"
+#~ "\t\tkubectl port-forward mypod :5000\n"
+#~ "\n"
+#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n"
+#~ "\t\tkubectl port-forward mypod 0:5000"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Escuta nas portas locais 5000 e 6000, e redireciona os dados de/"
+#~ "para as portas 5000 e 6000 no pod\n"
+#~ "\t\tkubectl port-forward mypod 5000 6000\n"
+#~ "\n"
+#~ "\t\t# Escuta na porta 8888 localmente, e redireciona para a porta 5000 "
+#~ "no pod\n"
+#~ "\t\tkubectl port-forward mypod 8888:5000\n"
+#~ "\n"
+#~ "\t\t# Escuta em uma porta local aleatória, e redireciona para a porta "
+#~ "5000 no pod\n"
+#~ "\t\tkubectl port-forward mypod :5000\n"
+#~ "\n"
+#~ "\t\t# Escuta em uma porta local aleatória, e redireciona para a porta "
+#~ "5000 no pod\n"
+#~ "\t\tkubectl port-forward mypod 0:5000"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Mark node \"foo\" as schedulable.\n"
+#~ "\t\t$ kubectl uncordon foo"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Remove a restrição de execução de Pods no node \"foo\".\n"
+#~ "\t\t$ kubectl uncordon foo"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Mark node \"foo\" as unschedulable.\n"
+#~ "\t\tkubectl cordon foo"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Restringe a execução de novos Pods no node \"foo\".\n"
+#~ "\t\tkubectl cordon foo"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Partially update a node using strategic merge patch\n"
+#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":"
+#~ "true}}'\n"
+#~ "\n"
+#~ "\t\t# Partially update a node identified by the type and name specified "
+#~ "in \"node.json\" using strategic merge patch\n"
+#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n"
+#~ "\n"
+#~ "\t\t# Update a container's image; spec.containers[*].name is required "
+#~ "because it's a merge key\n"
+#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":"
+#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n"
+#~ "\n"
+#~ "\t\t# Update a container's image using a json patch with positional "
+#~ "arrays\n"
+#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", "
+#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Atualiza parcialmente um node utilizando a estratégia merge patch\n"
+#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":"
+#~ "true}}'\n"
+#~ "\n"
+#~ "\t\t# Atualiza parcialmente um node identificado pelo tipo e nome no "
+#~ "arquivo \"node.json\" utilizando a estratégia merge patch\n"
+#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n"
+#~ "\n"
+#~ "\t\t# Atualiza a imagem de um container; spec.containers[*].name é "
+#~ "requerido pois é uma chave de merge\n"
+#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":"
+#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n"
+#~ "\n"
+#~ "\t\t# Atualiza a imagem de um container utilizando um json patch com "
+#~ "positional arrays\n"
+#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", "
+#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Print the address of the master and cluster services\n"
+#~ "\t\tkubectl cluster-info"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Mostra o endereço do servidor de gerenciamento e dos serviços do "
+#~ "cluster\n"
+#~ "\t\tkubectl cluster-info"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Replace a pod using the data in pod.json.\n"
+#~ "\t\tkubectl replace -f ./pod.json\n"
+#~ "\n"
+#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n"
+#~ "\t\tcat pod.json | kubectl replace -f -\n"
+#~ "\n"
+#~ "\t\t# Update a single-container pod's image version (tag) to v4\n"
+#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' "
+#~ "| kubectl replace -f -\n"
+#~ "\n"
+#~ "\t\t# Force replace, delete and then re-create the resource\n"
+#~ "\t\tkubectl replace --force -f ./pod.json"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Substitui um pod utilizando os dados contidos em pod.json.\n"
+#~ "\t\tkubectl replace -f ./pod.json\n"
+#~ "\n"
+#~ "\t\t# Substitui um pod com base no JSON fornecido no stdin.\n"
+#~ "\t\tcat pod.json | kubectl replace -f -\n"
+#~ "\n"
+#~ "\t\t# Atualiza a versão da imagem (tag) de um pod com um único container "
+#~ "para v4\n"
+#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' "
+#~ "| kubectl replace -f -\n"
+#~ "\n"
+#~ "\t\t# Força a substituição, removendo e recriando o recurso\n"
+#~ "\t\tkubectl replace --force -f ./pod.json"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Return snapshot logs from pod nginx with only one container\n"
+#~ "\t\tkubectl logs nginx\n"
+#~ "\n"
+#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n"
+#~ "\t\tkubectl logs -lapp=nginx\n"
+#~ "\n"
+#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod "
+#~ "web-1\n"
+#~ "\t\tkubectl logs -p -c ruby web-1\n"
+#~ "\n"
+#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n"
+#~ "\t\tkubectl logs -f -c ruby web-1\n"
+#~ "\n"
+#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n"
+#~ "\t\tkubectl logs --tail=20 nginx\n"
+#~ "\n"
+#~ "\t\t# Show all logs from pod nginx written in the last hour\n"
+#~ "\t\tkubectl logs --since=1h nginx\n"
+#~ "\n"
+#~ "\t\t# Return snapshot logs from first container of a job named hello\n"
+#~ "\t\tkubectl logs job/hello\n"
+#~ "\n"
+#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named "
+#~ "nginx\n"
+#~ "\t\tkubectl logs deployment/nginx -c nginx-1"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Retorna os logs do pod nginx com um único container\n"
+#~ "\t\tkubectl logs nginx\n"
+#~ "\n"
+#~ "\t\t# Retorna os logs dos pods definidos pelo label app=nginx\n"
+#~ "\t\tkubectl logs -lapp=nginx\n"
+#~ "\n"
+#~ "\t\t# Retorna os logs do container ruby anteriormente finalizado do pod "
+#~ "web-1\n"
+#~ "\t\tkubectl logs -p -c ruby web-1\n"
+#~ "\n"
+#~ "\t\t# Começa o streaming de logs de um ruby container no pod web-1\n"
+#~ "\t\tkubectl logs -f -c ruby web-1\n"
+#~ "\n"
+#~ "\t\t# Mostra apenas as 20 linhas mais recentes de saída do pod nginx\n"
+#~ "\t\tkubectl logs --tail=20 nginx\n"
+#~ "\n"
+#~ "\t\t# Mostra todos os logs do pod nginx escritos na última hora\n"
+#~ "\t\tkubectl logs --since=1h nginx\n"
+#~ "\n"
+#~ "\t\t# Retorna os logs do primeiro container do Job chamado hello\n"
+#~ "\t\tkubectl logs job/hello\n"
+#~ "\n"
+#~ "\t\t# Retorna os logs do container nginx-1 de um deployment chamado "
+#~ "nginx\n"
+#~ "\t\tkubectl logs deployment/nginx -c nginx-1"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static "
+#~ "content from ./local/www/\n"
+#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n"
+#~ "\n"
+#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n"
+#~ "\t\t# The chosen port for the server will be output to stdout.\n"
+#~ "\t\tkubectl proxy --port=0\n"
+#~ "\n"
+#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-"
+#~ "api\n"
+#~ "\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/"
+#~ "pods/\n"
+#~ "\t\tkubectl proxy --api-prefix=/k8s-api"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Executa um proxy para o apiserver do kubernetes na porta 8011, "
+#~ "servindo um conteúdo estático do caminho ./local/www/\n"
+#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n"
+#~ "\n"
+#~ "\t\t# Executa um proxy para o apiserver do kubernetes em uma porta local "
+#~ "arbitrária.\n"
+#~ "\t\t# A porta escolhida para o servidor será exibida no stdout.\n"
+#~ "\t\tkubectl proxy --port=0\n"
+#~ "\n"
+#~ "\t\t# Executa um proxy para o apiserver do kubernetes, mudando o prefixo "
+#~ "da api para k8s-api\n"
+#~ "\t\t# Com isso a api dos pods estará disponível em localhost:8001/k8s-"
+#~ "api/v1/pods/\n"
+#~ "\t\tkubectl proxy --api-prefix=/k8s-api"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Scale a replicaset named 'foo' to 3.\n"
+#~ "\t\tkubectl scale --replicas=3 rs/foo\n"
+#~ "\n"
+#~ "\t\t# Scale a resource identified by type and name specified in \"foo.yaml"
+#~ "\" to 3.\n"
+#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n"
+#~ "\n"
+#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to "
+#~ "3.\n"
+#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n"
+#~ "\n"
+#~ "\t\t# Scale multiple replication controllers.\n"
+#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n"
+#~ "\n"
+#~ "\t\t# Scale job named 'cron' to 3.\n"
+#~ "\t\tkubectl scale --replicas=3 job/cron"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Escala um replicaset chamado 'foo' para 3.\n"
+#~ "\t\tkubectl scale --replicas=3 rs/foo\n"
+#~ "\n"
+#~ "\t\t# Escala um recurso identificado pelo tipo e nome especificado no "
+#~ "arquivo \"foo.yaml\" para 3.\n"
+#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n"
+#~ "\n"
+#~ "\t\t# Se o deployment chamado mysql tiver tamanho atual 2, escala o "
+#~ "mysql para 3.\n"
+#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n"
+#~ "\n"
+#~ "\t\t# Escala múltiplos replication controllers.\n"
+#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n"
+#~ "\n"
+#~ "\t\t# Escala um Job chamado 'cron' para 3.\n"
+#~ "\t\tkubectl scale --replicas=3 job/cron"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Set the last-applied-configuration of a resource to match the "
+#~ "contents of a file.\n"
+#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n"
+#~ "\n"
+#~ "\t\t# Execute set-last-applied against each configuration file in a "
+#~ "directory.\n"
+#~ "\t\tkubectl apply set-last-applied -f path/\n"
+#~ "\n"
+#~ "\t\t# Set the last-applied-configuration of a resource to match the "
+#~ "contents of a file, will create the annotation if it does not already "
+#~ "exist.\n"
+#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-"
+#~ "annotation=true\n"
+#~ "\t\t"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Ajusta o last-applied-configuration de um recurso para corresponder "
+#~ "ao conteúdo de um arquivo.\n"
+#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n"
+#~ "\n"
+#~ "\t\t# Executa o set-last-applied em todos os arquivos de configuração no "
+#~ "diretório.\n"
+#~ "\t\tkubectl apply set-last-applied -f path/\n"
+#~ "\n"
+#~ "\t\t# Ajusta o last-applied-configuration de um recurso para corresponder "
+#~ "ao conteúdo de um arquivo; cria a annotation se ela ainda não existir.\n"
+#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-"
+#~ "annotation=true\n"
+#~ "\t\t"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Shut down foo.\n"
+#~ "\t\tkubectl stop replicationcontroller foo\n"
+#~ "\n"
+#~ "\t\t# Stop pods and services with label name=myLabel.\n"
+#~ "\t\tkubectl stop pods,services -l name=myLabel\n"
+#~ "\n"
+#~ "\t\t# Shut down the service defined in service.json\n"
+#~ "\t\tkubectl stop -f service.json\n"
+#~ "\n"
+#~ "\t\t# Shut down all resources in the path/to/resources directory\n"
+#~ "\t\tkubectl stop -f path/to/resources"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Termina o replicationcontroller foo.\n"
+#~ "\t\tkubectl stop replicationcontroller foo\n"
+#~ "\n"
+#~ "\t\t# Para os pods e serviços com o label name=myLabel.\n"
+#~ "\t\tkubectl stop pods,services -l name=myLabel\n"
+#~ "\n"
+#~ "\t\t# Termina o serviço definido no arquivo service.json\n"
+#~ "\t\tkubectl stop -f service.json\n"
+#~ "\n"
+#~ "\t\t# Termina todos os recursos no caminho do diretório path/to/"
+#~ "resources\n"
+#~ "\t\tkubectl stop -f path/to/resources"
+
+#~ msgid ""
+#~ "\n"
+#~ "\t\t# Start a single instance of nginx.\n"
+#~ "\t\tkubectl run nginx --image=nginx\n"
+#~ "\n"
+#~ "\t\t# Start a single instance of hazelcast and let the container expose "
+#~ "port 5701 .\n"
+#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n"
+#~ "\n"
+#~ "\t\t# Start a single instance of hazelcast and set environment variables "
+#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n"
+#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" "
+#~ "--env=\"POD_NAMESPACE=default\"\n"
+#~ "\n"
+#~ "\t\t# Start a replicated instance of nginx.\n"
+#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n"
+#~ "\n"
+#~ "\t\t# Dry run. Print the corresponding API objects without creating "
+#~ "them.\n"
+#~ "\t\tkubectl run nginx --image=nginx --dry-run\n"
+#~ "\n"
+#~ "\t\t# Start a single instance of nginx, but overload the spec of the "
+#~ "deployment with a partial set of values parsed from JSON.\n"
+#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": "
+#~ "\"v1\", \"spec\": { ... } }'\n"
+#~ "\n"
+#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart "
+#~ "it if it exits.\n"
+#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n"
+#~ "\n"
+#~ "\t\t# Start the nginx container using the default command, but use custom "
+#~ "arguments (arg1 .. argN) for that command.\n"
+#~ "\t\tkubectl run nginx --image=nginx -- ... \n"
+#~ "\n"
+#~ "\t\t# Start the nginx container using a different command and custom "
+#~ "arguments.\n"
+#~ "\t\tkubectl run nginx --image=nginx --command -- ... \n"
+#~ "\n"
+#~ "\t\t# Start the perl container to compute π to 2000 places and print it "
+#~ "out.\n"
+#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -"
+#~ "wle 'print bpi(2000)'\n"
+#~ "\n"
+#~ "\t\t# Start the cron job to compute π to 2000 places and print it out "
+#~ "every 5 minutes.\n"
+#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --"
+#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'"
+#~ msgstr ""
+#~ "\n"
+#~ "\t\t# Inicia uma única instância de nginx.\n"
+#~ "\t\tkubectl run nginx --image=nginx\n"
+#~ "\n"
+#~ "\t\t# Inicia uma única instância do hazelcast e expõe a porta 5701 do "
+#~ "container.\n"
+#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n"
+#~ "\n"
+#~ "\t\t# Inicia uma única instância do hazelcast e define as variáveis de "
+#~ "ambiente \"DNS_DOMAIN=cluster\" e \"POD_NAMESPACE=default\" no "
+#~ "container.\n"
+#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" "
+#~ "--env=\"POD_NAMESPACE=default\"\n"
+#~ "\n"
+#~ "\t\t# Inicia uma instância replicada de nginx.\n"
+#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n"
+#~ "\n"
+#~ "\t\t# Dry run. Mostra os objetos da API correspondentes sem criá-los.\n"
+#~ "\t\tkubectl run nginx --image=nginx --dry-run\n"
+#~ "\n"
+#~ "\t\t# Inicia uma única instância de nginx, mas sobrescreve a spec do "
+#~ "deployment com um conjunto parcial de valores extraídos do JSON.\n"
+#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": "
+#~ "\"v1\", \"spec\": { ... } }'\n"
+#~ "\n"
+#~ "\t\t# Inicia um pod de busybox e o mantém em primeiro plano, sem "
+#~ "reiniciá-lo se ele terminar.\n"
+#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n"
+#~ "\n"
+#~ "\t\t# Inicia um container nginx usando o comando padrão, mas utiliza "
+#~ "argumentos customizados (arg1 .. argN) para o comando.\n"
argN) para o comando.\n" +#~ "\t\tkubectl run nginx —image=nginx — \n" +#~ "\n" +#~ "\t\t# Inicia um container nginx usando um comando diferente e argumentos " +#~ "customizados.\n" +#~ "\t\tkubectl run nginx —image=nginx —command — \n" +#~ "\n" +#~ "\t\t# Inicia um container perl para processar π to 2000 posições e mostra " +#~ "a saída.\n" +#~ "\t\tkubectl run pi —image=perl —restart=OnFailure — perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Inicia um cron job para processar as 2000 posições de π e mostra a " +#~ "saída a cada 5 minutos.\n" +#~ "\t\tkubectl run pi —schedule=\"0/5 * * * ?\" —image=perl —" +#~ "restart=OnFailure — perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Atualiza a restrição para a chave 'dedicated' e o valor 'special-" +#~ "user' e o efeito 'NoSchedule' para o node 'foo'.\n" +#~ "\t\t# Se o taint com esta chave e efeito já existirem, o seu valor é " +#~ "substituído pelo especificado.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove a restrição com a chave 'dedicated' e efeito 'NoSchedule' do " +#~ "nodo 'foo' se existir.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove o node 'foo' todos os taints com a chave 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Atualiza o pod 'foo' com o label 'unhealthy' e valor 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Atualiza o pod 'foo' com o label 'status' e valor 'unhealthy', " +#~ "sobrescrevendo qualquer valor existente.\n" +#~ "\t\tkubectl label —overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Atualiza todos os pods no namespace corrente\n" +#~ "\t\tkubectl label pods —all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Atualiza o pod identificado pelo tipo e nome em \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# 
Atualiza o pod 'foo' apenas se o recurso não foi modificado na " +#~ "versão 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Atualiza o pod 'foo' removendo o label chamado 'bar', se ele " +#~ "existir.\n" +#~ "\t\t# Não necessita da flag --overwrite.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Atualiza os pods de frontend-v1 utilizando os dados do novo " +#~ "replication controller definido em frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Atualiza os pods do frontend-v1 utilizando os dados em JSON " +#~ "passados pelo stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Atualiza os pods do frontend-v1 para frontend-v2 trocando a imagem, " +#~ "e trocando o\n" +#~ "\t\t# nome do replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Atualiza os pods do frontend trocando a imagem, e mantendo o nome " +#~ "antigo.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Cancela e reverte um rollout existente em progresso (de frontend-v1 " +#~ "para frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Visualiza a anotação last-applied-configuration pelo tipo/nome no " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# Visualiza a anotação last-applied-configuration por arquivo em JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274."
+#~ msgstr "" +#~ "\n" +#~ "\t\tAplica a configuração em um recurso usando um nome de arquivo ou " +#~ "stdin.\n" +#~ "\t\tEste recurso será criado se ele não existir.\n" +#~ "\t\tPara utilizar o 'apply', sempre crie o recurso inicialmente com " +#~ "'apply' ou 'create --save-config'.\n" +#~ "\n" +#~ "\t\tFormatos JSON e YAML são aceitos.\n" +#~ "\n" +#~ "\t\tNota Alpha: a funcionalidade --prune não está completa. Não utilize a " +#~ "não ser que você saibe qual é o estado corrente. Veja https://issues.k8s." +#~ "io/34274." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um ClusterRoleBinding para um ClusterRole específico." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria uma RoleBinding para uma Role específica ou ClusterRole." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um TLS secret de uma chave pública/privada fornecida.\n" +#~ "\n" +#~ "\t\tA chave pública/privada deve existir antes. O certificado da chave " +#~ "deve ser codificada como PEM, e ter sido gerada pela chave privada." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to " +#~ "the basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um configmap com base em um arquivo, diretório, ou um valor " +#~ "literal especificado.\n" +#~ "\n" +#~ "\t\tUm configmap único pode conter um ou mais pares de chave/valor.\n" +#~ "\n" +#~ "\t\tQuando criar um configmap com base em um arquivo, a chave será por " +#~ "padrão o nome do arquivo, e o valor será\n" +#~ "\t\tpor padrão o conteúdo do arquivo. Se o nome do arquivo for uma chave " +#~ "inválida, você deve especificar uma chave alternativa.\n" +#~ "\n" +#~ "\t\tQuando criar um configmap com base em um diretório, cada arquivo cujo " +#~ "o nome é uma chave válida no diretório será\n" +#~ "\t\tcolocada no configmap. Qualquer entrada de diretório, exceto as com " +#~ "arquivos válidos serão ignorados (por exemplo: sub-diretórios,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker " +#~ "registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um novo secret para utilizar com Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets são utilizados para autenticar Docker registries.\n" +#~ "\n" +#~ "\t\tQuando utilizando a linha de comando do Docker para realizar envio " +#~ "das images, você pode se autenticar para um registro fornecido " +#~ "executando\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER —username=DOCKER_USER —" +#~ "password=DOCKER_PASSWORD —email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " Isso irá gerar um arquivo ~/.dockercfg que será utilizado para os " +#~ "comandos 'docker push' e 'docker pull' \n" +#~ "\t\tse autenticarem no registro. O endereço de email é opcional.\n" +#~ "\n" +#~ "\t\tQuando criar aplicações, você pode ter um Docker registry que requer " +#~ "autenticação. Para que \n" +#~ "\t\tos nodes possam baixar as imagens em seu nome, eles devem ter as " +#~ "credenciais. Você pode prover esta informação\n" +#~ "\t\tcriando um dockercfg secret e anexando-o à sua conta de serviço." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um pod disruption budget com o nome especificado, seletor, e o " +#~ "número mínimo de pode disponíveis" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um recurso por nome de arquivo ou stdin.\n" +#~ "\n" +#~ "\t\tOs formatos JSON e YAML são aceitos." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um resourcequota com o nome especificado, limits rigídos e " +#~ "escopo opcional" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. 
subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um secret com base em um arquivo, diretório, ou um valor literal " +#~ "especificado.\n" +#~ "\n" +#~ "\t\tUm secret único pode conter um ou mais pares de chave/valor.\n" +#~ "\n" +#~ "\t\tQuando criar um secret com base em um arquivo, a chave será por " +#~ "padrão o nome do arquivo, e o valor será\n" +#~ "\t\tpor padrão o conteúdo do arquivo. Se o nome do arquivo for uma chave " +#~ "inválida, você deve especificar uma chave alternativa.\n" +#~ "\n" +#~ "\t\tQuando criar um secret com base em um diretório, cada arquivo cujo o " +#~ "nome é uma chave válida no diretório será\n" +#~ "\t\tcolocada no configmap. Qualquer entrada de diretório, exceto as com " +#~ "arquivos válidos serão ignorados (por exemplo: sub-diretórios,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria e executa uma imagem específica, possivelmente replicada.\n" +#~ "\n" +#~ "\t\tCria um deployment ou job para gerenciar o(s) container(s) criado(s)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCria um autoscaler que automaticamente escolhe e configura quantos " +#~ "pods irão executar em um cluster kubernetes.\n" +#~ "\n" +#~ "\t\tProcura por um Deployment, ReplicaSet, ou ReplicationController por " +#~ "nome e cria um autoscaler que utiliza o recurso fornecido como " +#~ "referência.\n" +#~ "\t\tUm autoscaler pode automaticamente aumentar ou reduzir o número de " +#~ "pods quando necessário." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. 
If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments " +#~ "may be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that " +#~ "the pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of " +#~ "the same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource."
+ +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." +#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered " +#~ "by delete command.\n" +#~ "\t\tSee 'kubectl delete —help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\tMostra os Recursos (CPU/Memória/Armazenamento) utilizados nos nodes.\n" +#~ "\n" +#~ "\t\tO comando top-node permite que você veja o consumo de recursos dos " +#~ "nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\tMostra a utilização de recursos dos pods (CPU/Memória/" +#~ "Armazenamento).\n" +#~ "\n" +#~ "\t\tO comando 'top pod' deixa você ver a utilização dos recusrsos dos " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDevido ao atraso da pipeline de métricas, o resultado pode estar " +#~ "indisponível por alguns minutos\n" +#~ "\t\tdesde a criação do pod." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\tMostra a utilização de recursos (CPU/Memória/Armazenamento).\n" +#~ "\n" +#~ "\t\tO comando top deixa você ver a utilização de recursos de nodes e " +#~ "pods.\n" +#~ "\n" +#~ "\t\tEste comando necessita que o Heapster esteja corretamente configurado " +#~ "e rodando no servidor. " + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. 
If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will " +#~ "use normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. 
To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." +#~ msgstr "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' " +#~ "for Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag —windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. 
This can be done by sourcing it " +#~ "from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in " +#~ "versions of zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\tAplica uma atualização contínua em um ReplicationController.\n" +#~ "\n" +#~ "\t\tTroca o replication controller especificado por um novo replication " +#~ "controller atualizando um pod por vez para utilizar o\n" +#~ "\t\tnovo PodTemplate. O new-controller.json deve especificar o " +#~ "mesmo namespace que o\n" +#~ "\t\treplication controller existente e sobrescrever pelo menos um label " +#~ "(comum) no seu replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate." +#~ "svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing " +#~ "resource, the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tSubstitui um recurso pelo especificado em um arquivo ou via stdin.\n" +#~ "\n" +#~ "\t\tOs formatos JSON e YAML são aceitos. Ao substituir um recurso " +#~ "existente,\n" +#~ "\t\ta especificação completa do recurso deve ser fornecida. Isto pode ser " +#~ "obtido com\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ "\n" +#~ "\t\tConsulte os modelos em https://htmlpreview.github.io/?https://github." +#~ "com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +#~ "html para descobrir se um campo é mutável." + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tDefine um novo tamanho para um Deployment, ReplicaSet, Replication " +#~ "Controller, ou Job.\n" +#~ "\n" +#~ "\t\tScale deixa os usuários especificar uma ou mais pre-condições para a " +#~ "ação de scale.\n" +#~ "\n" +#~ "\t\tSe --current-replicas ou --resource-version forem especificados, será " +#~ "validado antes\n" +#~ "\t\tda tentativa de scale, e garante que a pre-condição é verdadeira " +#~ "quando\n" +#~ "\t\to scale é enviado para o servidor." + +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tPara fazer o proxy the todas as apis do kubernetes, utilize:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy —api-prefix=/\n" +#~ "\n" +#~ "\t\tPara fazer o proxy de parte da api do kubernetes e alguns arquivos " +#~ "estáticos:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy —www=/my/files —www-prefix=/static/ —api-prefix=/" +#~ "api/\n" +#~ "\n" +#~ "\t\tCom os comandos acima você pode fazer 'curl localhost:8001/api/v1/" +#~ "pods'.\n" +#~ "\n" +#~ "\t\tPara fazer o proxy the todas as apis do kubernetes em um caminho " +#~ "diferente, utilize:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy —api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tCom o comando acima você pode fazer 'curl localhost:8001/custom/api/" +#~ "v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tPlease refer to the models in https://htmlpreview.github.io/?https://" +#~ "github.com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/" +#~ "definitions.html to find if a field is mutable." +#~ msgstr "" +#~ "\n" +#~ "\t\tAtualiza o(s) campo(s) de um recurso usando strategic merge patch\n" +#~ "\n" +#~ "\t\tFormatos JSON e YAML são aceitos.\n" +#~ "\n" +#~ "\t\tConsulte os modelos em https://htmlpreview.github.io/?https://github." +#~ "com/kubernetes/kubernetes/blob/HEAD/docs/api-reference/v1/definitions." +#~ "html para descobrir se um campo é mutável." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tAtualiza labels em um recurso.\n" +#~ "\n" +#~ "\t\t* Um label deve começar com uma letra ou número, e pode conter letra, " +#~ "números, hífens, pontos e sublinhados, com no máximo %[1]d caracteres.\n" +#~ "\t\t* Se --overwrite for verdadeiro, então labels podem ser " +#~ "sobreescritos, caso contrário a sobreescrita irá falhar.\n" +#~ "\t\t* Se --resource-version for especificado, então as atualizações " +#~ "usarão esta versão do recurso, caso contrário, a versão do recurso " +#~ "existente será usada." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[1]d " +#~ "characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d " +#~ "characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\tAtualiza os taints em um ou mais nodes.\n" +#~ "\n" +#~ "\t\t* Um taint consiste em uma chave, valor e efeito. Como arqgumento, é " +#~ "expressado como chave=valor:efeito.\n" +#~ "\t\t* Uma chave deve começar com uma letra ou número, e pode conter " +#~ "letras, números, hífens, pontos e sublinhados, com no máximo %[1]d " +#~ "caractéres.\n" +#~ "\t\t* Um valor deve começar com uma letra ou número, e pode conter " +#~ "letras, números, hífens, pontos e sublinhados, com no máximo %[2]d " +#~ "caractéres.\n" +#~ "\t\t* O efeito deve ser NoSchedule, PreferNoSchedule ou NoExecute.\n" +#~ "\t\t* Atualmente taint pode ser aplicado apenas para nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name " +#~ "or file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!Nota Importante!!!\n" +#~ "\t # Necessita que o binário 'tar' esteja presente na imagem do\n" +#~ "\t # container. 
Se 'tar' não estiver presente, o 'kubectl cp' irá " +#~ "falhar.\n" +#~ "\n" +#~ "\t # Copia o diretório local /tmp/foo_dir para /tmp/bar_dir no pod " +#~ "remoto no namespace default\n" +#~ "\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n" +#~ "\n" +#~ " # Copia o arquivo local /tmp/foo para /tmp/bar no pod remoto no " +#~ "container específico\n" +#~ "\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n" +#~ "\n" +#~ "\t\t# Copia o arquivo local /tmp/foo para /tmp/bar no pod remoto no " +#~ "namespace <some-namespace>\n" +#~ "\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copia /tmp/foo do pod remoto para /tmp/bar localmente\n" +#~ "\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # Cria um novo segredo TLS chamado tls-secret com o par de chaves " +#~ "fornecido:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +#~ "to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Cria um novo segredo chamado my-secret com as chaves para cada " +#~ "arquivo no diretório bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Cria um novo segredo chamado my-secret com chaves especificadas em " +#~ "vez dos nomes dos arquivos\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Cria um novo segredo chamado my-secret com key1=supersecret e " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Cria um novo serviço do tipo ExternalName chamado my-ns \n" +#~ "\tkubectl create service externalname my-ns —external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # Cria um novo serviço clusterIP chamado my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Cria um novo serviço clusterIP chamado my-cs (em modo headless)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # Cria um novo deployment chamado my-dep que executa uma imagem " +#~ "busybox.\n" +#~ " kubectl create deployment my-dep —image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # Cria um novo serviço nodeport chamado my-ns\n" +#~ " kubectl create service nodeport my-ns —tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value " +#~ "'my frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if " +#~ "it exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Atualiza o pod 'foo' com a annotation 'description' e o valor 'my " +#~ "frontend'.\n" +#~ " # Se a mesma annotation é configurada várias vezes, apenas o último " +#~ "valor será utilizado\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Atualiza o pod identificado pelo tipo e nome definido no \"pod.json" +#~ "\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Atualiza o pod 'foo' com a annotation 'description' e o valor 'my " +#~ "frontend running nginx', 
sobrescrevendo qualquer valor existente.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend " +#~ "running nginx'\n" +#~ "\n" +#~ " # Atualiza todos os pods no namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Atualiza o pod 'foo' apenas se o recurso não foi modificado na " +#~ "versão 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Atualiza o pod 'foo' removendo a annotation chamada 'description' " +#~ "se ela existir.\n" +#~ " # Não necessita da flag --overwrite.\n" +#~ " kubectl annotate pods foo description-" + +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Cria um serviço do tipo clusterIP com o nome especificado." + +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Cria um deployment com o nome especificado." + +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " Cria um serviço do tipo nodeport com o nome especificado." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or " +#~ "specify --all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Coleta informações do cluster para debugar e diagnosticar problemas " +#~ "do cluster. Por padrão, exibe tudo para o\n" +#~ " stdout. Você pode, se quiser, especificar um diretório com --output-" +#~ "directory. Se especificar o diretório, kubernetes irá\n" +#~ " montar um conjunto de arquivos no diretório. Por padrão, apenas " +#~ "coleta informações no namespace 'kube-system', mas você pode\n" +#~ " trocar para um namespace diferente com a flag --namespaces, ou " +#~ "especificar --all-namespaces para todos os namespaces.\n" +#~ "\n" +#~ " O comando também coleta os logs de todos os pods no cluster, estes " +#~ "logs são salvos em outros diretórios\n" +#~ " baseado no namespace e nome do pod." + +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Mostra os endereços dos servidores de gerenciamento e serviços com o " +#~ "label kubernetes.io/cluster-service=true\n" +#~ " Para debugar e diagnosticar outros problemas do cluster, utilize " +#~ "'kubectl cluster-info dump'." + +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "Agendamento no formato Cron no qual o job deve rodar." + +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." 
+#~ msgstr "" +#~ "Uma substituição inline JSON para o objeto de serviço gerado. Se não " +#~ "estiver vazio, ele será usado para substituir o objeto gerado. Requer que " +#~ "o objeto forneça o campo apiVersion válido. Usado apenas se --expose for " +#~ "true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "" +#~ "Aplica a configuração para um recurso utilizado um nome de arquivo ou " +#~ "stdin" + +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "Auto-escala um Deployment, ReplicaSet ou ReplicationController" + +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Nome do contêiner que terá sua imagem atualizada. Relevante apenas quando " +#~ "--image for especificado, caso contrário, ignorado. Obrigatório ao usar --" +#~ "image em um pod com vários contêineres" + +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "Cria um ClusterRoleBinding para um ClusterRole especifico" + +#~ msgid "Create a LoadBalancer service." +#~ msgstr "Cria um serviço do tipo LoadBalancer." + +#~ msgid "Create a NodePort service." +#~ msgstr "Cria um serviço do tipo NodePort." + +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "Cria um RoleBinding para uma Role ou ClusterRole especifico" + +#~ msgid "Create a clusterIP service." +#~ msgstr "Cria um serviço do tipo clusterIP." + +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "" +#~ "Cria um configmap com base em um arquivo, diretório, ou um valor literal" + +#~ msgid "Create a deployment with the specified name." +#~ msgstr "Cria um deployment com um nome especificado." + +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "Cria um pod disruption budget com um nome especificado." + +#~ msgid "Create a quota with the specified name." +#~ msgstr "Cria uma quota com um nome especificado." + +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "Cria um recurso por nome de arquivo ou stdin" + +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "" +#~ "Cria um secret com base em um arquivo, diretório ou um valor literal" + +#~ msgid "Create a service using specified subcommand." +#~ msgstr "Cria um service utilizando um sub-comando especificado." + +#~ msgid "Create an ExternalName service." +#~ msgstr "Cria um serviço do tipo ExternalName." + +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector" +#~ msgstr "" +#~ "Apaga os recusros por nome de arquivos, stdin, recursos e nomes, ou por " +#~ "recursos e seletor de label" + +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Descontinuado: Termina um recurso por nome ou nome de arquivo" + +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "Mostra a utilização de recursos (CPU/Memória) nos nodes" + +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "Mostra a utilização de recursos (CPU/Memória) nos pods" + +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "Mostra a utilização de recursos (CPU/Memória)." 
+ +#~ msgid "Display cluster info" +#~ msgstr "Mostra as informações do cluster" + +#~ msgid "Displays the current-context" +#~ msgstr "Mostra o contexto corrente" + +#~ msgid "Documentation of resources" +#~ msgstr "Documentação dos recursos" + +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "" +#~ "Realiza o dump de muitas informações relevantes para debugging e " +#~ "diagnósticos" + +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Política explícita para quando extrair imagens de contêiner. Obrigatório " +#~ "quando --image for igual à imagem existente, caso contrário, será " +#~ "ignorado." + +#~ msgid "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP para ser alocado no Load Balancer. Se vazio, um IP efêmero será criado " +#~ "e utilizado (específico para cada provedor cloud)." + +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Imagem a ser utilizada para atualizar o replication controller. Deve ser " +#~ "diferente da imagem atual (pode ser uma nova imagem ou uma nova tag). Não " +#~ "pode ser utilizada com —filename/-f" + +#~ msgid "Manage a deployment rollout" +#~ msgstr "Gerencia um deployment rollout" + +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Imprime o objeto formatado com a dada versão de grupo (por exemplo: " +#~ "'extensions/v1beta1').)" + +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "Executa uma atualização contínua" + +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "Substitui um recurso por um nome de arquivo ou stdin" + +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or " +#~ "Job" +#~ msgstr "" +#~ "Define um novo tamanho para um Deployment, ReplicaSet, Replication " +#~ "Controller, ou Job" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Define a anotação last-applied-configuration em um objeto existente para " +#~ "corresponder ao conteúdo do arquivo." + +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "Define um cluster no arquivo kubeconfig" + +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "Define um contexto no arquivo kubeconfig" + +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "Define um usuário no arquivo kubeconfig" + +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "Define um valor individual no arquivo kubeconfig" + +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "Define o current-context no arquivo kubeconfig" + +#~ msgid "" +#~ "Take a replication controller, service, deployment or pod and expose it " +#~ "as a new Kubernetes Service" +#~ msgstr "" +#~ "Pega um replication controlar, service, deployment ou pod e expõe como um " +#~ "novo Serviço do Kubernetes" + +#~ msgid "" +#~ "The key to use to differentiate between two different controllers, " +#~ "default 'deployment'. 
Only relevant when --image is specified, ignored " +#~ "otherwise" +#~ msgstr "" +#~ "A chave utilizada para diferenciar entre dois controladores diferentes, " +#~ "padrão 'deployment'. Apenas relevante quando --image é especificado, é " +#~ "ignorado caso contrário" + +#~ msgid "" +#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-" +#~ "guide/kubectl-conventions/#generators for a list." +#~ msgstr "" +#~ "O nome do gerador de API a ser usado, veja a lista em http://kubernetes." +#~ "io/docs/user-guide/kubectl-conventions/#generators." + +#~ msgid "" +#~ "The name of the API generator to use. Currently there is only 1 generator." +#~ msgstr "" +#~ "O nome do gerador de API a ser usado. Atualmente existe apenas 1 gerador." + +#~ msgid "" +#~ "The name of the generator to use for creating a service. Only used if --" +#~ "expose is true" +#~ msgstr "" +#~ "O nome do gerador a ser utilizado para criar um serviço. Apenas " +#~ "utilizado se --expose for verdadeiro" + +#~ msgid "" +#~ "The port that this container exposes. If --expose is true, this is also " +#~ "the port used by the service that is created." +#~ msgstr "" +#~ "A porta que o container expõe. Se --expose for verdadeiro, esta também é a " +#~ "porta utilizada pelo serviço quando for criado." + +#~ msgid "" +#~ "The restart policy for this Pod. Legal values [Always, OnFailure, " +#~ "Never]. If set to 'Always' a deployment is created, if set to " +#~ "'OnFailure' a job is created, if set to 'Never', a regular pod is " +#~ "created. For the latter two --replicas must be 1. Default 'Always', for " +#~ "CronJobs `Never`." +#~ msgstr "" +#~ "A política de restart para este Pod. Possíveis valores [Always, " +#~ "OnFailure, Never]. Se configurado para 'Always' um deployment é criado, " +#~ "se configurado para 'OnFailure' um job é criado, se configurado para " +#~ "'Never', um pod é criado. Para os dois últimos --replicas deve ser 1. " +#~ "Valor padrão 'Always', para CronJobs `Never`." + +#~ msgid "" +#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is " +#~ "'ClusterIP'." +#~ msgstr "" +#~ "Tipo para este serviço: ClusterIP, NodePort, ou LoadBalancer. Valor " +#~ "padrão é 'ClusterIP'." + +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "Remove um valor individual do arquivo kubeconfig" + +#~ msgid "Update field(s) of a resource using strategic merge patch" +#~ msgstr "" +#~ "Atualiza o(s) campo(s) de um recurso usando strategic merge patch" + +#~ msgid "Update image of a pod template" +#~ msgstr "Atualiza a imagem de um template de pod" + +#~ msgid "" +#~ "View latest last-applied-configuration annotations of a resource/object" +#~ msgstr "" +#~ "Visualiza a última anotação last-applied-configuration de um recurso/" +#~ "objeto" + +#~ msgid "external name of service" +#~ msgstr "nome externo do serviço" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot new file mode 100644 index 0000000000..4e60cfc997 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot @@ -0,0 +1,3291 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. 
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: \n"
+"Report-Msgid-Bugs-To: EMAIL\n"
+"POT-Creation-Date: 2023-06-27 12:09-0400\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <LL@li.org>\n"
+"Language: \n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:142
+msgid ""
+"\n"
+"\t\t\t# Approve CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate approve csr-sqgzp\n"
+"\t\t"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:185
+msgid ""
+"\n"
+"\t\t\t# Deny CSR 'csr-sqgzp'\n"
+"\t\t\tkubectl certificate deny csr-sqgzp\n"
+"\t\t"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:43
+msgid ""
+"\n"
+"\t\t\tModify kubeconfig files using subcommands like \"kubectl config set "
+"current-context my-context\".\n"
+"\n"
+"\t\t\tThe loading order follows these rules:\n"
+"\n"
+"\t\t\t1. If the --"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:44
+msgid ""
+"\n"
+"\t\t # Create a cluster role binding for user1, user2, and group1 using the "
+"cluster-admin cluster role\n"
+"\t\t kubectl create clusterrolebinding cluster-admin --clusterrole=cluster-"
+"admin --user=user1 --user=user2 --group=group1"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:57
+msgid ""
+"\n"
+"\t\t # Create a new config map named my-config based on folder bar\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config with specified keys instead "
+"of file basenames on disk\n"
+"\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/file1."
+"txt --from-file=key2=/path/to/bar/file2.txt\n"
+"\n"
+"\t\t # Create a new config map named my-config with key1=config1 and "
+"key2=config2\n"
+"\t\t kubectl create configmap my-config --from-literal=key1=config1 --from-"
+"literal=key2=config2\n"
+"\n"
+"\t\t # Create a new config map named my-config from the key=value pairs in "
+"the file\n"
+"\t\t kubectl create configmap my-config --from-file=path/to/bar\n"
+"\n"
+"\t\t # Create a new config map named my-config from an env file\n"
+"\t\t kubectl create configmap my-config --from-env-file=path/to/foo.env --"
+"from-env-file=path/to/bar.env"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:56
+msgid ""
+"\n"
+"\t\t # If you do not already have a .dockercfg file, create a dockercfg "
+"secret directly\n"
+"\t\t kubectl create secret docker-registry my-secret --docker-"
+"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
+"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\n"
+"\n"
+"\t\t # Create a new secret named my-secret from ~/.docker/config.json\n"
+"\t\t kubectl create secret docker-registry my-secret --from-file=."
+"dockerconfigjson=path/to/.docker/config.json"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:63
+msgid ""
+"\n"
+"\t\t # Show metrics for all nodes\n"
+"\t\t kubectl top node\n"
+"\n"
+"\t\t # Show metrics for a given node\n"
+"\t\t kubectl top node NODE_NAME"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:41
+msgid ""
+"\n"
+"\t\t# !!!Important Note!!!\n"
+"\t\t# Requires that the 'tar' binary is present in your container\n"
+"\t\t# image. If 'tar' is not present, 'kubectl cp' will fail.\n"
+"\t\t#\n"
+"\t\t# For advanced use cases, such as symlinks, wildcard expansion or\n"
+"\t\t# file mode preservation, consider using 'kubectl exec'.\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\ttar cf - /tmp/foo | kubectl exec -i -n <some-namespace> <some-pod> -- "
+"tar xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl exec -n <some-namespace> <some-pod> -- tar cf - /tmp/foo | tar "
+"xf - -C /tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod in "
+"the default namespace\n"
+"\t\tkubectl cp /tmp/foo_dir <some-pod>:/tmp/bar_dir\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in a specific "
+"container\n"
+"\t\tkubectl cp /tmp/foo <some-pod>:/tmp/bar -c <specific-container>\n"
+"\n"
+"\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace "
+"<some-namespace>\n"
+"\t\tkubectl cp /tmp/foo <some-namespace>/<some-pod>:/tmp/bar\n"
+"\n"
+"\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n"
+"\t\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:153
+msgid ""
+"\n"
+"\t\t# Apply the configuration in pod.json to a pod\n"
+"\t\tkubectl apply -f ./pod.json\n"
+"\n"
+"\t\t# Apply resources from a directory containing kustomization.yaml - e.g. "
+"dir/kustomization.yaml\n"
+"\t\tkubectl apply -k dir/\n"
+"\n"
+"\t\t# Apply the JSON passed into stdin to a pod\n"
+"\t\tcat pod.json | kubectl apply -f -\n"
+"\n"
+"\t\t# Apply the configuration from all files that end with '.json'\n"
+"\t\tkubectl apply -f '*.json'\n"
+"\n"
+"\t\t# Note: --prune is still in Alpha\n"
+"\t\t# Apply the configuration in manifest.yaml that matches label app=nginx "
+"and delete all other resources that are not in the file and match label "
+"app=nginx\n"
+"\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n"
+"\n"
+"\t\t# Apply the configuration in manifest.yaml and delete all the other "
+"config maps that are not in the file\n"
+"\t\tkubectl apply --prune -f manifest.yaml --all --prune-allowlist=core/v1/"
+"ConfigMap"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:50
+#, c-format
+msgid ""
+"\n"
+"\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 and "
+"10, no target CPU utilization specified so a default autoscaling policy will "
+"be used\n"
+"\t\tkubectl autoscale deployment foo --min=2 --max=10\n"
+"\n"
+"\t\t# Auto scale a replication controller \"foo\", with the number of pods "
+"between 1 and 5, target CPU utilization at 80%\n"
+"\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
+msgstr ""
+
+#: pkg/kubectl/cmd/convert/convert.go:52
+msgid ""
+"\n"
+"\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n"
+"\t\tkubectl convert -f pod.yaml\n"
+"\n"
+"\t\t# Convert the live state of the resource specified by 'pod.yaml' to the "
+"latest version\n"
+"\t\t# and print to stdout in JSON format.\n"
+"\t\tkubectl convert -f pod.yaml --local -o json\n"
+"\n"
+"\t\t# Convert all files under current directory to latest version and create "
+"them all.\n"
+"\t\tkubectl convert -f . 
| kubectl create -f -" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:41 +msgid "" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +"resource=pods\n" +"\n" +"\t\t# Create a cluster role named \"pod-reader\" with ResourceName " +"specified\n" +"\t\tkubectl create clusterrole pod-reader --verb=get --resource=pods --" +"resource-name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with API Group specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=rs.apps\n" +"\n" +"\t\t# Create a cluster role named \"foo\" with SubResource specified\n" +"\t\tkubectl create clusterrole foo --verb=get,list,watch --resource=pods," +"pods/status\n" +"\n" +"\t\t# Create a cluster role name \"foo\" with NonResourceURL specified\n" +"\t\tkubectl create clusterrole \"foo\" --verb=get --non-resource-url=/logs/" +"*\n" +"\n" +"\t\t# Create a cluster role name \"monitoring\" with AggregationRule " +"specified\n" +"\t\tkubectl create clusterrole monitoring --aggregation-rule=\"rbac.example." +"com/aggregate-to-monitoring=true\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:44 +msgid "" +"\n" +"\t\t# Create a job\n" +"\t\tkubectl create job my-job --image=busybox\n" +"\n" +"\t\t# Create a job with a command\n" +"\t\tkubectl create job my-job --image=busybox -- date\n" +"\n" +"\t\t# Create a job from a cron job named \"a-cronjob\"\n" +"\t\tkubectl create job test-job --from=cronjob/a-cronjob" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:44 +msgid "" +"\n" +"\t\t# Create a new resource quota named my-quota\n" +"\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +"replicationcontrollers=2,resourcequotas=1,secrets=5," +"persistentvolumeclaims=10\n" +"\n" +"\t\t# Create a new resource quota named best-effort\n" +"\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:44 +#, c-format +msgid "" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=rails label\n" +"\t\t# and require at least one of them being available at any point in time\n" +"\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +"available=1\n" +"\n" +"\t\t# Create a pod disruption budget named my-pdb that will select all pods " +"with the app=nginx label\n" +"\t\t# and require at least half of the pods selected to be available at any " +"point in time\n" +"\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:78 +msgid "" +"\n" +"\t\t# Create a pod using the data in pod.json\n" +"\t\tkubectl create -f ./pod.json\n" +"\n" +"\t\t# Create a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl create -f -\n" +"\n" +"\t\t# Edit the data in registry.yaml in JSON then create the resource using " +"the edited data\n" +"\t\tkubectl create -f registry.yaml --edit -o json" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:43 +msgid "" +"\n" +"\t\t# Create a priority class named high-priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --" +"description=\"high priority\"\n" +"\n" +"\t\t# Create a priority class named 
default-priority that is considered as " +"the global default priority\n" +"\t\tkubectl create priorityclass default-priority --value=1000 --global-" +"default=true --description=\"default priority\"\n" +"\n" +"\t\t# Create a priority class named high-priority that cannot preempt pods " +"with lower priority\n" +"\t\tkubectl create priorityclass high-priority --value=1000 --" +"description=\"high priority\" --preemption-policy=\"Never\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43 +msgid "" +"\n" +"\t\t# Create a role binding for user1, user2, and group1 using the admin " +"cluster role\n" +"\t\tkubectl create rolebinding admin --clusterrole=admin --user=user1 --" +"user=user2 --group=group1\n" +"\n" +"\t\t# Create a role binding for serviceaccount monitoring:sa-dev using the " +"admin role\n" +"\t\tkubectl create rolebinding admin-binding --role=admin --" +"serviceaccount=monitoring:sa-dev" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:46 +msgid "" +"\n" +"\t\t# Create a role named \"pod-reader\" that allows user to perform " +"\"get\", \"watch\" and \"list\" on pods\n" +"\t\tkubectl create role pod-reader --verb=get --verb=list --verb=watch --" +"resource=pods\n" +"\n" +"\t\t# Create a role named \"pod-reader\" with ResourceName specified\n" +"\t\tkubectl create role pod-reader --verb=get --resource=pods --resource-" +"name=readablepod --resource-name=anotherpod\n" +"\n" +"\t\t# Create a role named \"foo\" with API Group specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=rs.apps\n" +"\n" +"\t\t# Create a role named \"foo\" with SubResource specified\n" +"\t\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:67 +msgid "" +"\n" +"\t\t# Create a service for a replicated nginx, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a replication controller identified by type and " +"name specified in \"nginx-controller.yaml\", which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +"the name \"frontend\"\n" +"\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +"\n" +"\t\t# Create a second service based on the above service, exposing the " +"container port 8443 as port 443 with the name \"nginx-https\"\n" +"\t\tkubectl expose service nginx --port=443 --target-port=8443 --name=nginx-" +"https\n" +"\n" +"\t\t# Create a service for a replicated streaming application on port 4100 " +"balancing UDP traffic and named 'video-stream'.\n" +"\t\tkubectl expose rc streamer --port=4100 --protocol=UDP --name=video-" +"stream\n" +"\n" +"\t\t# Create a service for a replicated nginx using replica set, which " +"serves on port 80 and connects to the containers on port 8000\n" +"\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +"\n" +"\t\t# Create a service for an nginx deployment, which serves on port 80 and " +"connects to the containers on port 8000\n" +"\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:64 +msgid "" +"\n" +"\t\t# Create a single ingress called 'simple' that directs requests to foo." 
+"com/bar to svc\n" +"\t\t# svc1:8080 with a TLS secret \"my-cert\"\n" +"\t\tkubectl create ingress simple --rule=\"foo.com/bar=svc1:8080,tls=my-" +"cert\"\n" +"\n" +"\t\t# Create a catch all ingress of \"/path\" pointing to service svc:port " +"and Ingress Class as \"otheringress\"\n" +"\t\tkubectl create ingress catch-all --class=otheringress --rule=\"/path=svc:" +"port\"\n" +"\n" +"\t\t# Create an ingress with two annotations: ingress.annotation1 and " +"ingress.annotations2\n" +"\t\tkubectl create ingress annotated --class=default --rule=\"foo.com/" +"bar=svc:port\" \\\n" +"\t\t\t--annotation ingress.annotation1=foo \\\n" +"\t\t\t--annotation ingress.annotation2=bla\n" +"\n" +"\t\t# Create an ingress with the same host and multiple paths\n" +"\t\tkubectl create ingress multipath --class=default \\\n" +"\t\t\t--rule=\"foo.com/=svc:port\" \\\n" +"\t\t\t--rule=\"foo.com/admin/=svcadmin:portadmin\"\n" +"\n" +"\t\t# Create an ingress with multiple hosts and the pathType as Prefix\n" +"\t\tkubectl create ingress ingress1 --class=default \\\n" +"\t\t\t--rule=\"foo.com/path*=svc:8080\" \\\n" +"\t\t\t--rule=\"bar.com/admin*=svc2:http\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using the default ingress " +"certificate and different path types\n" +"\t\tkubectl create ingress ingtls --class=default \\\n" +"\t\t --rule=\"foo.com/=svc:https,tls\" \\\n" +"\t\t --rule=\"foo.com/path/subpath*=othersvc:8080\"\n" +"\n" +"\t\t# Create an ingress with TLS enabled using a specific secret and " +"pathType as Prefix\n" +"\t\tkubectl create ingress ingsecret --class=default \\\n" +"\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t# Create an ingress with a default backend\n" +"\t\tkubectl create ingress ingdefault --class=default \\\n" +"\t\t --default-backend=defaultsvc:http \\\n" +"\t\t --rule=\"foo.com/*=svc:8080,tls=secret1\"\n" +"\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:78 +msgid "" +"\n" +"\t\t# Create an interactive debugging session in pod mypod and immediately " +"attach to it.\n" +"\t\tkubectl debug mypod -it --image=busybox\n" +"\n" +"\t\t# Create an interactive debugging session for the pod in the file pod." 
+"yaml and immediately attach to it.\n" +"\t\t# (requires the EphemeralContainers feature to be enabled in the " +"cluster)\n" +"\t\tkubectl debug -f pod.yaml -it --image=busybox\n" +"\n" +"\t\t# Create a debug container named debugger using a custom automated " +"debugging image.\n" +"\t\tkubectl debug --image=myproj/debug-tools -c debugger mypod\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and attach to it\n" +"\t\tkubectl debug mypod -it --image=busybox --copy-to=my-debugger\n" +"\n" +"\t\t# Create a copy of mypod changing the command of mycontainer\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --container=mycontainer -- " +"sh\n" +"\n" +"\t\t# Create a copy of mypod changing all container images to busybox\n" +"\t\tkubectl debug mypod --copy-to=my-debugger --set-image=*=busybox\n" +"\n" +"\t\t# Create a copy of mypod adding a debug container and changing container " +"images\n" +"\t\tkubectl debug mypod -it --copy-to=my-debugger --image=debian --set-" +"image=app=app:debug,sidecar=sidecar:debug\n" +"\n" +"\t\t# Create an interactive debugging session on a node and immediately " +"attach to it.\n" +"\t\t# The container will run in the host namespaces and the host's " +"filesystem will be mounted at /host\n" +"\t\tkubectl debug node/mynode -it --image=busybox\n" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:80 +msgid "" +"\n" +"\t\t# Delete a pod using the type and name specified in pod.json\n" +"\t\tkubectl delete -f ./pod.json\n" +"\n" +"\t\t# Delete resources from a directory containing kustomization.yaml - e.g. " +"dir/kustomization.yaml\n" +"\t\tkubectl delete -k dir\n" +"\n" +"\t\t# Delete resources from all files that end with '.json'\n" +"\t\tkubectl delete -f '*.json'\n" +"\n" +"\t\t# Delete a pod based on the type and name in the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl delete -f -\n" +"\n" +"\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +"\t\tkubectl delete pod,service baz foo\n" +"\n" +"\t\t# Delete pods and services with label name=myLabel\n" +"\t\tkubectl delete pods,services -l name=myLabel\n" +"\n" +"\t\t# Delete a pod with minimal delay\n" +"\t\tkubectl delete pod foo --now\n" +"\n" +"\t\t# Force delete a pod on a dead node\n" +"\t\tkubectl delete pod foo --force\n" +"\n" +"\t\t# Delete all pods\n" +"\t\tkubectl delete pods --all" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:51 +msgid "" +"\n" +"\t\t# Describe a node\n" +"\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +"\n" +"\t\t# Describe a pod\n" +"\t\tkubectl describe pods/nginx\n" +"\n" +"\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +"\t\tkubectl describe -f pod.json\n" +"\n" +"\t\t# Describe all pods\n" +"\t\tkubectl describe pods\n" +"\n" +"\t\t# Describe pods by label name=myLabel\n" +"\t\tkubectl describe pods -l name=myLabel\n" +"\n" +"\t\t# Describe all pods managed by the 'frontend' replication controller\n" +"\t\t# (rc-created pods get the name of the rc as a prefix in the pod name)\n" +"\t\tkubectl describe pods frontend" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:75 +msgid "" +"\n" +"\t\t# Diff resources included in pod.json\n" +"\t\tkubectl diff -f pod.json\n" +"\n" +"\t\t# Diff file read from stdin\n" +"\t\tcat service.yaml | kubectl diff -f -" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:140 +msgid "" +"\n" +"\t\t# Drain node \"foo\", even if there are pods not managed by a " +"replication 
controller, replica set, job, daemon set, or stateful set on it\n" +"\t\tkubectl drain foo --force\n" +"\n" +"\t\t# As above, but abort if there are pods not managed by a replication " +"controller, replica set, job, daemon set, or stateful set, and use a grace " +"period of 15 minutes\n" +"\t\tkubectl drain foo --grace-period=900" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:60 +msgid "" +"\n" +"\t\t# Edit the service named 'registry'\n" +"\t\tkubectl edit svc/registry\n" +"\n" +"\t\t# Use an alternative editor\n" +"\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/registry\n" +"\n" +"\t\t# Edit the job 'myjob' in JSON using the v1 API format\n" +"\t\tkubectl edit job.v1.batch/myjob -o json\n" +"\n" +"\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +"config in its annotation\n" +"\t\tkubectl edit deployment/mydeployment -o yaml --save-config\n" +"\n" +"\t\t# Edit the 'status' subresource for the 'mydeployment' deployment\n" +"\t\tkubectl edit deployment mydeployment --subresource='status'" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:47 +msgid "" +"\n" +"\t\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/" +"default-container' annotation\n" +"\t\t# for selecting the container to be attached or the first container in " +"the pod will be chosen\n" +"\t\tkubectl attach mypod\n" +"\n" +"\t\t# Get output from ruby-container from pod mypod\n" +"\t\tkubectl attach mypod -c ruby-container\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl attach mypod -c ruby-container -i -t\n" +"\n" +"\t\t# Get output from the first pod of a replica set named nginx\n" +"\t\tkubectl attach rs/nginx\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:50 +msgid "" +"\n" +"\t\t# Get output from running the 'date' command from pod mypod, using the " +"first container by default\n" +"\t\tkubectl exec mypod -- date\n" +"\n" +"\t\t# Get output from running the 'date' command in ruby-container from pod " +"mypod\n" +"\t\tkubectl exec mypod -c ruby-container -- date\n" +"\n" +"\t\t# Switch to raw terminal mode; sends stdin to 'bash' in ruby-container " +"from pod mypod\n" +"\t\t# and sends stdout/stderr from 'bash' back to the client\n" +"\t\tkubectl exec mypod -c ruby-container -i -t -- bash -il\n" +"\n" +"\t\t# List contents of /usr from the first container of pod mypod and sort " +"by modification time\n" +"\t\t# If the command you want to execute in the pod has any flags in common " +"(e.g. 
-i),\n" +"\t\t# you must use two dashes (--) to separate your command's flags/" +"arguments\n" +"\t\t# Also note, do not surround your command and its flags/arguments with " +"quotes\n" +"\t\t# unless that is how you would execute it normally (i.e., do ls -t /usr, " +"not \"ls -t /usr\")\n" +"\t\tkubectl exec mypod -i -t -- ls -t /usr\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"deployment mydeployment, using the first container by default\n" +"\t\tkubectl exec deploy/mydeployment -- date\n" +"\n" +"\t\t# Get output from running 'date' command from the first pod of the " +"service myservice, using the first container by default\n" +"\t\tkubectl exec svc/myservice -- date\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:47 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get all the fields in the resource\n" +"\t\tkubectl explain pods --recursive\n" +"\n" +"\t\t# Get the explanation for deployment in supported api versions\n" +"\t\tkubectl explain deployments --api-version=apps/v1\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers\n" +"\t\t\n" +"\t\t# Get the documentation of resources in different format\n" +"\t\tkubectl explain deployment --output=plaintext-openapiv2" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:66 +msgid "" +"\n" +"\t\t# Installing bash completion on macOS using homebrew\n" +"\t\t## If running Bash 3.2 included with macOS\n" +"\t\t brew install bash-completion\n" +"\t\t## or, if running Bash 4.1+\n" +"\t\t brew install bash-completion@2\n" +"\t\t## If kubectl is installed via homebrew, this should start working " +"immediately\n" +"\t\t## If you've installed via other means, you may need add the completion " +"to your completion directory\n" +"\t\t kubectl completion bash > $(brew --prefix)/etc/bash_completion.d/" +"kubectl\n" +"\n" +"\n" +"\t\t# Installing bash completion on Linux\n" +"\t\t## If bash-completion is not installed on Linux, install the 'bash-" +"completion' package\n" +"\t\t## via your distribution's package manager.\n" +"\t\t## Load the kubectl completion code for bash into the current shell\n" +"\t\t source <(kubectl completion bash)\n" +"\t\t## Write bash completion code to a file and source it from ." 
+"bash_profile\n" +"\t\t kubectl completion bash > ~/.kube/completion.bash.inc\n" +"\t\t printf \"\n" +"\t\t # kubectl shell completion\n" +"\t\t source '$HOME/.kube/completion.bash.inc'\n" +"\t\t \" >> $HOME/.bash_profile\n" +"\t\t source $HOME/.bash_profile\n" +"\n" +"\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +"\t\t source <(kubectl completion zsh)\n" +"\t\t# Set the kubectl completion code for zsh[1] to autoload on startup\n" +"\t\t kubectl completion zsh > \"${fpath[1]}/_kubectl\"\n" +"\n" +"\n" +"\t\t# Load the kubectl completion code for fish[2] into the current shell\n" +"\t\t kubectl completion fish | source\n" +"\t\t# To load completions for each session, execute once:\n" +"\t\t kubectl completion fish > ~/.config/fish/completions/kubectl.fish\n" +"\n" +"\t\t# Load the kubectl completion code for powershell into the current " +"shell\n" +"\t\t kubectl completion powershell | Out-String | Invoke-Expression\n" +"\t\t# Set kubectl completion code for powershell to run on startup\n" +"\t\t## Save completion code to a script and execute in the profile\n" +"\t\t kubectl completion powershell > $HOME\\.kube\\completion.ps1\n" +"\t\t Add-Content $PROFILE \"$HOME\\.kube\\completion.ps1\"\n" +"\t\t## Execute completion code in the profile\n" +"\t\t Add-Content $PROFILE \"if (Get-Command kubectl -ErrorAction " +"SilentlyContinue) {\n" +"\t\t kubectl completion powershell | Out-String | Invoke-Expression\n" +"\t\t }\"\n" +"\t\t## Add completion code directly to the $PROFILE script\n" +"\t\t kubectl completion powershell >> $PROFILE" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:46 +msgid "" +"\n" +"\t\t# List all available plugins\n" +"\t\tkubectl plugin list" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:100 +msgid "" +"\n" +"\t\t# List all pods in ps output format\n" +"\t\tkubectl get pods\n" +"\n" +"\t\t# List all pods in ps output format with more information (such as node " +"name)\n" +"\t\tkubectl get pods -o wide\n" +"\n" +"\t\t# List a single replication controller with specified NAME in ps output " +"format\n" +"\t\tkubectl get replicationcontroller web\n" +"\n" +"\t\t# List deployments in JSON output format, in the \"v1\" version of the " +"\"apps\" API group\n" +"\t\tkubectl get deployments.v1.apps -o json\n" +"\n" +"\t\t# List a single pod in JSON output format\n" +"\t\tkubectl get -o json pod web-pod-13je7\n" +"\n" +"\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +"JSON output format\n" +"\t\tkubectl get -f pod.yaml -o json\n" +"\n" +"\t\t# List resources from a directory with kustomization.yaml - e.g. dir/" +"kustomization.yaml\n" +"\t\tkubectl get -k dir/\n" +"\n" +"\t\t# Return only the phase value of the specified pod\n" +"\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status.phase}}\n" +"\n" +"\t\t# List resource information in custom columns\n" +"\t\tkubectl get pod test-pod -o custom-columns=CONTAINER:.spec.containers[0]." 
+"name,IMAGE:.spec.containers[0].image\n" +"\n" +"\t\t# List all replication controllers and services together in ps output " +"format\n" +"\t\tkubectl get rc,services\n" +"\n" +"\t\t# List one or more resources by their type and names\n" +"\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +"\n" +"\t\t# List the 'status' subresource for a single pod\n" +"\t\tkubectl get pod web-pod-13je7 --subresource status" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:73 +msgid "" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 5000 6000\n" +"\n" +"\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +"5000 and 6000 in a pod selected by the deployment\n" +"\t\tkubectl port-forward deployment/mydeployment 5000 6000\n" +"\n" +"\t\t# Listen on port 8443 locally, forwarding to the targetPort of the " +"service's port named \"https\" in a pod selected by the service\n" +"\t\tkubectl port-forward service/myservice 8443:https\n" +"\n" +"\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on all addresses, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward --address 0.0.0.0 pod/mypod 8888:5000\n" +"\n" +"\t\t# Listen on port 8888 on localhost and selected IP, forwarding to 5000 " +"in the pod\n" +"\t\tkubectl port-forward --address localhost,10.19.21.23 pod/mypod " +"8888:5000\n" +"\n" +"\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +"\t\tkubectl port-forward pod/mypod :5000" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:89 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as schedulable\n" +"\t\tkubectl uncordon foo" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:60 +msgid "" +"\n" +"\t\t# Mark node \"foo\" as unschedulable\n" +"\t\tkubectl cordon foo" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:89 +msgid "" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as JSON\n" +"\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Partially update a node using a strategic merge patch, specifying the " +"patch as YAML\n" +"\t\tkubectl patch node k8s-node-1 -p $'spec:\n" +" unschedulable: true'\n" +"\n" +"\t\t# Partially update a node identified by the type and name specified in " +"\"node.json\" using strategic merge patch\n" +"\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +"\n" +"\t\t# Update a container's image; spec.containers[*].name is required " +"because it's a merge key\n" +"\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +"\n" +"\t\t# Update a container's image using a JSON patch with positional arrays\n" +"\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +"\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'\n" +"\n" +"\t\t# Update a deployment's replicas through the 'scale' subresource using a " +"merge patch\n" +"\t\tkubectl patch deployment nginx-deployment --subresource='scale' --" +"type='merge' -p '{\"spec\":{\"replicas\":2}}'" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" + +#: 
staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:45 +msgid "" +"\n" +"\t\t# Print the address of the control plane and cluster services\n" +"\t\tkubectl cluster-info" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:49 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:35 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:57 +msgid "" +"\n" +"\t\t# Replace a pod using the data in pod.json\n" +"\t\tkubectl replace -f ./pod.json\n" +"\n" +"\t\t# Replace a pod based on the JSON passed into stdin\n" +"\t\tcat pod.json | kubectl replace -f -\n" +"\n" +"\t\t# Update a single-container pod's image version (tag) to v4\n" +"\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/\\1:v4/' " +"| kubectl replace -f -\n" +"\n" +"\t\t# Force replace, delete and then re-create the resource\n" +"\t\tkubectl replace --force -f ./pod.json" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:55 +msgid "" +"\n" +"\t\t# Return snapshot logs from pod nginx with only one container\n" +"\t\tkubectl logs nginx\n" +"\n" +"\t\t# Return snapshot logs from pod nginx with multi containers\n" +"\t\tkubectl logs nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Return snapshot of previous terminated ruby container logs from pod " +"web-1\n" +"\t\tkubectl logs -p -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +"\t\tkubectl logs -f -c ruby web-1\n" +"\n" +"\t\t# Begin streaming the logs from all containers in pods defined by label " +"app=nginx\n" +"\t\tkubectl logs -f -l app=nginx --all-containers=true\n" +"\n" +"\t\t# Display only the most recent 20 lines of output in pod nginx\n" +"\t\tkubectl logs --tail=20 nginx\n" +"\n" +"\t\t# Show all logs from pod nginx written in the last hour\n" +"\t\tkubectl logs --since=1h nginx\n" +"\n" +"\t\t# Show logs from a kubelet with an expired serving certificate\n" +"\t\tkubectl logs --insecure-skip-tls-verify-backend nginx\n" +"\n" +"\t\t# Return snapshot logs from first container of a job named hello\n" +"\t\tkubectl logs job/hello\n" +"\n" +"\t\t# Return snapshot logs from container nginx-1 of a deployment named " +"nginx\n" +"\t\tkubectl logs deployment/nginx -c nginx-1" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:50 +msgid "" +"\n" +"\t\t# Scale a replica set named 'foo' to 3\n" +"\t\tkubectl scale --replicas=3 rs/foo\n" +"\n" +"\t\t# Scale a resource identified by type and name specified in \"foo.yaml\" " +"to 3\n" +"\t\tkubectl scale --replicas=3 -f foo.yaml\n" +"\n" +"\t\t# If the deployment named mysql's current size is 2, scale mysql to 3\n" +"\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +"\n" +"\t\t# Scale multiple replication controllers\n" +"\t\tkubectl scale --replicas=5 rc/example1 rc/example2 rc/example3\n" +"\n" +"\t\t# Scale stateful set named 'web' to 3\n" +"\t\tkubectl scale --replicas=3 statefulset/web" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:75 +msgid "" +"\n" +"\t\t# Set the last-applied-configuration of a resource to match the contents " +"of a file\n" +"\t\tkubectl apply 
set-last-applied -f deploy.yaml\n"
+"\n"
+"\t\t# Execute set-last-applied against each configuration file in a "
+"directory\n"
+"\t\tkubectl apply set-last-applied -f path/\n"
+"\n"
+"\t\t# Set the last-applied-configuration of a resource to match the contents "
+"of a file; will create the annotation if it does not already exist\n"
+"\t\tkubectl apply set-last-applied -f deploy.yaml --create-annotation=true\n"
+"\t\t"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:76
+msgid ""
+"\n"
+"\t\t# Show metrics for all pods in the default namespace\n"
+"\t\tkubectl top pod\n"
+"\n"
+"\t\t# Show metrics for all pods in the given namespace\n"
+"\t\tkubectl top pod --namespace=NAMESPACE\n"
+"\n"
+"\t\t# Show metrics for a given pod and its containers\n"
+"\t\tkubectl top pod POD_NAME --containers\n"
+"\n"
+"\t\t# Show metrics for the pods defined by label name=myLabel\n"
+"\t\tkubectl top pod -l name=myLabel"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:63
+msgid ""
+"\n"
+"\t\t# Start a nginx pod\n"
+"\t\tkubectl run nginx --image=nginx\n"
+"\n"
+"\t\t# Start a hazelcast pod and let the container expose port 5701\n"
+"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --port=5701\n"
+"\n"
+"\t\t# Start a hazelcast pod and set environment variables "
+"\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container\n"
+"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --"
+"env=\"DNS_DOMAIN=cluster\" --env=\"POD_NAMESPACE=default\"\n"
+"\n"
+"\t\t# Start a hazelcast pod and set labels \"app=hazelcast\" and "
+"\"env=prod\" in the container\n"
+"\t\tkubectl run hazelcast --image=hazelcast/hazelcast --"
+"labels=\"app=hazelcast,env=prod\"\n"
+"\n"
+"\t\t# Dry run; print the corresponding API objects without creating them\n"
+"\t\tkubectl run nginx --image=nginx --dry-run=client\n"
+"\n"
+"\t\t# Start a nginx pod, but overload the spec with a partial set of values "
+"parsed from JSON\n"
+"\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", "
+"\"spec\": { ... } }'\n"
+"\n"
+"\t\t# Start a busybox pod and keep it in the foreground, don't restart it if "
+"it exits\n"
+"\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n"
+"\n"
+"\t\t# Start the nginx pod using the default command, but use custom "
+"arguments (arg1 .. argN) for that command\n"
+"\t\tkubectl run nginx --image=nginx -- <arg1> <arg2> ... <argN>\n"
+"\n"
+"\t\t# Start the nginx pod using a different command and custom arguments\n"
+"\t\tkubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>
" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:78 +msgid "" +"\n" +"\t\t# To proxy all of the Kubernetes API and nothing else\n" +"\t\tkubectl proxy --api-prefix=/\n" +"\n" +"\t\t# To proxy only part of the Kubernetes API and also some static files\n" +"\t\t# You can get pods info with 'curl localhost:8001/api/v1/pods'\n" +"\t\tkubectl proxy --www=/my/files --www-prefix=/static/ --api-prefix=/api/\n" +"\n" +"\t\t# To proxy the entire Kubernetes API at a different root\n" +"\t\t# You can get pods info with 'curl localhost:8001/custom/api/v1/pods'\n" +"\t\tkubectl proxy --api-prefix=/custom/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on port 8011, serving static " +"content from ./local/www/\n" +"\t\tkubectl proxy --port=8011 --www=./local/www/\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server on an arbitrary local port\n" +"\t\t# The chosen port for the server will be output to stdout\n" +"\t\tkubectl proxy --port=0\n" +"\n" +"\t\t# Run a proxy to the Kubernetes API server, changing the API prefix to " +"k8s-api\n" +"\t\t# This makes e.g. the pods API available at localhost:8001/k8s-api/v1/" +"pods/\n" +"\t\tkubectl proxy --api-prefix=/k8s-api" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:81 +msgid "" +"\n" +"\t\t# Update node 'foo' with a taint with key 'dedicated' and value 'special-" +"user' and effect 'NoSchedule'\n" +"\t\t# If a taint with that key and effect already exists, its value is " +"replaced as specified\n" +"\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +"\n" +"\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +"'NoSchedule' if one exists\n" +"\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +"\n" +"\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +"\t\tkubectl taint nodes foo dedicated-\n" +"\n" +"\t\t# Add a taint with key 'dedicated' on nodes having label myLabel=X\n" +"\t\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\n" +"\n" +"\t\t# Add to node 'foo' a taint with key 'bar' and no value\n" +"\t\tkubectl taint nodes foo bar:NoSchedule" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:101 +msgid "" +"\n" +"\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'\n" +"\t\tkubectl label pods foo unhealthy=true\n" +"\n" +"\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +"overwriting any existing value\n" +"\t\tkubectl label --overwrite pods foo status=unhealthy\n" +"\n" +"\t\t# Update all pods in the namespace\n" +"\t\tkubectl label pods --all status=unhealthy\n" +"\n" +"\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +"\t\tkubectl label -f pod.json status=unhealthy\n" +"\n" +"\t\t# Update pod 'foo' only if the resource is unchanged from version 1\n" +"\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +"\n" +"\t\t# Update pod 'foo' by removing a label named 'bar' if it exists\n" +"\t\t# Does not require the --overwrite flag\n" +"\t\tkubectl label pods foo bar-" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:54 +msgid "" +"\n" +"\t\t# View the last-applied-configuration annotations by type/name in YAML\n" +"\t\tkubectl apply view-last-applied deployment/nginx\n" +"\n" +"\t\t# View the last-applied-configuration annotations by file in JSON\n" +"\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:67 +msgid "" +"\n" +"\t\t# 
Wait for the pod \"busybox1\" to contain the status condition of type " +"\"Ready\"\n" +"\t\tkubectl wait --for=condition=Ready pod/busybox1\n" +"\n" +"\t\t# The default value of status condition is true; you can wait for other " +"targets after an equal delimiter (compared after Unicode simple case " +"folding, which is a more general form of case-insensitivity)\n" +"\t\tkubectl wait --for=condition=Ready=false pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to contain the status phase to be " +"\"Running\"\n" +"\t\tkubectl wait --for=jsonpath='{.status.phase}'=Running pod/busybox1\n" +"\n" +"\t\t# Wait for the pod \"busybox1\" to be deleted, with a timeout of 60s, " +"after having issued the \"delete\" command\n" +"\t\tkubectl delete pod/busybox1\n" +"\t\tkubectl wait --for=delete pod/busybox1 --timeout=60s" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:144 +msgid "" +"\n" +"\t\tApply a configuration to a resource by file name or stdin.\n" +"\t\tThe resource name must be specified. This resource will be created if it " +"doesn't exist yet.\n" +"\t\tTo use 'apply', always create the resource initially with either 'apply' " +"or 'create --save-config'.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do not " +"use unless you are aware of what the current state is. See https://issues." +"k8s.io/34274." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:130 +msgid "" +"\n" +"\t\tApprove a certificate signing request.\n" +"\n" +"\t\tkubectl certificate approve allows a cluster admin to approve a " +"certificate\n" +"\t\tsigning request (CSR). This action tells a certificate signing " +"controller to\n" +"\t\tissue a certificate to the requester with the attributes requested in " +"the CSR.\n" +"\n" +"\t\tSECURITY NOTICE: Depending on the requested attributes, the issued " +"certificate\n" +"\t\tcan potentially grant a requester access to cluster resources or to " +"authenticate\n" +"\t\tas a requested identity. Before approving a CSR, ensure you understand " +"what the\n" +"\t\tsigned certificate can do.\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:30 +msgid "" +"\n" +"\t\tConfigure application resources.\n" +"\n" +"\t\tThese commands help you make changes to existing application resources." +msgstr "" + +#: pkg/kubectl/cmd/convert/convert.go:41 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. One can use " +"-o option\n" +"\t\tto change to output destination." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:41 +msgid "" +"\n" +"\t\tCreate a TLS secret from the given public/private key pair.\n" +"\n" +"\t\tThe public/private key pair must exist beforehand. The public key " +"certificate must be .PEM encoded and match\n" +"\t\tthe given private key." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:41 +msgid "" +"\n" +"\t\tCreate a cluster role binding for a particular cluster role." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:38 +msgid "" +"\n" +"\t\tCreate a cluster role." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:45 +msgid "" +"\n" +"\t\tCreate a config map based on a file, directory, or specified literal " +"value.\n" +"\n" +"\t\tA single config map may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a config map based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key, you may " +"specify an alternate key.\n" +"\n" +"\t\tWhen creating a config map based on a directory, each file whose " +"basename is a valid key in the directory will be\n" +"\t\tpackaged into the config map. Any directory entries except regular " +"files are ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:41 +msgid "" +"\n" +"\t\tCreate a cron job with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:41 +msgid "" +"\n" +"\t\tCreate a job with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:40 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:41 +msgid "" +"\n" +"\t\tCreate a new secret for use with Docker registries.\n" +"\n" +"\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +"\n" +"\t\tWhen using the Docker command line to push images, you can authenticate " +"to a given registry by running:\n" +"\t\t\t'$ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +"password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +"\n" +"\tThat produces a ~/.dockercfg file that is used by subsequent 'docker push' " +"and 'docker pull' commands to\n" +"\t\tauthenticate to the registry. The email address is optional.\n" +"\n" +"\t\tWhen creating applications, you may have a Docker registry that requires " +"authentication. In order for the\n" +"\t\tnodes to pull images on your behalf, they must have the credentials. " +"You can provide this information\n" +"\t\tby creating a dockercfg secret and attaching it to your service account." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:41 +msgid "" +"\n" +"\t\tCreate a pod disruption budget with the specified name, selector, and " +"desired minimum available pods." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:40 +msgid "" +"\n" +"\t\tCreate a priority class with the specified name, value, globalDefault " +"and description." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:73 +msgid "" +"\n" +"\t\tCreate a resource from a file or from stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:41 +msgid "" +"\n" +"\t\tCreate a resource quota with the specified name, hard limits, and " +"optional scopes." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:40 +msgid "" +"\n" +"\t\tCreate a role binding for a particular role or cluster role." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:70 +msgid "" +"\n" +"\t\tCreate a secret based on a file, directory, or specified literal value.\n" +"\n" +"\t\tA single secret may package one or more key/value pairs.\n" +"\n" +"\t\tWhen creating a secret based on a file, the key will default to the " +"basename of the file, and the value will\n" +"\t\tdefault to the file content. If the basename is an invalid key or you " +"wish to chose your own, you may specify\n" +"\t\tan alternate key.\n" +"\n" +"\t\tWhen creating a secret based on a directory, each file whose basename is " +"a valid key in the directory will be\n" +"\t\tpackaged into the secret. Any directory entries except regular files are " +"ignored (e.g. subdirectories,\n" +"\t\tsymlinks, devices, pipes, etc)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:61 +msgid "" +"\n" +"\t\tCreate a secret with specified type.\n" +"\t\t\n" +"\t\tA docker-registry type secret is for accessing a container registry.\n" +"\n" +"\t\tA generic type secret indicate an Opaque secret type.\n" +"\n" +"\t\tA tls type secret holds TLS certificate and its associated key." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:72 +msgid "" +"\n" +"\t\tCreates a proxy server or application-level gateway between localhost " +"and\n" +"\t\tthe Kubernetes API server. It also allows serving static content over " +"specified\n" +"\t\tHTTP path. All incoming data enters through one port and gets forwarded " +"to\n" +"\t\tthe remote Kubernetes API server port, except for the path matching the " +"static content path." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:44 +msgid "" +"\n" +"\t\tCreates an autoscaler that automatically chooses and sets the number of " +"pods that run in a Kubernetes cluster.\n" +"\n" +"\t\tLooks up a deployment, replica set, stateful set, or replication " +"controller by name and creates an autoscaler that uses the given resource as " +"a reference.\n" +"\t\tAn autoscaler can automatically increase or decrease number of pods " +"deployed within the system as needed." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:61 +msgid "" +"\n" +"\t\tDebug cluster resources using interactive debugging containers.\n" +"\n" +"\t\t'debug' provides automation for common debugging tasks for cluster " +"objects identified by\n" +"\t\tresource and name. Pods will be used by default if no resource is " +"specified.\n" +"\n" +"\t\tThe action taken by 'debug' varies depending on what resource is " +"specified. Supported\n" +"\t\tactions include:\n" +"\n" +"\t\t* Workload: Create a copy of an existing pod with certain attributes " +"changed,\n" +"\t for example changing the image tag to a new version.\n" +"\t\t* Workload: Add an ephemeral container to an already running pod, for " +"example to add\n" +"\t\t debugging utilities without restarting the pod.\n" +"\t\t* Node: Create a new pod that runs in the node's host namespaces and can " +"access\n" +"\t\t the node's filesystem.\n" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:47 +msgid "" +"\n" +"\t\tDelete resources by file names, stdin, resources and names, or by " +"resources and label selector.\n" +"\n" +"\t\tJSON and YAML formats are accepted. 
Only one type of argument may be "
+"specified: file names,\n"
+"\t\tresources and names, or resources and label selector.\n"
+"\n"
+"\t\tSome resources, such as pods, support graceful deletion. These resources "
+"define a default period\n"
+"\t\tbefore they are forcibly terminated (the grace period) but you may "
+"override that value with\n"
+"\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. "
+"Because these resources often\n"
+"\t\trepresent entities in the cluster, deletion may not be acknowledged "
+"immediately. If the node\n"
+"\t\thosting a pod is down or cannot reach the API server, termination may "
+"take significantly longer\n"
+"\t\tthan the grace period. To force delete a resource, you must specify the "
+"--force flag.\n"
+"\t\tNote: only a subset of resources support graceful deletion. In absence "
+"of the support,\n"
+"\t\tthe --grace-period flag is ignored.\n"
+"\n"
+"\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the "
+"pod's processes have been\n"
+"\t\tterminated, which can leave those processes running until the node "
+"detects the deletion and\n"
+"\t\tcompletes graceful deletion. If your processes use shared storage or "
+"talk to a remote API and\n"
+"\t\tdepend on the name of the pod to identify themselves, force deleting "
+"those pods may result in\n"
+"\t\tmultiple processes running on different machines using the same "
+"identification which may lead\n"
+"\t\tto data corruption or inconsistency. Only force delete pods when you are "
+"sure the pod is\n"
+"\t\tterminated, or if your application can tolerate multiple copies of the "
+"same pod running at once.\n"
+"\t\tAlso, if you force delete pods, the scheduler may place new pods on "
+"those nodes before the node\n"
+"\t\thas released those resources and causing those pods to be evicted "
+"immediately.\n"
+"\n"
+"\t\tNote that the delete command does NOT do resource version checks, so if "
+"someone submits an\n"
+"\t\tupdate to a resource right when you submit a delete, their update will "
+"be lost along with the\n"
+"\t\trest of the resource.\n"
+"\n"
+"\t\tAfter a CustomResourceDefinition is deleted, invalidation of discovery "
+"cache may take up\n"
+"\t\tto 6 hours. If you don't want to wait, you might want to run \"kubectl "
+"api-resources\" to refresh\n"
+"\t\tthe discovery cache."
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:178
+msgid ""
+"\n"
+"\t\tDeny a certificate signing request.\n"
+"\n"
+"\t\tkubectl certificate deny allows a cluster admin to deny a certificate\n"
+"\t\tsigning request (CSR). This action tells a certificate signing "
+"controller to\n"
+"\t\tnot to issue a certificate to the requester.\n"
+"\t\t"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:37
+msgid ""
+"\n"
+"\t\tDescribe fields and structure of various resources.\n"
+"\n"
+"\t\tThis command describes the fields associated with each supported API "
+"resource.\n"
+"\t\tFields are identified via a simple JSONPath identifier:\n"
+"\n"
+"\t\t\t<type>.<fieldName>[.<fieldName>]\n"
+"\n"
+"\t\tInformation about each field is retrieved from the server in OpenAPI "
+"format."
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:52 +msgid "" +"\n" +"\t\tDiff configurations specified by file name or stdin between the current " +"online\n" +"\t\tconfiguration, and the configuration as it would be if applied.\n" +"\n" +"\t\tThe output is always YAML.\n" +"\n" +"\t\tKUBECTL_EXTERNAL_DIFF environment variable can be used to select your " +"own\n" +"\t\tdiff command. Users can use external commands with params too, example:\n" +"\t\tKUBECTL_EXTERNAL_DIFF=\"colordiff -N -u\"\n" +"\n" +"\t\tBy default, the \"diff\" command available in your path will be\n" +"\t\trun with the \"-u\" (unified diff) and \"-N\" (treat absent files as " +"empty) options.\n" +"\n" +"\t\tExit status:\n" +"\t\t 0\n" +"\t\tNo differences were found.\n" +"\t\t 1\n" +"\t\tDifferences were found.\n" +"\t\t >1\n" +"\t\tKubectl or diff failed with an error.\n" +"\n" +"\t\tNote: KUBECTL_EXTERNAL_DIFF, if used, is expected to follow that " +"convention." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:51 +msgid "" +"\n" +"\t\tDisplay events.\n" +"\n" +"\t\tPrints a table of the most important information about events.\n" +"\t\tYou can request events for a namespace, for all namespace, or\n" +"\t\tfiltered to only those pertaining to a specified resource." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:55 +msgid "" +"\n" +"\t\tDisplay merged kubeconfig settings or a specified kubeconfig file.\n" +"\n" +"\t\tYou can use --output jsonpath={...} to extract specific values using a " +"jsonpath expression." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:89 +msgid "" +"\n" +"\t\tDisplay one or many resources.\n" +"\n" +"\t\tPrints a table of the most important information about the specified " +"resources.\n" +"\t\tYou can filter the list using a label selector and the --selector flag. " +"If the\n" +"\t\tdesired resource type is namespaced you will only see results in your " +"current\n" +"\t\tnamespace unless you pass --all-namespaces.\n" +"\n" +"\t\tBy specifying the output as 'template' and providing a Go template as " +"the value\n" +"\t\tof the --template flag, you can filter the attributes of the fetched " +"resources." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:58 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of nodes.\n" +"\n" +"\t\tThe top-node command allows you to see the resource consumption of nodes." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:68 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage of pods.\n" +"\n" +"\t\tThe 'top pod' command allows you to see the resource consumption of " +"pods.\n" +"\n" +"\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +"minutes\n" +"\t\tsince pod creation." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:39 +msgid "" +"\n" +"\t\tDisplay resource (CPU/memory) usage.\n" +"\n" +"\t\tThe top command allows you to see the resource consumption for nodes or " +"pods.\n" +"\n" +"\t\tThis command requires Metrics Server to be correctly configured and " +"working on the server. " +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:37 +msgid "" +"\n" +"\t\tDisplay the current-context." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:115 +msgid "" +"\n" +"\t\tDrain node in preparation for maintenance.\n" +"\n" +"\t\tThe given node will be marked unschedulable to prevent new pods from " +"arriving.\n" +"\t\t'drain' evicts the pods if the API server supports\n" +"\t\t[eviction](https://kubernetes.io/docs/concepts/workloads/pods/" +"disruptions/). Otherwise, it will use normal\n" +"\t\tDELETE to delete the pods.\n" +"\t\tThe 'drain' evicts or deletes all pods except mirror pods (which cannot " +"be deleted through\n" +"\t\tthe API server). If there are daemon set-managed pods, drain will not " +"proceed\n" +"\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +"\t\tdaemon set-managed pods, because those pods would be immediately " +"replaced by the\n" +"\t\tdaemon set controller, which ignores unschedulable markings. If there " +"are any\n" +"\t\tpods that are neither mirror pods nor managed by a replication " +"controller,\n" +"\t\treplica set, daemon set, stateful set, or job, then drain will not " +"delete any pods unless you\n" +"\t\tuse --force. --force will also allow deletion to proceed if the " +"managing resource of one\n" +"\t\tor more pods is missing.\n" +"\n" +"\t\t'drain' waits for graceful termination. You should not operate on the " +"machine until\n" +"\t\tthe command completes.\n" +"\n" +"\t\tWhen you are ready to put the node back into service, use kubectl " +"uncordon, which\n" +"\t\twill make the node schedulable again.\n" +"\n" +"\t\t![Workflow](https://kubernetes.io/images/docs/kubectl_drain.svg)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:32 +msgid "" +"\n" +"\t\tEdit a resource from the default editor.\n" +"\n" +"\t\tThe edit command allows you to directly edit any API resource you can " +"retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tWhen attempting to open the editor, it will first attempt to use the " +"shell\n" +"\t\tthat has been defined in the 'SHELL' environment variable. If this is " +"not defined,\n" +"\t\tthe default shell will be used, which is '/bin/bash' for Linux or 'cmd' " +"for Windows.\n" +"\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tEditing is done with the API version used to fetch the resource.\n" +"\t\tTo edit using a specific API version, fully-qualify the resource, " +"version, and group.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:31 +msgid "" +"\n" +"\t\tEdit the latest last-applied-configuration annotations of resources from " +"the default editor.\n" +"\n" +"\t\tThe edit-last-applied command allows you to directly edit any API " +"resource you can retrieve via the\n" +"\t\tcommand-line tools. It will open the editor defined by your KUBE_EDITOR, " +"or EDITOR\n" +"\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +"Windows.\n" +"\t\tYou can edit multiple objects, although changes are applied one at a " +"time. The command\n" +"\t\taccepts file names as well as command-line arguments, although the files " +"you point to must\n" +"\t\tbe previously saved versions of resources.\n" +"\n" +"\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +"\n" +"\t\tThe flag --windows-line-endings can be used to force Windows line " +"endings,\n" +"\t\totherwise the default for your operating system will be used.\n" +"\n" +"\t\tIn the event an error occurs while updating, a temporary file will be " +"created on disk\n" +"\t\tthat contains your unapplied changes. The most common error when " +"updating a resource\n" +"\t\tis another editor changing the resource on the server. When this occurs, " +"you will have\n" +"\t\tto apply your changes to the newer version of the resource, or update " +"your temporary\n" +"\t\tsaved copy to include the latest resource version." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:55 +msgid "" +"\n" +"\t\tExperimental: Wait for a specific condition on one or many resources.\n" +"\n" +"\t\tThe command takes multiple resources and waits until the specified " +"condition\n" +"\t\tis seen in the Status field of every given resource.\n" +"\n" +"\t\tAlternatively, the command can wait for the given set of resources to be " +"deleted\n" +"\t\tby providing the \"delete\" keyword as the value to the --for flag.\n" +"\n" +"\t\tA successful message will be printed to stdout indicating when the " +"specified\n" +" condition has been met. You can use -o option to change to output " +"destination." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:53 +msgid "" +"\n" +"\t\tExpose a resource as a new Kubernetes service.\n" +"\n" +"\t\tLooks up a deployment, service, replica set, replication controller or " +"pod by name and uses the selector\n" +"\t\tfor that resource as the selector for a new service on the specified " +"port. A deployment or replica set\n" +"\t\twill be exposed as a service only if its selector is convertible to a " +"selector that service supports,\n" +"\t\ti.e. when the selector contains only the matchLabels component. Note " +"that if no port is specified via\n" +"\t\t--port and the exposed resource has multiple ports, all will be re-used " +"by the new service. Also if no\n" +"\t\tlabels are specified, the new service will re-use the labels from the " +"resource it exposes.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:50 +msgid "" +"\n" +"\t\tList all available plugin files on a user's PATH.\n" +"\n" +"\t\tAvailable plugin files are those that are:\n" +"\t\t- executable\n" +"\t\t- anywhere on the user's PATH\n" +"\t\t- begin with \"kubectl-\"\n" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:31 +msgid "" +"\n" +"\t\tManage the rollout of one or many resources." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:86 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:57 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:59 +msgid "" +"\n" +"\t\tMark the provided resource as paused.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller.\n" +"\t\tUse \"kubectl rollout resume\" to resume a paused resource.\n" +"\t\tCurrently only deployments support being paused." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:47 +msgid "" +"\n" +"\t\tOutput shell completion code for the specified shell (bash, zsh, fish, " +"or powershell).\n" +"\t\tThe shell code must be evaluated to provide interactive\n" +"\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +"\t\tthe .bash_profile.\n" +"\n" +"\t\tDetailed instructions on how to do this are available here:\n" +"\n" +" for macOS:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-macos/#enable-" +"shell-autocompletion\n" +"\n" +" for linux:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-linux/#enable-" +"shell-autocompletion\n" +"\n" +" for windows:\n" +" https://kubernetes.io/docs/tasks/tools/install-kubectl-windows/" +"#enable-shell-autocompletion\n" +"\n" +"\t\tNote for zsh users: [1] zsh completions are only supported in versions " +"of zsh >= 5.2." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:51 +msgid "" +"\n" +"\t\tPrint the logs for a container in a pod or specified resource. \n" +"\t\tIf the pod has only one container, the container name is optional." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:37 +msgid "" +"\n" +"\t\tProvides utilities for interacting with plugins.\n" +"\n" +"\t\tPlugins provide extended functionality that is not part of the major " +"command-line distribution.\n" +"\t\tPlease refer to the documentation and examples for more information " +"about how write your own plugins.\n" +"\n" +"\t\tThe easiest way to discover and install plugins is via the kubernetes " +"sub-project krew.\n" +"\t\tTo install krew, visit [krew.sigs.k8s.io](https://krew.sigs.k8s.io/docs/" +"user-guide/setup/install/)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:47 +msgid "" +"\n" +"\t\tRenames a context from the kubeconfig file.\n" +"\n" +"\t\tCONTEXT_NAME is the context name that you want to change.\n" +"\n" +"\t\tNEW_NAME is the new name you want to set.\n" +"\n" +"\t\tNote: If the context being renamed is the 'current-context', this field " +"will also be updated." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:49 +msgid "" +"\n" +"\t\tReplace a resource by file name or stdin.\n" +"\n" +"\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +"the\n" +"\t\tcomplete resource spec must be provided. This can be obtained by\n" +"\n" +"\t\t $ kubectl get TYPE NAME -o yaml" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:60 +msgid "" +"\n" +"\t\tRestart a resource.\n" +"\n" +"\t Resource rollout will be restarted." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:60 +msgid "" +"\n" +"\t\tResume a paused resource.\n" +"\n" +"\t\tPaused resources will not be reconciled by a controller. 
By resuming a\n" +"\t\tresource, we allow it to be reconciled again.\n" +"\t\tCurrently only deployments support being resumed." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:56 +msgid "" +"\n" +"\t\tRoll back to a previous rollout." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:47 +msgid "" +"\n" +"\t\tSet a cluster entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_context.go:45 +msgid "" +"\n" +"\t\tSet a context entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values for those fields." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:41 +msgid "" +"\n" +"\t\tSet a new size for a deployment, replica set, replication controller, or " +"stateful set.\n" +"\n" +"\t\tScale also allows users to specify one or more preconditions for the " +"scale action.\n" +"\n" +"\t\tIf --current-replicas or --resource-version is specified, it is " +"validated before the\n" +"\t\tscale is attempted, and it is guaranteed that the precondition holds " +"true when the\n" +"\t\tscale is sent to the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:69 +#, c-format +msgid "" +"\n" +"\t\tSet a user entry in kubeconfig.\n" +"\n" +"\t\tSpecifying a name that already exists will merge new fields on top of " +"existing values.\n" +"\n" +"\t\t Client-certificate flags:\n" +"\t\t --%v=certfile --%v=keyfile\n" +"\n" +"\t\t Bearer token flags:\n" +"\t\t\t --%v=bearer_token\n" +"\n" +"\t\t Basic auth flags:\n" +"\t\t\t --%v=basic_user --%v=basic_password\n" +"\n" +"\t\tBearer token and basic auth are mutually exclusive." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f <file>' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:67 +#, c-format +msgid "" +"\n" +"\t\tSet the selector on a resource. Note that the new selector will " +"overwrite the old selector if the resource had one prior to the invocation\n" +"\t\tof 'set selector'.\n" +"\n" +"\t\tA selector must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\tIf --resource-version is specified, then updates will use this resource " +"version, otherwise the existing resource-version will be used.\n" +" Note: currently selectors can only be set on Service objects." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:39 +msgid "" +"\n" +"\t\tShow details of a specific resource or group of resources.\n" +"\n" +"\t\tPrint a detailed description of the selected resources, including " +"related resources such\n" +"\t\tas events or controllers. You may select a single object by name, all " +"objects of that\n" +"\t\ttype, provide a name prefix, or label selector. For example:\n" +"\n" +"\t\t $ kubectl describe TYPE NAME_PREFIX\n" +"\n" +"\t\twill first check for an exact match on TYPE and NAME_PREFIX.
If no such " +"resource\n" +"\t\texists, it will output details for every resource that has a name " +"prefixed with NAME_PREFIX." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:48 +msgid "" +"\n" +"\t\tShow the status of the rollout.\n" +"\n" +"\t\tBy default 'rollout status' will watch the status of the latest rollout\n" +"\t\tuntil it's done. If you don't want to wait for the rollout to finish " +"then\n" +"\t\tyou can use --watch=false. Note that if a new rollout starts in-between, " +"then\n" +"\t\t'rollout status' will continue watching the latest revision. If you want " +"to\n" +"\t\tpin to a specific revision and abort if it is rolled over by another " +"revision,\n" +"\t\tuse --revision=N where N is the revision you need to watch for." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:42 +#, c-format +msgid "" +"\n" +"\t\tSpecify compute resource requirements (CPU, memory) for any resource " +"that defines a pod template. If a pod is successfully scheduled, it is " +"guaranteed the amount of resource requested, but may burst up to its " +"specified limits.\n" +"\n" +"\t\tFor each compute resource, if a limit is specified and a request is " +"omitted, the request will default to the limit.\n" +"\n" +"\t\tPossible resources include (case insensitive): %s." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:52 +msgid "" +"\n" +"\t\tUpdate environment variables on a pod template.\n" +"\n" +"\t\tList environment variable definitions in one or more pods, pod " +"templates.\n" +"\t\tAdd, update, or remove container environment variable definitions in one " +"or\n" +"\t\tmore pod templates (within replication controllers or deployment " +"configurations).\n" +"\t\tView or modify the environment variable definitions on all containers in " +"the\n" +"\t\tspecified pods or pod templates, or just those that match a wildcard.\n" +"\n" +"\t\tIf \"--env -\" is passed, environment variables can be read from STDIN " +"using the standard env\n" +"\t\tsyntax.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:79 +msgid "" +"\n" +"\t\tUpdate existing container image(s) of resources.\n" +"\n" +"\t\tPossible resources include (case insensitive):\n" +"\t\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:82 +msgid "" +"\n" +"\t\tUpdate fields of a resource using strategic merge patch, a JSON merge " +"patch, or a JSON patch.\n" +"\n" +"\t\tJSON and YAML formats are accepted.\n" +"\n" +"\t\tNote: Strategic merge patch is not supported for custom resources." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:112 +msgid "" +"\n" +"\t\tUpdate the annotations on one or more resources.\n" +"\n" +"\t\tAll Kubernetes objects support the ability to store additional data with " +"the object as\n" +"\t\tannotations. Annotations are key/value pairs that can be larger than " +"labels and include\n" +"\t\tarbitrary string values such as structured JSON. Tools and system " +"extensions may use\n" +"\t\tannotations to store their own data.\n" +"\n" +"\t\tAttempting to set an annotation that already exists will fail unless --" +"overwrite is set.\n" +"\t\tIf --resource-version is specified and does not match the current " +"resource version on\n" +"\t\tthe server the command will fail." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:93 +#, c-format +msgid "" +"\n" +"\t\tUpdate the labels on a resource.\n" +"\n" +"\t\t* A label key and value must begin with a letter or number, and may " +"contain letters, numbers, hyphens, dots, and underscores, up to %[1]d " +"characters each.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* If --overwrite is true, then existing labels can be overwritten, " +"otherwise attempting to overwrite a label will result in an error.\n" +"\t\t* If --resource-version is specified, then updates will use this " +"resource version, otherwise the existing resource-version will be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:71 +#, c-format +msgid "" +"\n" +"\t\tUpdate the taints on one or more nodes.\n" +"\n" +"\t\t* A taint consists of a key, value, and effect. As an argument here, it " +"is expressed as key=value:effect.\n" +"\t\t* The key must begin with a letter or number, and may contain letters, " +"numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +"\t\t* Optionally, the key can begin with a DNS subdomain prefix and a single " +"'/', like example.com/my-app.\n" +"\t\t* The value is optional. If given, it must begin with a letter or " +"number, and may contain letters, numbers, hyphens, dots, and underscores, up " +"to %[2]d characters.\n" +"\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +"\t\t* Currently taint can only apply to node." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:38 +msgid "" +"\n" +"\t\tView previous rollout revisions and configurations." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:48 +msgid "" +"\n" +"\t\tView the latest last-applied-configuration annotations by type/name or " +"file.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. You can use " +"the -o option\n" +"\t\tto change the output format." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:47 +msgid "" +"\n" +"\t # Create a new TLS secret named tls-secret with the given key pair\n" +"\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --key=path/" +"to/tls.key" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:43 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:83 +msgid "" +"\n" +"\t # Create a new secret named my-secret with keys for each file in folder " +"bar\n" +"\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +"\n" +"\t # Create a new secret named my-secret with specified keys instead of " +"names on disk\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-file=ssh-publickey=path/to/id_rsa.pub\n" +"\n" +"\t # Create a new secret named my-secret with key1=supersecret and " +"key2=topsecret\n" +"\t kubectl create secret generic my-secret --from-literal=key1=supersecret " +"--from-literal=key2=topsecret\n" +"\n" +"\t # Create a new secret named my-secret using a combination of a file and " +"a literal\n" +"\t kubectl create secret generic my-secret --from-file=ssh-privatekey=path/" +"to/id_rsa --from-literal=passphrase=topsecret\n" +"\n" +"\t # Create a new secret named my-secret from env files\n" +"\t kubectl create secret generic my-secret --from-env-file=path/to/foo.env " +"--from-env-file=path/to/bar.env" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:45 +msgid "" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image\n" +"\tkubectl create deployment my-dep --image=busybox\n" +"\n" +"\t# Create a deployment with a command\n" +"\tkubectl create deployment my-dep --image=busybox -- date\n" +"\n" +"\t# Create a deployment named my-dep that runs the nginx image with 3 " +"replicas\n" +"\tkubectl create deployment my-dep --image=nginx --replicas=3\n" +"\n" +"\t# Create a deployment named my-dep that runs the busybox image and expose " +"port 5701\n" +"\tkubectl create deployment my-dep --image=busybox --port=5701" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:352 +msgid "" +"\n" +"\t# Create a new ExternalName service named my-ns\n" +"\tkubectl create service externalname my-ns --external-name bar.com" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:58 +msgid "" +"\n" +"\t# List recent events in the default namespace\n" +"\tkubectl events\n" +"\n" +"\t# List recent events in all namespaces\n" +"\tkubectl events --all-namespaces\n" +"\n" +"\t# List recent events for the specified pod, then wait for more events and " +"list them as they arrive\n" +"\tkubectl events --for pod/web-pod-13je7 --watch\n" +"\n" +"\t# List recent events in YAML format\n" +"\tkubectl events -oyaml\n" +"\n" +"\t# List recent only events of type 'Warning' or 'Normal'\n" +"\tkubectl events --types=Warning,Normal" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:51 +msgid "" +"\n" +"\t# Set deployment nginx-deployment's service account to serviceaccount1\n" +"\tkubectl set serviceaccount deployment nginx-deployment 
serviceaccount1\n" +"\n" +"\t# Print the result (in YAML format) of updated nginx deployment with the " +"service account from local file, without hitting the API server\n" +"\tkubectl set sa -f nginx-deployment.yaml serviceaccount1 --local --dry-" +"run=client -o yaml\n" +"\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:42 +msgid "" +"\n" +"\tCreate a deployment with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:345 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:61 +msgid "" +"\n" +"\tCreate an ingress with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:44 +msgid "" +"\n" +"\tSet an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots.\n" +"\n" +"\tPROPERTY_VALUE is the new value you want to set. Binary fields such as " +"'certificate-authority-data' expect a base64 encoded string unless the --set-" +"raw-bytes flag is used.\n" +"\n" +"\tSpecifying an attribute name that already exists will merge new fields on " +"top of existing values." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:39 +msgid "" +"\n" +"\tUnset an individual value in a kubeconfig file.\n" +"\n" +"\tPROPERTY_NAME is a dot delimited name where each token represents either " +"an attribute name or a map key. Map keys may not contain dots." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:44 +msgid "" +"\n" +"\tUpdate the service account of pod template resources.\n" +"\n" +"\tPossible resources (case insensitive) can be:\n" +"\n" +"\t" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:41 +msgid "" +"\n" +"\tUpdate the user, group, or service account in a role binding or cluster " +"role binding." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:76 +msgid "" +"\n" +" \tpod (po), replicationcontroller (rc), deployment (deploy), daemonset " +"(ds), statefulset (sts), cronjob (cj), replicaset (rs)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:64 +msgid "" +"\n" +" Forward one or more local ports to a pod.\n" +"\n" +" Use resource type/name such as deployment/mydeployment to " +"select a pod. Resource type defaults to 'pod' if omitted.\n" +"\n" +" If there are multiple pods matching the criteria, a pod will " +"be selected automatically. The\n" +" forwarding session ends when the selected pod terminates, " +"and a rerun of the command is needed\n" +" to resume forwarding." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:234 +msgid "" +"\n" +" # Create a new ClusterIP service named my-cs\n" +" kubectl create service clusterip my-cs --tcp=5678:8080\n" +"\n" +" # Create a new ClusterIP service named my-cs (in headless mode)\n" +" kubectl create service clusterip my-cs --clusterip=\"None\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:312 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:275 +msgid "" +"\n" +" # Create a new NodePort service named my-ns\n" +" kubectl create service nodeport my-ns --tcp=5678:8080" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:103 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:124 +msgid "" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend'\n" +" # If the same annotation is set multiple times, only the last value will " +"be applied\n" +" kubectl annotate pods foo description='my frontend'\n" +"\n" +" # Update a pod identified by type and name in \"pod.json\"\n" +" kubectl annotate -f pod.json description='my frontend'\n" +"\n" +" # Update pod 'foo' with the annotation 'description' and the value 'my " +"frontend running nginx', overwriting any existing value\n" +" kubectl annotate --overwrite pods foo description='my frontend running " +"nginx'\n" +"\n" +" # Update all pods in the namespace\n" +" kubectl annotate pods --all description='my frontend running nginx'\n" +"\n" +" # Update pod 'foo' only if the resource is unchanged from version 1\n" +" kubectl annotate pods foo description='my frontend running nginx' --" +"resource-version=1\n" +"\n" +" # Update pod 'foo' by removing an annotation named 'description' if it " +"exists\n" +" # Does not require the --overwrite flag\n" +" kubectl annotate pods foo description-" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:231 +msgid "" +"\n" +" Create a ClusterIP service with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:309 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:272 +msgid "" +"\n" +" Create a NodePort service with the specified name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:94 +msgid "" +"\n" +" Dump cluster information out suitable for debugging and diagnosing " +"cluster problems. By default, dumps everything to\n" +" stdout. You can optionally specify a directory with --output-directory. " +"If you specify a directory, Kubernetes will\n" +" build a set of files in that directory. 
By default, only dumps things " +"in the current namespace and 'kube-system' namespace, but you can\n" +" switch to a different namespace with the --namespaces flag, or specify --" +"all-namespaces to dump all namespaces.\n" +"\n" +" The command also dumps the logs of all of the pods in the cluster; these " +"logs are dumped into different directories\n" +" based on namespace and pod name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:41 +msgid "" +"\n" +" Display addresses of the control plane and services with label kubernetes." +"io/cluster-service=true.\n" +" To further debug and diagnose cluster problems, use 'kubectl cluster-info " +"dump'." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:49 +msgid "" +" environment variable is set, then it is used as a list of paths (normal " +"path delimiting rules for your system). These paths are merged. When a value " +"is modified, it is modified in the file that defines the stanza. When a " +"value is created, it is created in the first file that exists. If no files " +"in the chain exist, then it creates the last file in the list.\n" +"\t\t\t3. Otherwise, " +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:48 +msgid "" +" flag is set, then only that file is loaded. The flag may only be set once " +"and no merging takes place.\n" +"\t\t\t2. If $" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:50 +msgid " is used and no merging takes place." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object " +"tracked by the quota." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:180 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:183 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:193 +msgid "Allocate a TTY for the debugging container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/util/override_options.go:50 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it " +"is used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:191 +msgid "Annotations to apply to the pod."
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:203 +msgid "Apply a configuration to a resource by file name or stdin" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:129 +msgid "Approve a certificate signing request" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:264 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:109 +msgid "" +"Attach to a process that is already running inside an existing container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:108 +msgid "Attach to a running container" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:108 +msgid "" +"Auto-scale a deployment, replica set, stateful set, or replication controller" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:186 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or " +"set to 'None' to create a headless service." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:107 +msgid "ClusterRole this RoleBinding should reference" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:33 +msgid "Commands for features in alpha" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:185 +msgid "Container image to use for debug container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:181 +msgid "Container name to use for debug container." +msgstr "" + +#: pkg/kubectl/cmd/convert/convert.go:96 +msgid "Convert config files between different API versions" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:98 +msgid "Copy files and directories to and from containers" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:99 +msgid "Copy files and directories to and from containers." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:249 +msgid "Create a ClusterIP service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:324 +msgid "Create a LoadBalancer service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:287 +msgid "Create a NodePort service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrole.go:81 +msgid "Create a cluster role" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:87 +msgid "Create a cluster role binding for a particular cluster role" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:121 +msgid "Create a config map from a local file, directory or literal value" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:182 +msgid "Create a copy of the target Pod with this name." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:91 +msgid "Create a cron job with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:100 +msgid "Create a deployment with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:92 +msgid "Create a job with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:84 +msgid "Create a namespace with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:95 +msgid "Create a pod disruption budget with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:92 +msgid "Create a priority class with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:91 +msgid "Create a quota with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:108 +msgid "Create a resource from a file or from stdin" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:92 +msgid "Create a role binding for a particular role or cluster role" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:171 +msgid "Create a role with single rule" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:146 +msgid "Create a secret from a local file, directory, or literal value" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using a specified subcommand" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:48 +msgid "Create a service using a specified subcommand" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:49 +msgid "Create a service using a specified subcommand." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:364 +msgid "Create an ExternalName service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:145 +msgid "Create an ingress with the specified name" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:61 +msgid "Create and run a particular image in a pod." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:162 +msgid "Create debugging sessions for troubleshooting workloads and nodes" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:194 +msgid "" +"Debugging profile. Options are \"legacy\", \"general\", \"baseline\", " +"\"netadmin\", or \"restricted\"." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:146 +msgid "" +"Delete resources by file names, stdin, resources and names, or by resources " +"and label selector" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:43 +msgid "Delete the specified cluster from the kubeconfig." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:43 +msgid "Delete the specified context from the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:65 +msgid "Delete the specified user from the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_user.go:66 +msgid "Delete the specified user from the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:177 +msgid "Deny a certificate signing request" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:75 +msgid "Describe one or many contexts" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:136 +msgid "Diff the live version against a would-be applied version" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:66 +msgid "Display cluster information" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:42 +msgid "Display clusters defined in the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:82 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:53 +msgid "Display one or many contexts from the kubeconfig file." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:164 +msgid "Display one or many resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:50 +msgid "Display resource (CPU/memory) usage" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:82 +msgid "Display resource (CPU/memory) usage of nodes" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:101 +msgid "Display resource (CPU/memory) usage of pods" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:51 +msgid "Display the current-context" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:60 +msgid "Display users defined in the kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_users.go:61 +msgid "Display users defined in the kubeconfig." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:186 +msgid "Drain node in preparation for maintenance" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:75 +msgid "Dump relevant information for debugging and diagnosis" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:83 +msgid "Edit a resource on the server" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_edit_last_applied.go:67 +msgid "Edit latest last-applied-configuration annotations of a resource/object" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:184 +msgid "Environment variables to set in the container." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:91 +msgid "Execute a command in a container" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:92 +msgid "Execute a command in a container." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:124 +msgid "Experimental: Wait for a specific condition on one or many resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:379 +msgid "External name of service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:110 +msgid "Forward one or more local ports to a pod" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:102 +msgid "Get documentation for a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:179 +msgid "" +"IP to assign to the LoadBalancer. If empty, an ephemeral IP will be created " +"and used (cloud-provider specific)." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:185 +msgid "" +"If non-empty, set the session affinity for the service to this; legal " +"values: 'None', 'ClientIP'" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:187 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:159 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single " +"resource." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:179 +msgid "" +"If specified, everything after -- will be passed to the new container as " +"Args instead of Command." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:207 +msgid "If true, run the container in privileged mode." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:189 +msgid "If true, suppress informational messages." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:180 +msgid "" +"If true, wait for the container to start running, and then attach as if " +"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is " +"set, in which case the default is true." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:188 +msgid "" +"Keep stdin open on the container(s) in the pod, even if nothing is attached." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:94 +msgid "List all visible plugin executables on a user's PATH" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:126 +msgid "List events" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:61 +msgid "Manage the rollout of a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:100 +msgid "Mark node as schedulable" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:71 +msgid "Mark node as unschedulable" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:85 +msgid "Mark the provided resource as paused" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:48 +msgid "Modify certificate resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +msgid "Modify certificate resources." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:182 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:41 +msgid "No alpha commands are available in this version of kubectl" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:176 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. Only " +"one of since-time / since may be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:134 +msgid "" +"Output shell completion code for the specified shell (bash, zsh, fish, or " +"powershell)" +msgstr "" + +#: pkg/kubectl/cmd/convert/convert.go:106 +msgid "" +"Output the formatted object with the given group version (for ex: " +"'extensions/v1beta1')." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:129 +msgid "" +"Precondition for resource version. Requires that the current resource " +"version match this value in order to scale." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:80 +msgid "Print the client and server version information" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:81 +msgid "" +"Print the client and server version information for the current context." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:154 +msgid "Print the logs for a container in a pod" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:102 +msgid "Print the supported API resources on the server" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:103 +msgid "Print the supported API resources on the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:60 +msgid "" +"Print the supported API versions on the server, in the form of \"group/" +"version\"." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/plugin/plugin.go:66 +msgid "Provides utilities for interacting with plugins" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/rename_context.go:45 +msgid "Rename a context from the kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:119 +msgid "Replace a resource by file name or stdin" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:93 +msgid "Restart a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:89 +msgid "Resume a paused resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:108 +msgid "Role this RoleBinding should reference" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:153 +msgid "Run a particular image on the cluster" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:124 +msgid "Run a proxy to the Kubernetes API server" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:76 +msgid "Set a cluster entry in kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_context.go:62 +msgid "Set a context entry in kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:114 +msgid "Set a new size for a deployment, replica set, or replication controller" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:157 +msgid "Set a user entry in kubeconfig" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/set.go:74 +msgid "Set an individual value in a kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:41 +msgid "Set specific features on objects" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/use_context.go:52 +msgid "Set the current-context in a kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:101 +msgid "" +"Set the last-applied-configuration annotation on a live object to match the " +"contents of a file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:150 +msgid "Show details of a specific resource or group of resources" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:104 +msgid "Show the status of the rollout" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:163 +msgid "" +"Take a replication controller, service, deployment or pod and expose it as a " +"new Kubernetes service" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "The image for the container to run." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:187 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112 +msgid "" +"The maximum number or percentage of unavailable pods this budget requires." 
+msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "" +"The minimum number or percentage of available pods this budget requires." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:184 +msgid "The name for the newly created object." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:126 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:176 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:177 +msgid "" +"The port that the service should serve on. Copied from the resource being " +"exposed, if unspecified" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:197 +msgid "The port that this container exposes." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:203 +msgid "" +"The restart policy for this Pod. Legal values [Always, OnFailure, Never]." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:164 +msgid "The type of secret to create" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:34 +msgid "" +"These commands correspond to alpha features that are not enabled in " +"Kubernetes clusters by default." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:178 +msgid "" +"Type for this service: ClusterIP, NodePort, LoadBalancer, or ExternalName. " +"Default is 'ClusterIP'." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:88 +msgid "Undo a previous rollout" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/unset.go:59 +msgid "Unset an individual value in a kubeconfig file" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:156 +msgid "Update environment variables on a pod template" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/patch/patch.go:126 +msgid "Update fields of a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:153 +msgid "Update the annotations on a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_image.go:118 +msgid "Update the image of a pod template" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:139 +msgid "Update the labels on a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:102 +msgid "Update the service account of a resource" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:110 +msgid "Update the taints on one or more nodes" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:99 +msgid "" +"Update the user, group, or service account in a role binding or cluster role " +"binding" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:86 +msgid "View rollout history" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_view_last_applied.go:78 +msgid "" +"View the latest last-applied-configuration annotations of a resource/object" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:186 +msgid "" +"When used with '--copy-to', a list of name=image pairs for 
changing " +"container images, similar to how 'kubectl set image' works." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:183 +msgid "When used with '--copy-to', delete the original Pod." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:191 +msgid "" +"When used with '--copy-to', enable process namespace sharing in the copy." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:190 +msgid "" +"When used with '--copy-to', schedule the copy of target Pod on the same node." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:192 +msgid "" +"When using an ephemeral container, target processes in this container name." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:86 +msgid "" +"Where to output the files. If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108 +msgid "" +"description is an arbitrary string that usually provides guidelines on when " +"this priority class should be used." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:89 +msgid "dummy restart flag)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107 +msgid "" +"global-default specifies whether this PriorityClass should be considered as " +"the default priority." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:317 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:51 +msgid "" +"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), " +"replicaset (rs)" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109 +msgid "" +"preemption-policy is the policy for preempting pods with lower priority." +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:42 +msgid "" +"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, " +"replicaset (rs), statefulset" +msgstr "" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:106 +msgid "the value of this priority class." +msgstr "" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..42bc2ad659 Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..29bd5844d5 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_CN/LC_MESSAGES/k8s.po @@ -0,0 +1,3236 @@ +# Test translations for unit tests. +# Copyright (C) 2017 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR shiywang@redhat.com, 2017. +# FIRST AUTHOR zhengjiajin@caicloud.io, 2017. 
+# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2022-07-04 18:54+0800\n" +"Last-Translator: zhengjiajin \n" +"Language-Team: \n" +"Language: zh\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" +"X-Generator: Poedit 3.0.1\n" +"X-Poedit-SourceCharset: UTF-8\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_node.go:62 +msgid "" +"\n" +"\t\t # Show metrics for all nodes\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # Show metrics for a given node\n" +"\t\t kubectl top node NODE_NAME" +msgstr "" +"\n" +"\t\t # 显示所有节点的指标\n" +"\t\t kubectl top node\n" +"\n" +"\t\t # 显示指定节点的指标\n" +"\t\t kubectl top node NODE_NAME" + +#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:46 +msgid "" +"\n" +"\t\t# Get the documentation of the resource and its fields\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# Get the documentation of a specific field of a resource\n" +"\t\tkubectl explain pods.spec.containers" +msgstr "" +"\n" +"\t\t# 获取资源及其字段的文档\n" +"\t\tkubectl explain pods\n" +"\n" +"\t\t# 获取资源指定字段的文档\n" +"\t\tkubectl explain pods.spec.containers" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:29 +msgid "" +"\n" +"\t\t# Print flags inherited by all commands\n" +"\t\tkubectl options" +msgstr "" +"\n" +"\t\t# 输出所有命令继承的 flags\n" +"\t\tkubectl options" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:44 +msgid "" +"\n" +"\t\t# Print the client and server versions for the current context\n" +"\t\tkubectl version" +msgstr "" +"\n" +"\t\t# 输出当前客户端和服务端的版本\n" +"\t\tkubectl version" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34 +msgid "" +"\n" +"\t\t# Print the supported API versions\n" +"\t\tkubectl api-versions" +msgstr "" +"\n" +"\t\t# 输出支持的 API 版本\n" +"\t\tkubectl api-versions" + +#: staging/src/k8s.io/kubectl/pkg/cmd/top/top_pod.go:75 +msgid "" +"\n" +"\t\t# Show metrics for all pods in the default namespace\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# Show metrics for all pods in the given namespace\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# Show metrics for a given pod and its containers\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# Show metrics for the pods defined by label name=myLabel\n" +"\t\tkubectl top pod -l name=myLabel" +msgstr "" +"\n" +"\t\t# 显示 default 命名空间下所有 Pods 的指标\n" +"\t\tkubectl top pod\n" +"\n" +"\t\t# 显示指定命名空间下所有 Pods 的指标\n" +"\t\tkubectl top pod --namespace=NAMESPACE\n" +"\n" +"\t\t# 显示指定 Pod 和它的容器的 metrics\n" +"\t\tkubectl top pod POD_NAME --containers\n" +"\n" +"\t\t# 显示指定 label 为 name=myLabel 的 Pods 的 metrics\n" +"\t\tkubectl top pod -l name=myLabel" + +#: pkg/kubectl/cmd/convert/convert.go:40 +msgid "" +"\n" +"\t\tConvert config files between different API versions. Both YAML\n" +"\t\tand JSON formats are accepted.\n" +"\n" +"\t\tThe command takes filename, directory, or URL as input, and convert it " +"into format\n" +"\t\tof version specified by --output-version flag. If target version is not " +"specified or\n" +"\t\tnot supported, convert to latest version.\n" +"\n" +"\t\tThe default output will be printed to stdout in YAML format. 
One can use -" +"o option\n" +"\t\tto change to output destination." +msgstr "" +"\n" +"\t\t在不同的 API 版本之间转换配置文件。接受 YAML\n" +"\t\t和 JSON 格式。\n" +"\n" +"\t\t这个命令以文件名, 目录, 或者 URL 作为输入,并通过 —output-version 参数\n" +"\t\t 转换到指定版本的格式。如果没有指定目标版本或者所指定版本\n" +"\t\t不支持, 则转换为最新版本。\n" +"\n" +"\t\t默认以 YAML 格式输出到标准输出。可以使用 -o option\n" +"\t\t修改目标输出的格式。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:39 +msgid "" +"\n" +"\t\tCreate a namespace with the specified name." +msgstr "" +"\n" +"\t\t用给定名称创建一个命名空间。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:43 +msgid "" +"\n" +"\t\tCreate a role with single rule." +msgstr "" +"\n" +"\t\t创建一个具有单一规则的角色。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40 +msgid "" +"\n" +"\t\tCreate a service account with the specified name." +msgstr "" +"\n" +"\t\t用指定的名称创建一个服务账户。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:84 +msgid "" +"\n" +"\t\tMark node as schedulable." +msgstr "" +"\n" +"\t\t标记节点为可调度。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:55 +msgid "" +"\n" +"\t\tMark node as unschedulable." +msgstr "" +"\n" +"\t\t标记节点为不可调度。" + +#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply_set_last_applied.go:70 +msgid "" +"\n" +"\t\tSet the latest last-applied-configuration annotations by setting it to " +"match the contents of a file.\n" +"\t\tThis results in the last-applied-configuration being updated as though " +"'kubectl apply -f ' was run,\n" +"\t\twithout updating any other parts of the object." +msgstr "" +"\n" +"\t\t设置最新的 last-applied-configuration 注解,使之匹配某文件的内容。\n" +"\t\t这会导致 last-applied-configuration 被更新,就像执行了 kubectl apply -f " +" 一样,\n" +"\t\t只是不会更新对象的其他部分。" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:42 +msgid "" +"\n" +"\t # Create a new namespace named my-namespace\n" +"\t kubectl create namespace my-namespace" +msgstr "" +"\n" +"\t # 创建一个名为 my-namespace 的新命名空间\n" +"\t kubectl create namespace my-namespace" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:43 +msgid "" +"\n" +"\t # Create a new service account named my-service-account\n" +"\t kubectl create serviceaccount my-service-account" +msgstr "" +"\n" +"\t # 创建一个名为 my-service-account 的新服务帐户\n" +"\t kubectl create serviceaccount my-service-account" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:344 +msgid "" +"\n" +"\tCreate an ExternalName service with the specified name.\n" +"\n" +"\tExternalName service references to an external DNS address instead of\n" +"\tonly pods, which will allow application authors to reference services\n" +"\tthat exist off platform, on other clusters, or locally." +msgstr "" +"\n" +"\t创建具有指定名称的 ExternalName 服务。\n" +"\n" +"\tExternalName 服务引用外部 DNS 地址而不是 Pod 地址,\n" +"\t这将允许应用程序作者引用存在于平台外、其他集群上或本地的服务。" + +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:28 +msgid "" +"\n" +"\tHelp provides help for any command in the application.\n" +"\tSimply type kubectl help [path to command] for full details." 
+msgstr "" +"\n" +"\tHelp 为应用程序中的任何命令提供帮助。\n" +"\t只需键入 kubectl help [命令路径] 即可获得完整的详细信息。" + +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:311 +msgid "" +"\n" +" # Create a new LoadBalancer service named my-lbs\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" +msgstr "" +"\n" +" # 创建一个名称为 my-lbs 的新负载均衡服务\n" +" kubectl create service loadbalancer my-lbs --tcp=5678:8080" + +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102 +msgid "" +"\n" +" # Dump current cluster state to stdout\n" +" kubectl cluster-info dump\n" +"\n" +" # Dump current cluster state to /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # Dump all namespaces to stdout\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # Dump a set of namespaces to /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" +msgstr "" +"\n" +" # 导出当前的集群状态信息到标准输出\n" +" kubectl cluster-info dump\n" +"\n" +" # 导出当前的集群状态到 /path/to/cluster-state\n" +" kubectl cluster-info dump --output-directory=/path/to/cluster-state\n" +"\n" +" # 导出所有命名空间到标准输出\n" +" kubectl cluster-info dump --all-namespaces\n" +"\n" +" # 导出一组命名空间到 /path/to/cluster-state\n" +" kubectl cluster-info dump --namespaces default,kube-system --output-" +"directory=/path/to/cluster-state" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:308 +msgid "" +"\n" +" Create a LoadBalancer service with the specified name." +msgstr "" +"\n" +" 使用一个指定的名称创建一个 LoadBalancer 服务。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L61 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107 +msgid "" +"A comma-delimited set of quota scopes that must all match each object tracked " +"by the quota." +msgstr "一组以逗号分隔的配额范围,必须全部匹配配额所跟踪的每个对象。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L60 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106 +msgid "" +"A comma-delimited set of resource=quantity pairs that define a hard limit." +msgstr "一组以逗号分隔的资源=数量对,用于定义硬性限制。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L63 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113 +msgid "" +"A label selector to use for this budget. Only equality-based selector " +"requirements are supported." +msgstr "一个用于该预算的标签选择器。只支持基于等值比较的选择器要求。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L106 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:152 +msgid "" +"A label selector to use for this service. Only equality-based selector " +"requirements are supported. If empty (the default) infer the selector from " +"the replication controller or replica set.)" +msgstr "" +"用于此服务的标签选择器。仅支持基于等值比较的选择器要求。如果为空(默认),则从" +"副本控制器或副本集中推断选择器。)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L111 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:157 +msgid "" +"Additional external IP address (not managed by Kubernetes) to accept for the " +"service. If this IP is routed to a node, the service can be accessed by this " +"IP in addition to its generated service IP." 
+msgstr "" +"为服务所接受的其他外部 IP 地址(不由 Kubernetes 管理)。如果这个 IP 被路由到一" +"个节点,除了其生成的服务 IP 外,还可以通过这个 IP 访问服务。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L119 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:158 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:178 +msgid "" +"An inline JSON override for the generated object. If this is non-empty, it is " +"used to override the generated object. Requires that the object supply a " +"valid apiVersion field." +msgstr "" +"针对所生成对象的内联 JSON 覆盖。如果这一对象是非空的,将用于覆盖所生成的对象。" +"要求对象提供有效的 apiVersion 字段。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:125 +msgid "Approve a certificate signing request" +msgstr "批准一个证书签署请求" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:263 +msgid "" +"Assign your own ClusterIP or set to 'None' for a 'headless' service (no " +"loadbalancing)." +msgstr "为“无头”服务(无负载平衡)分配你自己的 ClusterIP 或设置为“无。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/attach.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:105 +msgid "Attach to a running container" +msgstr "挂接到一个运行中的容器" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L115 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:161 +msgid "" +"ClusterIP to be assigned to the service. Leave empty to auto-allocate, or set " +"to 'None' to create a headless service." +msgstr "" +"要分配给服务的 ClusterIP。留空表示自动分配,或设置为 “None” 以创建无头服务。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101 +msgid "ClusterRole this ClusterRoleBinding should reference" +msgstr "ClusterRoleBinding 应该指定 ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L55 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:104 +msgid "ClusterRole this RoleBinding should reference" +msgstr "RoleBinding 应该指定 ClusterRole" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/convert.go#L67 +#: pkg/kubectl/cmd/convert/convert.go:95 +msgid "Convert config files between different API versions" +msgstr "在不同的 API 版本之间转换配置文件" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cp.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:106 +msgid "Copy files and directories to and from containers." 
+msgstr "将文件和目录复制到容器中或从容器中复制出来。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L214 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94 +msgid "Create a TLS secret" +msgstr "创建一个 TLS secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_namespace.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:83 +msgid "Create a namespace with the specified name" +msgstr "用指定的名称创建一个命名空间" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L143 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134 +msgid "Create a secret for use with a Docker registry" +msgstr "创建一个给 Docker registry 使用的 Secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L34 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49 +msgid "Create a secret using specified subcommand" +msgstr "使用指定的子命令创建一个 Secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85 +msgid "Create a service account with the specified name" +msgstr "创建一个指定名称的服务账户" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_cluster.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "从 kubeconfig 中删除指定的集群" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/delete_context.go#L38 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "从 kubeconfig 中删除指定的上下文" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L121 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:174 +msgid "Deny a certificate signing request" +msgstr "拒绝一个证书签名请求" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_contexts.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "描述一个或多个上下文" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/get_clusters.go#L40 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "显示在 kubeconfig 中定义的集群" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/view.go#L64 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "显示合并的 kubeconfig 配置或一个指定的 kubeconfig 文件" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/get.go#L107 +#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:165 +msgid "Display one or many resources" +msgstr "显示一个或多个资源" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L176 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:184 +msgid "Drain node in preparation for maintenance" +msgstr "清空节点以准备维护" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:77 +msgid "Edit a resource on the server" +msgstr "编辑服务器上的资源" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L159 +#: 
staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152 +msgid "Email for Docker registry" +msgstr "用于 Docker 镜像库的邮件地址" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/exec.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89 +msgid "Execute a command in a container" +msgstr "在某个容器中执行一个命令" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/portforward.go#L75 +#: staging/src/k8s.io/kubectl/pkg/cmd/portforward/portforward.go:109 +msgid "Forward one or more local ports to a pod" +msgstr "将一个或多个本地端口转发到某个 Pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/help.go#L36 +#: staging/src/k8s.io/kubectl/pkg/cmd/help/help.go:37 +msgid "Help about any command" +msgstr "关于任何命令的帮助" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:160 +msgid "" +"If non-empty, set the session affinity for the service to this; legal values: " +"'None', 'ClientIP'" +msgstr "如果非空,则将服务的会话亲和性设置为此值;合法值:'None'、'ClientIP'" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/annotate.go#L135 +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:157 +msgid "" +"If non-empty, the annotation update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single resource." +msgstr "" +"如果非空,则只有当所给值是对象的当前资源版本时,注解更新才会成功。 仅在指定单" +"个资源时有效。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L132 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:154 +msgid "" +"If non-empty, the labels update will only succeed if this is the current " +"resource-version for the object. Only valid when specifying a single resource." +msgstr "" +"如果非空,则标签更新只有在所给值是对象的当前资源版本时才会成功。仅在指定单个资" +"源时有效。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L127 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:98 +msgid "Mark node as schedulable" +msgstr "标记节点为可调度" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:69 +msgid "Mark node as unschedulable" +msgstr "标记节点为不可调度" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_pause.go#L73 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:83 +msgid "Mark the provided resource as paused" +msgstr "将所指定的资源标记为已暂停" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/certificates.go#L35 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49 +#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:50 +msgid "Modify certificate resources." +msgstr "修改证书资源。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/config.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "修改 kubeconfig 文件" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L110 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:156 +msgid "" +"Name or number for the port on the container that the service should direct " +"traffic to. Optional." +msgstr "" +"此为端口的名称或端口号,服务应将流量定向到容器上的这一端口。此属性为可选。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:174 +msgid "" +"Only return logs after a specific date (RFC3339). Defaults to all logs. 
Only " +"one of since-time / since may be used." +msgstr "" +"仅返回在指定日期 (RFC3339) 之后的日志。默认为所有日志。只能使用 since-time / " +"since 之一。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/completion.go#L97 +#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:112 +msgid "Output shell completion code for the specified shell (bash or zsh)" +msgstr "为指定的 Shell(Bash 或 zsh) 输出 Shell 补全代码。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L157 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151 +msgid "Password for Docker registry authentication" +msgstr "用于 Docker 镜像库身份验证的密码" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L226 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110 +msgid "Path to PEM encoded public key certificate." +msgstr "PEM 编码的公钥证书的路径。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L227 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111 +msgid "Path to private key associated with given certificate." +msgstr "与给定证书关联的私钥的路径。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L82 +#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:130 +msgid "" +"Precondition for resource version. Requires that the current resource version " +"match this value in order to scale." +msgstr "资源版本的前提条件。要求当前资源版本与此值匹配才能进行扩缩操作。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/version.go#L39 +#: staging/src/k8s.io/kubectl/pkg/cmd/version/version.go:73 +msgid "Print the client and server version information" +msgstr "输出客户端和服务端的版本信息" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/options.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:38 +#: staging/src/k8s.io/kubectl/pkg/cmd/options/options.go:39 +msgid "Print the list of flags inherited by all commands" +msgstr "输出所有命令的层级关系" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/logs.go#L86 +#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:152 +msgid "Print the logs for a container in a pod" +msgstr "打印 Pod 中容器的日志" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_resume.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:87 +msgid "Resume a paused resource" +msgstr "恢复暂停的资源" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L56 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105 +msgid "Role this RoleBinding should reference" +msgstr "RoleBinding 应该引用的 Role" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L94 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:152 +msgid "Run a particular image on the cluster" +msgstr "在集群上运行特定镜像" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/proxy.go#L68 +#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:119 +msgid "Run a proxy to the Kubernetes API server" +msgstr "运行一个指向 Kubernetes API 服务器的代理" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L161 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153 +msgid "Server location for Docker registry" +msgstr "Docker 镜像库的服务器位置" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set.go#L37 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39 +msgid "Set specific 
features on objects" +msgstr "为对象设置指定特性" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_selector.go#L81 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_selector.go:104 +msgid "Set the selector on a resource" +msgstr "为资源设置选择器" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/describe.go#L80 +#: staging/src/k8s.io/kubectl/pkg/cmd/describe/describe.go:107 +msgid "Show details of a specific resource or group of resources" +msgstr "显示特定资源或资源组的详细信息" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_status.go#L57 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:102 +msgid "Show the status of the rollout" +msgstr "显示上线的状态" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L108 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:154 +msgid "Synonym for --target-port" +msgstr "--target-port 的同义词" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L114 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:174 +msgid "The image for the container to run." +msgstr "指定容器要运行的镜像." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L116 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:176 +msgid "" +"The image pull policy for the container. If left empty, this value will not " +"be specified by the client and defaulted by the server" +msgstr "容器的镜像拉取策略。如果留空,该值将不由客户端指定,由服务器默认设置" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L62 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111 +msgid "The minimum number or percentage of available pods this budget requires." +msgstr "此预算要求的可用 Pod 的最小数量或百分比。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L113 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:159 +msgid "The name for the newly created object." +msgstr "新创建的对象的名称。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:125 +msgid "" +"The name for the newly created object. If not specified, the name of the " +"input resource will be used." +msgstr "新创建的对象的名称。如果未指定,将使用输入资源的名称。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L98 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:147 +msgid "" +"The name of the API generator to use. There are 2 generators: 'service/v1' " +"and 'service/v2'. The only difference between them is that service port in v1 " +"is named 'default', while it is left unnamed in v2. Default is 'service/v2'." +msgstr "" +"要使用的 API 生成器的名称。有两个生成器。'service/v1' 和 'service/v2'。它们之" +"间唯一的区别是,v1 中的服务端口被命名为 'default',如果在 v2 中没有指定名称。" +"默认是 'service/v2'。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L99 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:148 +msgid "The network protocol for the service to be created. Default is 'TCP'." +msgstr "要创建的服务的网络协议。默认为 “TCP”。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L100 +#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:149 +msgid "" +"The port that the service should serve on. 
Copied from the resource being " +"exposed, if unspecified" +msgstr "服务要使用的端口。如果没有指定,则从被暴露的资源复制" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L131 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:194 +msgid "" +"The resource requirement limits for this container. For example, 'cpu=200m," +"memory=512Mi'. Note that server side components may assign limits depending " +"on the server configuration, such as limit ranges." +msgstr "" +"这个容器的资源需求限制。例如,\"cpu=200m,memory=512Mi\"。请注意,服务器端的组件" +"可能会根据服务器的配置来分配限制,例如限制范围。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L130 +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:192 +msgid "" +"The resource requirement requests for this container. For example, 'cpu=100m," +"memory=256Mi'. Note that server side components may assign requests " +"depending on the server configuration, such as limit ranges." +msgstr "" +"这个容器的资源需求请求。例如,\"cpu=100m,memory=256Mi\"。请注意,服务器端的组件" +"可能会根据服务器的配置来分配请求,例如限制范围。" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L87 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:155 +msgid "The type of secret to create" +msgstr "要创建的 Secret 类型" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_undo.go#L71 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_undo.go:87 +msgid "Undo a previous rollout" +msgstr "撤销上一次的上线" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_resources.go#L101 +#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:116 +msgid "Update resource requests/limits on objects with pod templates" +msgstr "使用 Pod 模板更新对象的资源请求/限制" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "更新一个资源的注解" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/label.go#L109 +#: staging/src/k8s.io/kubectl/pkg/cmd/label/label.go:133 +msgid "Update the labels on a resource" +msgstr "更新某资源上的标签" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/taint.go#L88 +#: staging/src/k8s.io/kubectl/pkg/cmd/taint/taint.go:109 +msgid "Update the taints on one or more nodes" +msgstr "更新一个或者多个节点上的污点" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L155 +#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150 +msgid "Username for Docker registry authentication" +msgstr "用于 Docker 镜像库身份验证的用户名" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout_history.go#L51 +#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:83 +msgid "View rollout history" +msgstr "显示上线历史" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L45 +#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85 +msgid "" +"Where to output the files. 
If empty or '-' uses stdout, otherwise creates a " +"directory hierarchy in that directory" +msgstr "" +"在哪里输出文件。如果为空或 “-” 则使用标准输出,否则在该目录中创建目录层次结构" + +#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88 +msgid "dummy restart flag)" +msgstr "假的重启标志)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/cmd.go#L217 +#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:227 +msgid "kubectl controls the Kubernetes cluster manager" +msgstr "kubectl 控制 Kubernetes 集群管理器" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a ClusterRoleBinding for user1, user2, and group1 using the " +#~ "cluster-admin ClusterRole\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 使用 cluster-admin ClusterRole 为 user1, user2, and group1 创建一" +#~ "个 ClusterRoleBinding\n" +#~ "\t\t kubectl create clusterrolebinding cluster-admin --" +#~ "clusterrole=cluster-admin --user=user1 --user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a RoleBinding for user1, user2, and group1 using the admin " +#~ "ClusterRole\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 使用 admin ClusterRole 为 user1, user2, and group1 创建一个 " +#~ "RoleBinding\n" +#~ "\t\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --" +#~ "user=user2 --group=group1" + +#~ msgid "" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config based on folder bar\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with specified keys instead " +#~ "of file basenames on disk\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # Create a new configmap named my-config with key1=config1 and " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 通过文件夹 bar 创建一个名称为 my-config 的 configmap\n" +#~ "\t\t kubectl create configmap my-config --from-file=path/to/bar\n" +#~ "\n" +#~ "\t\t # 创建一个名称为 my-config 的 configmap 并指定 keys 而不是使用磁盘上" +#~ "所在的文件名\n" +#~ "\t\t kubectl create configmap my-config --from-file=key1=/path/to/bar/" +#~ "file1.txt --from-file=key2=/path/to/bar/file2.txt\n" +#~ "\n" +#~ "\t\t # 创建一个名称为 my-config 的 configmap 且 key1=config1 和 " +#~ "key2=config2\n" +#~ "\t\t kubectl create configmap my-config --from-literal=key1=config1 --" +#~ "from-literal=key2=config2" + +#~ msgid "" +#~ "\n" +#~ "\t\t # If you don't already have a .dockercfg file, you can create a " +#~ "dockercfg secret directly by using:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" +#~ msgstr "" +#~ "\n" +#~ "\t\t # 如果你还没有 .dockercfg 文件, 你可以直接使用下面的命令创建一个 " +#~ "dockercfg 类型的 Secret:\n" +#~ "\t\t kubectl create secret docker-registry my-secret --docker-" +#~ "server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-" +#~ "password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Apply the configuration in pod.json to a pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Apply the JSON passed into stdin to a pod.\n" +#~ "\t\tcat 
pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune is still in Alpha\n" +#~ "\t\t# Apply the configuration in manifest.yaml that matches label " +#~ "app=nginx and delete all the other resources that are not in the file and " +#~ "match label app=nginx.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# Apply the configuration in manifest.yaml and delete all the other " +#~ "configmaps that are not in the file.\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +#~ "ConfigMap" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 将 pod.json 上的配置应用于 pod.\n" +#~ "\t\tkubectl apply -f ./pod.json\n" +#~ "\n" +#~ "\t\t# 将传入 stdin 的 JSON 应用到一个 pod.\n" +#~ "\t\tcat pod.json | kubectl apply -f -\n" +#~ "\n" +#~ "\t\t# Note: --prune 仍然在 Alpha\n" +#~ "\t\t# 应用在 manifest.yaml 中匹配标签 app=nginx 的资源配置并删除所有不在这" +#~ "个文件中并匹配标签app=nginx 的资源\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml -l app=nginx\n" +#~ "\n" +#~ "\t\t# 应用 manifest.yaml 的配置并删除所有不在这个文件中的 ConfigMaps。\n" +#~ "\t\tkubectl apply --prune -f manifest.yaml --all --prune-whitelist=core/v1/" +#~ "ConfigMap" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Auto scale a deployment \"foo\", with the number of pods between 2 " +#~ "and 10, target CPU utilization specified so a default autoscaling policy " +#~ "will be used:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# Auto scale a replication controller \"foo\", with the number of pods " +#~ "between 1 and 5, target CPU utilization at 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 自动弹性伸缩 deployment \"foo\", pods 的数量在 2 和 10 之间, 目标 " +#~ "CPU 指定为默认的弹性伸缩策略:\n" +#~ "\t\tkubectl autoscale deployment foo --min=2 --max=10\n" +#~ "\n" +#~ "\t\t# 自动弹性伸缩 replication controller \"foo\", pods 的数量在 1 和 5 之" +#~ "间, 目标 CPU 利用率为 80%:\n" +#~ "\t\tkubectl autoscale rc foo --max=5 --cpu-percent=80" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Convert 'pod.yaml' to latest version and print to stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# Convert the live state of the resource specified by 'pod.yaml' to " +#~ "the latest version\n" +#~ "\t\t# and print to stdout in json format.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# Convert all files under current directory to latest version and " +#~ "create them all.\n" +#~ "\t\tkubectl convert -f . | kubectl create -f -" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 将 'pod.yaml' 转换为最新版本并打印到 stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml\n" +#~ "\n" +#~ "\t\t# 将 'pod.yaml' 指定的资源的实时状态转换为最新版本\n" +#~ "\t\t# 并以 json 格式打印到 stdout.\n" +#~ "\t\tkubectl convert -f pod.yaml --local -o json\n" +#~ "\n" +#~ "\t\t# 将当前目录下的所有文件转换为最新版本并创建它们.\n" +#~ "\t\tkubectl convert -f . 
| kubectl create -f -" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" that allows user to " +#~ "perform \"get\", \"watch\" and \"list\" on pods\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# Create a ClusterRole named \"pod-reader\" with ResourceName " +#~ "specified\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 创建一个名为 \"pod-reader\" 的 ClusterRole, 允许用户在 pods 上执行 " +#~ "“get\", \"watch\" 和 \"list\"\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods\n" +#~ "\n" +#~ "\t\t# 创建一个名为 \"pod-reader\" ClusterRole, 其中指定了 ResourceName\n" +#~ "\t\tkubectl create clusterrole pod-reader --verb=get,list,watch --" +#~ "resource=pods --resource-name=readablepod" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a new resourcequota named my-quota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +#~ "replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# Create a new resourcequota named best-effort\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 创建一个名为 my-quota 的 resourcequota\n" +#~ "\t\tkubectl create quota my-quota --hard=cpu=1,memory=1G,pods=2,services=3," +#~ "replicationcontrollers=2,resourcequotas=1,secrets=5," +#~ "persistentvolumeclaims=10\n" +#~ "\n" +#~ "\t\t# 创建一个名为 best-effort 的 resourcequota\n" +#~ "\t\tkubectl create quota best-effort --hard=pods=100 --scopes=BestEffort" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=rails label\n" +#~ "\t\t# and require at least one of them being available at any point in " +#~ "time.\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# Create a pod disruption budget named my-pdb that will select all " +#~ "pods with the app=nginx label\n" +#~ "\t\t# and require at least half of the pods selected to be available at " +#~ "any point in time.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 创建一个名称为 my-pdb 的 pod disruption budget 并将会选择所有 " +#~ "app=rails 标签的 pods\n" +#~ "\t\t# 并要求他们在同一时间中最少有一个可用. 
\n" +#~ "\t\tkubectl create poddisruptionbudget my-pdb --selector=app=rails --min-" +#~ "available=1\n" +#~ "\n" +#~ "\t\t# 创建一个名称为 my-pdb 的 pod disruption budget 并将会选择所有 " +#~ "app=rails 标签的 pods\n" +#~ "\t\t# 并要求他们在同一时间中最少有一半可用.\n" +#~ "\t\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a pod using the data in pod.json.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Create a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# Edit the data in docker-registry.yaml in JSON using the v1 API " +#~ "format then create the resource using the edited data.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用在 pod.json 的 数据创建一个 pod.\n" +#~ "\t\tkubectl create -f ./pod.json\n" +#~ "\n" +#~ "\t\t# 根据传入 stdin 的 JSON 创建一个 pod.\n" +#~ "\t\tcat pod.json | kubectl create -f -\n" +#~ "\n" +#~ "\t\t# 使用 v1 API 格式在 JSON 中编辑在 docker-registry.yaml 中的数据然后使" +#~ "用被编辑后的数据创建资源.\n" +#~ "\t\tkubectl create -f docker-registry.yaml --edit --output-version=v1 -o " +#~ "json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx, which serves on port 80 and " +#~ "connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a replication controller identified by type and " +#~ "name specified in \"nginx-controller.yaml\", which serves on port 80 and " +#~ "connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for a pod valid-pod, which serves on port 444 with " +#~ "the name \"frontend\"\n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# Create a second service based on the above service, exposing the " +#~ "container port 8443 as port 443 with the name \"nginx-https\"\n" +#~ "\t\tkubectl expose service nginx --port=443 --target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated streaming application on port 4100 " +#~ "balancing UDP traffic and named 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# Create a service for a replicated nginx using replica set, which " +#~ "serves on port 80 and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# Create a service for an nginx deployment, which serves on port 80 " +#~ "and connects to the containers on port 8000.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 为一个 replicated nginx 创建一个 service, 服务在端口 80 并连接到 " +#~ "containers 的8000端口.\n" +#~ "\t\tkubectl expose rc nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# 使用在 \"nginx-controller.yaml\\ 中指定的 type 和 name 为一个" +#~ "replication controller 创建一个 service, 服务在端口 80 并连接到 containers " +#~ "的8000端口.\n" +#~ "\t\tkubectl expose -f nginx-controller.yaml --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# 为名为 valid-pod 的 pod 创建一个 service, 服务在端口 444 并命名为 " +#~ "\"frontend\" \n" +#~ "\t\tkubectl expose pod valid-pod --port=444 --name=frontend\n" +#~ "\n" +#~ "\t\t# 基于上面的 service 创建第二个 service, 暴露容器端口 8443 并命名为 " +#~ "\"nginx-https\" 端口为 443 \n" +#~ "\t\tkubectl expose service nginx --port=443 
--target-port=8443 --" +#~ "name=nginx-https\n" +#~ "\n" +#~ "\t\t# 为一个名称为 streaming 的应用创建一个 service 暴露端口 4100, 协议为 " +#~ "UDP 名称为 'video-stream'.\n" +#~ "\t\tkubectl expose rc streamer --port=4100 --protocol=udp --name=video-" +#~ "stream\n" +#~ "\n" +#~ "\t\t# 为一个名称为 nginx 的 replica set 创建一个 service, 服务在 端口 80 且" +#~ "连接到容器端口 8000.\n" +#~ "\t\tkubectl expose rs nginx --port=80 --target-port=8000\n" +#~ "\n" +#~ "\t\t# 为一个名称为 nginx 的 deployment 创建一个 service, 服务在端口 80 且 " +#~ "连接到 containers 的 8000 端口.\n" +#~ "\t\tkubectl expose deployment nginx --port=80 --target-port=8000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Delete a pod using the type and name specified in pod.json.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Delete a pod based on the type and name in the JSON passed into " +#~ "stdin.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# Delete pods and services with same names \"baz\" and \"foo\"\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# Delete pods and services with label name=myLabel.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Delete a pod with minimal delay\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# Force delete a pod on a dead node\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# Delete all pods\n" +#~ "\t\tkubectl delete pods --all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用 pod.json 中的类型和名称删除一个 pod.\n" +#~ "\t\tkubectl delete -f ./pod.json\n" +#~ "\n" +#~ "\t\t# 基于重定向到 stdin 中的 JSON 的类型和名称删除一个 pod.\n" +#~ "\t\tcat pod.json | kubectl delete -f -\n" +#~ "\n" +#~ "\t\t# 删除名为 \"baz\" 和 \"foo\" 的 pod 和 service\n" +#~ "\t\tkubectl delete pod,service baz foo\n" +#~ "\n" +#~ "\t\t# 删除标签为 name=myLabel 的 pods 和 services.\n" +#~ "\t\tkubectl delete pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# 删除最小延迟的 pod\n" +#~ "\t\tkubectl delete pod foo --now\n" +#~ "\n" +#~ "\t\t# 强制删除名为 foo 的 pod\n" +#~ "\t\tkubectl delete pod foo --grace-period=0 --force\n" +#~ "\n" +#~ "\t\t# 删除所有 pods\n" +#~ "\t\tkubectl delete pods --all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Describe a node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# Describe a pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# Describe a pod identified by type and name in \"pod.json\"\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# Describe all pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# Describe pods by label name=myLabel\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Describe all pods managed by the 'frontend' replication controller " +#~ "(rc-created pods\n" +#~ "\t\t# get the name of the rc as a prefix in the pod the name).\n" +#~ "\t\tkubectl describe pods frontend" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 描述一个 node\n" +#~ "\t\tkubectl describe nodes kubernetes-node-emt8.c.myproject.internal\n" +#~ "\n" +#~ "\t\t# 描述一个 pod\n" +#~ "\t\tkubectl describe pods/nginx\n" +#~ "\n" +#~ "\t\t# 描述一个被 \"pod.json\" 中的类型和名称标识的 pod\n" +#~ "\t\tkubectl describe -f pod.json\n" +#~ "\n" +#~ "\t\t# 描述所有 pods\n" +#~ "\t\tkubectl describe pods\n" +#~ "\n" +#~ "\t\t# 描述标签为 name=myLabel 的 pods\n" +#~ "\t\tkubectl describe po -l name=myLabel\n" +#~ "\n" +#~ "\t\t# 描述所有被名称为 'frontend' 的 replication controller 管理的 pods(rc-" +#~ "创建 pods\n" +#~ "\t\t# 并使用 rc 的名称作为 pod 的前缀).\n" +#~ "\t\tkubectl describe pods frontend" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Drain node \"foo\", even if there are pods 
not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet on it.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# As above, but abort if there are pods not managed by a " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet 或者 StatefulSet, and " +#~ "use a grace period of 15 minutes.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 驱逐节点 \"foo\", 即使很多 pods 没有被一个在 node 上的 " +#~ "ReplicationController, ReplicaSet, Job, DaemonSet 或者 StatefulSet 管理.\n" +#~ "\t\t$ kubectl drain foo --force\n" +#~ "\n" +#~ "\t\t# 同上, 如果存在 pods 没有被一个 ReplicationController, ReplicaSet, " +#~ "Job, DaemonSet 或者 StatefulSet 管理超过 15 分钟则退出.\n" +#~ "\t\t$ kubectl drain foo --grace-period=900" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Edit the service named 'docker-registry':\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Use an alternative editor\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# Edit the job 'myjob' in JSON using the v1 API format:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# Edit the deployment 'mydeployment' in YAML and save the modified " +#~ "config in its annotation:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 编辑名为 'docker-registry' 的 service:\n" +#~ "\t\tkubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# 使用一个可选择的编辑器\n" +#~ "\t\tKUBE_EDITOR=\"nano\" kubectl edit svc/docker-registry\n" +#~ "\n" +#~ "\t\t# 使用 v1 API 格式的 JSON 编辑名为 'myjob' 的 job:\n" +#~ "\t\tkubectl edit job.v1.batch/myjob -o json\n" +#~ "\n" +#~ "\t\t# 在 YAML 中编辑名为 'mydeployment' 的 deployment 并在它的注解中保存修" +#~ "改后的配置:\n" +#~ "\t\tkubectl edit deployment/mydeployment -o yaml --save-config" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running 'date' from pod 123456-7890, using the first " +#~ "container by default\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# Get output from running 'date' in ruby-container from pod " +#~ "123456-7890\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +#~ "from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 从运行中pod 123456-7890 获取执行 'date' 的输出, 默认使用第一个容器\n" +#~ "\t\tkubectl exec 123456-7890 date\n" +#~ "\n" +#~ "\t\t# 从 pod 123456-7890 的容器 ruby-container 获取执行 'date' 的输出\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container date\n" +#~ "\n" +#~ "\t\t# 切换到 terminal 模式, 发送 stdin 到运行在 pod 123456-7890 的容器 " +#~ "ruby-container 'bash' \n" +#~ "\t\t# 并从 'bash' 发送 stdout/stderr 返回到 client\n" +#~ "\t\tkubectl exec 123456-7890 -c ruby-container -i -t -- bash -il" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Get output from running pod 123456-7890, using the first container " +#~ "by default\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# Get output from ruby-container from pod 123456-7890\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# Switch to raw terminal mode, sends stdin to 'bash' in ruby-container " +#~ "from pod 123456-7890\n" +#~ "\t\t# and sends stdout/stderr from 'bash' back to the client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# Get output from the first pod of a ReplicaSet named nginx\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ 
"\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 从运行中pod 123456-7890 获取执行 'date' 的输出, 默认使用第一个容器\n" +#~ "\t\tkubectl attach 123456-7890\n" +#~ "\n" +#~ "\t\t# 从 pod 123456-7890 的容器 ruby-container 获取输出\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container\n" +#~ "\n" +#~ "\t\t# 切换到 terminal 模式, 发送 stdin 到运行在 pod 123456-7890 的容器 " +#~ "ruby-container 'bash' \n" +#~ "\t\t# 并从 'bash' 发送 stdout/stderr 返回到 client\n" +#~ "\t\tkubectl attach 123456-7890 -c ruby-container -i -t\n" +#~ "\n" +#~ "\t\t# 从名称为 nginx 的 ReplicaSet 获取第一个 pod 的输出\n" +#~ "\t\tkubectl attach rs/nginx\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Install bash completion on a Mac using homebrew\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash completion support\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for bash into the current shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# Write bash completion code to a file and source if from ." +#~ "bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell completion\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# Load the kubectl completion code for zsh[1] into the current shell\n" +#~ "\t\tsource <(kubectl completion zsh)" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 在一个 Mac 中使用 homebrew 安装 bash 补全\n" +#~ "\t\tbrew install bash-completion\n" +#~ "\t\tprintf \"\n" +#~ "# Bash 补全支持\n" +#~ "source $(brew --prefix)/etc/bash_completion\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# 导入 kubectl 补全代码到当前 shell\n" +#~ "\t\tsource <(kubectl completion bash)\n" +#~ "\n" +#~ "\t\t# 写入 bash 补全代码到一个文件并 source 如果它是 .bash_profile\n" +#~ "\t\tkubectl completion bash > ~/.kube/completion.bash.inc\n" +#~ "\t\tprintf \"\n" +#~ "# Kubectl shell 补全\n" +#~ "source '$HOME/.kube/completion.bash.inc'\n" +#~ "\" >> $HOME/.bash_profile\n" +#~ "\t\tsource $HOME/.bash_profile\n" +#~ "\n" +#~ "\t\t# 为 zsh[1] 导入 kubectl 补全代码到当前 shell\n" +#~ "\t\tsource <(kubectl completion zsh)" + +#~ msgid "" +#~ "\n" +#~ "\t\t# List all pods in ps output format.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# List all pods in ps output format with more information (such as " +#~ "node name).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# List a single replication controller with specified NAME in ps " +#~ "output format.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# List a single pod in JSON output format.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List a pod identified by type and name specified in \"pod.yaml\" in " +#~ "JSON output format.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# Return only the phase value of the specified pod.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." 
+#~ "phase}}\n" +#~ "\n" +#~ "\t\t# List all replication controllers and services together in ps output " +#~ "format.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# List one or more resources by their type and names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# List all resources with different types.\n" +#~ "\t\tkubectl get all" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 以 ps 输出格式列出所有 pod.\n" +#~ "\t\tkubectl get pods\n" +#~ "\n" +#~ "\t\t# 以 ps 输出格式列出所有 pod(如节点名称).\n" +#~ "\t\tkubectl get pods -o wide\n" +#~ "\n" +#~ "\t\t# 获取名称为 web 的 replicationcontroller.\n" +#~ "\t\tkubectl get replicationcontroller web\n" +#~ "\n" +#~ "\t\t# 使用 JSON 格式化输出显示一个单独的 pod.\n" +#~ "\t\tkubectl get -o json pod web-pod-13je7\n" +#~ "\n" +#~ "\t\t# 显示一个被 \"pod.yaml\" 中的 type 和 name 标识的 pod 并使用 JSON 格式" +#~ "化输出.\n" +#~ "\t\tkubectl get -f pod.yaml -o json\n" +#~ "\n" +#~ "\t\t# 只返回被指定 pod 中 phase 的值.\n" +#~ "\t\tkubectl get -o template pod/web-pod-13je7 --template={{.status." +#~ "phase}}\n" +#~ "\n" +#~ "\t\t# 显示所有的 replication controllers 和 services 并格式化输出.\n" +#~ "\t\tkubectl get rc,services\n" +#~ "\n" +#~ "\t\t# 显示一个或者更多 resources 通过它们的 type 和 names.\n" +#~ "\t\tkubectl get rc/web service/frontend pods/web-pod-13je7\n" +#~ "\n" +#~ "\t\t# 使用不同的 types 显示所有 resources.\n" +#~ "\t\tkubectl get all" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Listen on ports 5000 and 6000 locally, forwarding data to/from ports " +#~ "5000 and 6000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# Listen on port 8888 locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# Listen on a random port locally, forwarding to 5000 in the pod\n" +#~ "\t\tkubectl port-forward mypod 0:5000" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 在本地监听端口 5000 和 6000 , forwarding 数据 to/from 在 pod 5000 和 " +#~ "6000 端口\n" +#~ "\t\tkubectl port-forward mypod 5000 6000\n" +#~ "\n" +#~ "\t\t# 在本地监听端口 8888 , forwarding 到 pod 的 5000端口\n" +#~ "\t\tkubectl port-forward mypod 8888:5000\n" +#~ "\n" +#~ "\t\t# 在本地随机监听一个端口 , forwarding 到 pod 的 5000端口\n" +#~ "\t\tkubectl port-forward mypod :5000\n" +#~ "\n" +#~ "\t\t# 在本地随机监听一个端口 , forwarding 到 pod 的 5000端口\n" +#~ "\t\tkubectl port-forward mypod 0:5000" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 标记 node \"foo\" 为 schedulable.\n" +#~ "\t\t$ kubectl uncordon foo" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/drain.go#L102 +#~ msgid "" +#~ "\n" +#~ "\t\t# Mark node \"foo\" as unschedulable.\n" +#~ "\t\tkubectl cordon foo" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 标记 node \"foo\" 为 unschedulable.\n" +#~ "\t\tkubectl cordon foo" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Partially update a node using strategic merge patch\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# Partially update a node identified by the type and name specified in " +#~ "\"node.json\" using strategic merge patch\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image; spec.containers[*].name is required " +#~ "because it's a merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ 
"\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# Update a container's image using a json patch with positional " +#~ "arrays\n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用 strategic merge patch 部分更新一个 node\n" +#~ "\t\tkubectl patch node k8s-node-1 -p '{\"spec\":{\"unschedulable\":" +#~ "true}}'\n" +#~ "\n" +#~ "\t\t# 使用 strategic merge patch 部分更新一个被 \"node.json\" 的 type 和 " +#~ "name 标示 的 node.\n" +#~ "\t\tkubectl patch -f node.json -p '{\"spec\":{\"unschedulable\":true}}'\n" +#~ "\n" +#~ "\t\t# 更新一个 container 的 image; spec.containers[*].name 是必须的 因为它" +#~ "是一个 merge key\n" +#~ "\t\tkubectl patch pod valid-pod -p '{\"spec\":{\"containers\":[{\"name\":" +#~ "\"kubernetes-serve-hostname\",\"image\":\"new image\"}]}}'\n" +#~ "\n" +#~ "\t\t# 使用一个 json patch 更新一个指定坐标的 container 的 image \n" +#~ "\t\tkubectl patch pod valid-pod --type='json' -p='[{\"op\": \"replace\", " +#~ "\"path\": \"/spec/containers/0/image\", \"value\":\"new image\"}]'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Print the address of the master and cluster services\n" +#~ "\t\tkubectl cluster-info" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 输出 master 和 cluster services 的地址\n" +#~ "\t\tkubectl cluster-info" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Replace a pod using the data in pod.json.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# Replace a pod based on the JSON passed into stdin.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Update a single-container pod's image version (tag) to v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# Force replace, delete and then re-create the resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 使用在 pod.json 中的数据替换一个 pod.\n" +#~ "\t\tkubectl replace -f ./pod.json\n" +#~ "\n" +#~ "\t\t# 基于被重定向到 stdin 中的 JSON 替换一个 pod.\n" +#~ "\t\tcat pod.json | kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# 更新一个单独容器的 pod 的 image 版本 (tag) 到 v4\n" +#~ "\t\tkubectl get pod mypod -o yaml | sed 's/\\(image: myimage\\):.*$/:v4/' " +#~ "| kubectl replace -f -\n" +#~ "\n" +#~ "\t\t# 强制替换, 删除然后重新创建这个 resource\n" +#~ "\t\tkubectl replace --force -f ./pod.json" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Return snapshot logs from pod nginx with only one container\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs for the pods defined by label app=nginx\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 返回仅有一个容器 pod 名称为 nginx 的 snapshot 日志\n" +#~ "\t\tkubectl logs nginx\n" +#~ "\n" +#~ "\t\t# 
返回 label 为 app=nginx 的 pods 的 snapshot 日志\n" +#~ "\t\tkubectl logs -lapp=nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot of previous terminated ruby container logs from pod " +#~ "web-1\n" +#~ "\t\tkubectl logs -p -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Begin streaming the logs of the ruby container in pod web-1\n" +#~ "\t\tkubectl logs -f -c ruby web-1\n" +#~ "\n" +#~ "\t\t# Display only the most recent 20 lines of output in pod nginx\n" +#~ "\t\tkubectl logs --tail=20 nginx\n" +#~ "\n" +#~ "\t\t# Show all logs from pod nginx written in the last hour\n" +#~ "\t\tkubectl logs --since=1h nginx\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from first container of a job named hello\n" +#~ "\t\tkubectl logs job/hello\n" +#~ "\n" +#~ "\t\t# Return snapshot logs from container nginx-1 of a deployment named " +#~ "nginx\n" +#~ "\t\tkubectl logs deployment/nginx -c nginx-1" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on port 8011, serving static " +#~ "content from ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver on an arbitrary local port.\n" +#~ "\t\t# The chosen port for the server will be output to stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# Run a proxy to kubernetes apiserver, changing the api prefix to k8s-" +#~ "api\n" +#~ "\t\t# This makes e.g. the pods api available at localhost:8001/k8s-api/v1/" +#~ "pods/\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 运行 proxy 到 kubernetes apiserver 的 8011 端口上, 服务静态内容路径" +#~ "为 ./local/www/\n" +#~ "\t\tkubectl proxy --port=8011 --www=./local/www/\n" +#~ "\n" +#~ "\t\t# 在任意的本地端口上运行一个 proxy 到 kubernetes apiserver.\n" +#~ "\t\t# 为这个 server 挑选的端口将会被输出到 stdout.\n" +#~ "\t\tkubectl proxy --port=0\n" +#~ "\n" +#~ "\t\t# 运行一个 proxy 到 kubernetes apiserver, 修改 api prefix 为 k8s-api\n" +#~ "\t\t# 例如, 这会使 pods 的 api 在 localhost:8001/k8s-api/v1/pods/ 上可用\n" +#~ "\t\tkubectl proxy --api-prefix=/k8s-api" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Scale a replicaset named 'foo' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale a resource identified by type and name specified in \"foo.
+#~ "yaml\" to 3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# If the deployment named mysql's current size is 2, scale mysql to " +#~ "3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale multiple replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale job named 'cron' to 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Scale 一个名称为 ‘foo’ 的 replicaset 服本数为 3.\n" +#~ "\t\tkubectl scale --replicas=3 rs/foo\n" +#~ "\n" +#~ "\t\t# Scale 指定的 \"foo.yaml\" 的 type 和 name 标识的 resource 副本数量为 " +#~ "3.\n" +#~ "\t\tkubectl scale --replicas=3 -f foo.yaml\n" +#~ "\n" +#~ "\t\t# 如果名称为 mysql 的 deployment 当前副本数量为 2, scale mysql 到 3.\n" +#~ "\t\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\n" +#~ "\n" +#~ "\t\t# Scale 多个 replication controllers.\n" +#~ "\t\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\n" +#~ "\n" +#~ "\t\t# Scale 名称为 ’cron’ 的 job 副本数量为 3.\n" +#~ "\t\tkubectl scale --replicas=3 job/cron" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# Set the last-applied-configuration of a resource to match the " +#~ "contents of a file, will create the annotation if it does not already " +#~ "exist.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" +#~ msgstr "" +#~ "\n" +#~ "\t\t# 设置一个资源的 last-applied-configuration 去匹配一个文件的内容.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml\n" +#~ "\n" +#~ "\t\t# Execute set-last-applied against each configuration file in a " +#~ "directory.\n" +#~ "\t\tkubectl apply set-last-applied -f path/\n" +#~ "\n" +#~ "\t\t# 设置一个资源的 last-applied-configuration 去匹配一个文件的内容, 如果" +#~ "不存在将会创建一个 annotation.\n" +#~ "\t\tkubectl apply set-last-applied -f deploy.yaml --create-" +#~ "annotation=true\n" +#~ "\t\t" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Shut down foo.\n" +#~ "\t\tkubectl stop replicationcontroller foo\n" +#~ "\n" +#~ "\t\t# Stop pods and services with label name=myLabel.\n" +#~ "\t\tkubectl stop pods,services -l name=myLabel\n" +#~ "\n" +#~ "\t\t# Shut down the service defined in service.json\n" +#~ "\t\tkubectl stop -f service.json\n" +#~ "\n" +#~ "\t\t# Shut down all resources in the path/to/resources directory\n" +#~ "\t\tkubectl stop -f path/to/resources" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" 
and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +#~ "env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +#~ "\"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... \n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Start a single instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and let the container expose " +#~ "port 5701 .\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --port=5701\n" +#~ "\n" +#~ "\t\t# Start a single instance of hazelcast and set environment variables " +#~ "\"DNS_DOMAIN=cluster\" and \"POD_NAMESPACE=default\" in the container.\n" +#~ "\t\tkubectl run hazelcast --image=hazelcast --env=\"DNS_DOMAIN=cluster\" --" +#~ "env=\"POD_NAMESPACE=default\"\n" +#~ "\n" +#~ "\t\t# Start a replicated instance of nginx.\n" +#~ "\t\tkubectl run nginx --image=nginx --replicas=5\n" +#~ "\n" +#~ "\t\t# Dry run. Print the corresponding API objects without creating them.\n" +#~ "\t\tkubectl run nginx --image=nginx --dry-run\n" +#~ "\n" +#~ "\t\t# Start a single instance of nginx, but overload the spec of the " +#~ "deployment with a partial set of values parsed from JSON.\n" +#~ "\t\tkubectl run nginx --image=nginx --overrides='{ \"apiVersion\": \"v1\", " +#~ "\"spec\": { ... } }'\n" +#~ "\n" +#~ "\t\t# Start a pod of busybox and keep it in the foreground, don't restart " +#~ "it if it exits.\n" +#~ "\t\tkubectl run -i -t busybox --image=busybox --restart=Never\n" +#~ "\n" +#~ "\t\t# Start the nginx container using the default command, but use custom " +#~ "arguments (arg1 .. argN) for that command.\n" +#~ "\t\tkubectl run nginx --image=nginx -- ... \n" +#~ "\n" +#~ "\t\t# Start the nginx container using a different command and custom " +#~ "arguments.\n" +#~ "\t\tkubectl run nginx --image=nginx --command -- ... 
\n" +#~ "\n" +#~ "\t\t# Start the perl container to compute π to 2000 places and print it " +#~ "out.\n" +#~ "\t\tkubectl run pi --image=perl --restart=OnFailure -- perl -Mbignum=bpi -" +#~ "wle 'print bpi(2000)'\n" +#~ "\n" +#~ "\t\t# Start the cron job to compute π to 2000 places and print it out " +#~ "every 5 minutes.\n" +#~ "\t\tkubectl run pi --schedule=\"0/5 * * * ?\" --image=perl --" +#~ "restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(2000)'" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update node 'foo' with a taint with key 'dedicated' and value " +#~ "'special-user' and effect 'NoSchedule'.\n" +#~ "\t\t# If a taint with that key and effect already exists, its value is " +#~ "replaced as specified.\n" +#~ "\t\tkubectl taint nodes foo dedicated=special-user:NoSchedule\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' the taint with key 'dedicated' and effect " +#~ "'NoSchedule' if one exists.\n" +#~ "\t\tkubectl taint nodes foo dedicated:NoSchedule-\n" +#~ "\n" +#~ "\t\t# Remove from node 'foo' all the taints with key 'dedicated'\n" +#~ "\t\tkubectl taint nodes foo dedicated-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the --overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'unhealthy' and the value 'true'.\n" +#~ "\t\tkubectl label pods foo unhealthy=true\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' with the label 'status' and the value 'unhealthy', " +#~ "overwriting any existing value.\n" +#~ "\t\tkubectl label --overwrite pods foo status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update all pods in the namespace\n" +#~ "\t\tkubectl label pods --all status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update a pod identified by the type and name in \"pod.json\"\n" +#~ "\t\tkubectl label -f pod.json status=unhealthy\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ "\t\tkubectl label pods foo status=unhealthy --resource-version=1\n" +#~ "\n" +#~ "\t\t# Update pod 'foo' by removing a label named 'bar' if it exists.\n" +#~ "\t\t# Does not require the 
--overwrite flag.\n" +#~ "\t\tkubectl label pods foo bar-" + +#~ msgid "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" +#~ msgstr "" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using new replication controller data in " +#~ "frontend-v2.json.\n" +#~ "\t\tkubectl rolling-update frontend-v1 -f frontend-v2.json\n" +#~ "\n" +#~ "\t\t# Update pods of frontend-v1 using JSON data passed into stdin.\n" +#~ "\t\tcat frontend-v2.json | kubectl rolling-update frontend-v1 -f -\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend-v1 to frontend-v2 by just changing the " +#~ "image, and switching the\n" +#~ "\t\t# name of the replication controller.\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --image=image:v2\n" +#~ "\n" +#~ "\t\t# Update the pods of frontend by just changing the image, and keeping " +#~ "the old name.\n" +#~ "\t\tkubectl rolling-update frontend --image=image:v2\n" +#~ "\n" +#~ "\t\t# Abort and reverse an existing rollout in progress (from frontend-v1 " +#~ "to frontend-v2).\n" +#~ "\t\tkubectl rolling-update frontend-v1 frontend-v2 --rollback" + +#~ msgid "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" +#~ msgstr "" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by type/name in " +#~ "YAML.\n" +#~ "\t\tkubectl apply view-last-applied deployment/nginx\n" +#~ "\n" +#~ "\t\t# View the last-applied-configuration annotations by file in JSON\n" +#~ "\t\tkubectl apply view-last-applied -f deploy.yaml -o json" + +#~ msgid "" +#~ "\n" +#~ "\t\tApply a configuration to a resource by filename or stdin.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." +#~ msgstr "" +#~ "\n" +#~ "\t\t通过文件名或标准输入流(stdin)对资源进行配置.\n" +#~ "\t\tThis resource will be created if it doesn't exist yet.\n" +#~ "\t\tTo use 'apply', always create the resource initially with either " +#~ "'apply' or 'create --save-config'.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ "\n" +#~ "\t\tAlpha Disclaimer: the --prune functionality is not yet complete. 
Do " +#~ "not use unless you are aware of what the current state is. See https://" +#~ "issues.k8s.io/34274." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\t创建一个 ClusterRole." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a ClusterRoleBinding for a particular ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\t 为指定的 ClusterRole 创建一个 ClusterRoleBinding." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a RoleBinding for a particular Role or ClusterRole." +#~ msgstr "" +#~ "\n" +#~ "\t\t为指定的 Role 或者 ClusterRole 创建一个 RoleBinding." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a TLS secret from the given public/private key pair.\n" +#~ "\n" +#~ "\t\tThe public/private key pair must exist before hand. The public key " +#~ "certificate must be .PEM encoded and match the given private key." +#~ msgstr "" +#~ "\n" +#~ "\t\t为指定的 public/private key pair 创建一个 TLS secret.\n" +#~ "\n" +#~ "\t\tpublic/private key pair 必须在传递前存在. public key certificate 必须" +#~ "以 .PEM 被编码且匹配指定的 private key." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a configmap based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single configmap may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a configmap based on a directory, each file whose " +#~ "basename is a valid key in the directory will be\n" +#~ "\t\tpackaged into the configmap. Any directory entries except regular " +#~ "files are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. 
The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a new secret for use with Docker registries.\n" +#~ "\n" +#~ "\t\tDockercfg secrets are used to authenticate against Docker registries.\n" +#~ "\n" +#~ "\t\tWhen using the Docker command line to push images, you can " +#~ "authenticate to a given registry by running\n" +#~ "\n" +#~ "\t\t $ docker login DOCKER_REGISTRY_SERVER --username=DOCKER_USER --" +#~ "password=DOCKER_PASSWORD --email=DOCKER_EMAIL'.\n" +#~ "\n" +#~ " That produces a ~/.dockercfg file that is used by subsequent 'docker " +#~ "push' and 'docker pull' commands to\n" +#~ "\t\tauthenticate to the registry. The email address is optional.\n" +#~ "\n" +#~ "\t\tWhen creating applications, you may have a Docker registry that " +#~ "requires authentication. In order for the\n" +#~ "\t\tnodes to pull images on your behalf, they have to have the " +#~ "credentials. You can provide this information\n" +#~ "\t\tby creating a dockercfg secret and attaching it to your service " +#~ "account." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a pod disruption budget with the specified name, selector, and " +#~ "desired minimum available pods" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." +#~ msgstr "" +#~ "\n" +#~ "\t\t通过文件名或者标准输入流(stdin)创建一个资源.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a resourcequota with the specified name, hard limits and " +#~ "optional scopes" + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate a secret based on a file, directory, or specified literal " +#~ "value.\n" +#~ "\n" +#~ "\t\tA single secret may package one or more key/value pairs.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a file, the key will default to the " +#~ "basename of the file, and the value will\n" +#~ "\t\tdefault to the file content. 
If the basename is an invalid key, you " +#~ "may specify an alternate key.\n" +#~ "\n" +#~ "\t\tWhen creating a secret based on a directory, each file whose basename " +#~ "is a valid key in the directory will be\n" +#~ "\t\tpackaged into the secret. Any directory entries except regular files " +#~ "are ignored (e.g. subdirectories,\n" +#~ "\t\tsymlinks, devices, pipes, etc)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreate and run a particular image, possibly replicated.\n" +#~ "\n" +#~ "\t\tCreates a deployment or job to manage the created container(s)." + +#~ msgid "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." +#~ msgstr "" +#~ "\n" +#~ "\t\tCreates an autoscaler that automatically chooses and sets the number " +#~ "of pods that run in a kubernetes cluster.\n" +#~ "\n" +#~ "\t\tLooks up a Deployment, ReplicaSet, or ReplicationController by name " +#~ "and creates an autoscaler that uses the given resource as a reference.\n" +#~ "\t\tAn autoscaler can automatically increase or decrease number of pods " +#~ "deployed within the system as needed." + +#~ msgid "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments may " +#~ "be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +#~ "pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. 
Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of the " +#~ "same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." +#~ msgstr "" +#~ "\n" +#~ "\t\tDelete resources by filenames, stdin, resources and names, or by " +#~ "resources and label selector.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. Only one type of the arguments may " +#~ "be specified: filenames,\n" +#~ "\t\tresources and names, or resources and label selector.\n" +#~ "\n" +#~ "\t\tSome resources, such as pods, support graceful deletion. These " +#~ "resources define a default period\n" +#~ "\t\tbefore they are forcibly terminated (the grace period) but you may " +#~ "override that value with\n" +#~ "\t\tthe --grace-period flag, or pass --now to set a grace-period of 1. " +#~ "Because these resources often\n" +#~ "\t\trepresent entities in the cluster, deletion may not be acknowledged " +#~ "immediately. If the node\n" +#~ "\t\thosting a pod is down or cannot reach the API server, termination may " +#~ "take significantly longer\n" +#~ "\t\tthan the grace period. To force delete a resource,\tyou must pass a " +#~ "grace\tperiod of 0 and specify\n" +#~ "\t\tthe --force flag.\n" +#~ "\n" +#~ "\t\tIMPORTANT: Force deleting pods does not wait for confirmation that the " +#~ "pod's processes have been\n" +#~ "\t\tterminated, which can leave those processes running until the node " +#~ "detects the deletion and\n" +#~ "\t\tcompletes graceful deletion. If your processes use shared storage or " +#~ "talk to a remote API and\n" +#~ "\t\tdepend on the name of the pod to identify themselves, force deleting " +#~ "those pods may result in\n" +#~ "\t\tmultiple processes running on different machines using the same " +#~ "identification which may lead\n" +#~ "\t\tto data corruption or inconsistency. Only force delete pods when you " +#~ "are sure the pod is\n" +#~ "\t\tterminated, or if your application can tolerate multiple copies of the " +#~ "same pod running at once.\n" +#~ "\t\tAlso, if you force delete pods the scheduler may place new pods on " +#~ "those nodes before the node\n" +#~ "\t\thas released those resources and causing those pods to be evicted " +#~ "immediately.\n" +#~ "\n" +#~ "\t\tNote that the delete command does NOT do resource version checks, so " +#~ "if someone\n" +#~ "\t\tsubmits an update to a resource right when you submit a delete, their " +#~ "update\n" +#~ "\t\twill be lost along with the rest of the resource." + +#~ msgid "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered by " +#~ "delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." 
+#~ msgstr "" +#~ "\n" +#~ "\t\tDeprecated: Gracefully shut down a resource by name or filename.\n" +#~ "\n" +#~ "\t\tThe stop command is deprecated, all its functionalities are covered by " +#~ "delete command.\n" +#~ "\t\tSee 'kubectl delete --help' for more details.\n" +#~ "\n" +#~ "\t\tAttempts to shut down and delete a resource that supports graceful " +#~ "termination.\n" +#~ "\t\tIf the resource is scalable it will be scaled to 0 before deletion." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of nodes.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." +#~ msgstr "" +#~ "\n" +#~ "\t\t显示 node 的资源(CPU/Memory/Storage)使用.\n" +#~ "\n" +#~ "\t\tThe top-node command allows you to see the resource consumption of " +#~ "nodes." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage of pods.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." +#~ msgstr "" +#~ "\n" +#~ "\t\t显示 pods 资源(CPU/Memory/Storage)使用.\n" +#~ "\n" +#~ "\t\tThe 'top pod' command allows you to see the resource consumption of " +#~ "pods.\n" +#~ "\n" +#~ "\t\tDue to the metrics pipeline delay, they may be unavailable for a few " +#~ "minutes\n" +#~ "\t\tsince pod creation." + +#~ msgid "" +#~ "\n" +#~ "\t\tDisplay Resource (CPU/Memory/Storage) usage.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " +#~ msgstr "" +#~ "\n" +#~ "\t\t显示资源(CPU/Memory/Storage)使用.\n" +#~ "\n" +#~ "\t\tThe top command allows you to see the resource consumption for nodes " +#~ "or pods.\n" +#~ "\n" +#~ "\t\tThis command requires Heapster to be correctly configured and working " +#~ "on the server. " + +#~ msgid "" +#~ "\n" +#~ "\t\tDrain node in preparation for maintenance.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use " +#~ "normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. 
You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\t清理节点为节点维护做准备.\n" +#~ "\n" +#~ "\t\tThe given node will be marked unschedulable to prevent new pods from " +#~ "arriving.\n" +#~ "\t\t'drain' evicts the pods if the APIServer supports eviction\n" +#~ "\t\t(http://kubernetes.io/docs/admin/disruptions/). Otherwise, it will use " +#~ "normal DELETE\n" +#~ "\t\tto delete the pods.\n" +#~ "\t\tThe 'drain' evicts or deletes all pods except mirror pods (which " +#~ "cannot be deleted through\n" +#~ "\t\tthe API server). If there are DaemonSet-managed pods, drain will not " +#~ "proceed\n" +#~ "\t\twithout --ignore-daemonsets, and regardless it will not delete any\n" +#~ "\t\tDaemonSet-managed pods, because those pods would be immediately " +#~ "replaced by the\n" +#~ "\t\tDaemonSet controller, which ignores unschedulable markings. If there " +#~ "are any\n" +#~ "\t\tpods that are neither mirror pods nor managed by " +#~ "ReplicationController,\n" +#~ "\t\tReplicaSet, DaemonSet, StatefulSet or Job, then drain will not delete " +#~ "any pods unless you\n" +#~ "\t\tuse --force. --force will also allow deletion to proceed if the " +#~ "managing resource of one\n" +#~ "\t\tor more pods is missing.\n" +#~ "\n" +#~ "\t\t'drain' waits for graceful termination. You should not operate on the " +#~ "machine until\n" +#~ "\t\tthe command completes.\n" +#~ "\n" +#~ "\t\tWhen you are ready to put the node back into service, use kubectl " +#~ "uncordon, which\n" +#~ "\t\twill make the node schedulable again.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_drain.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tEdit a resource from the default editor.\n" +#~ "\n" +#~ "\t\tThe edit command allows you to directly edit any API resource you can " +#~ "retrieve via the\n" +#~ "\t\tcommand line tools. It will open the editor defined by your " +#~ "KUBE_EDITOR, or EDITOR\n" +#~ "\t\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for " +#~ "Windows.\n" +#~ "\t\tYou can edit multiple objects, although changes are applied one at a " +#~ "time. The command\n" +#~ "\t\taccepts filenames as well as command line arguments, although the " +#~ "files you point to must\n" +#~ "\t\tbe previously saved versions of resources.\n" +#~ "\n" +#~ "\t\tEditing is done with the API version used to fetch the resource.\n" +#~ "\t\tTo edit using a specific API version, fully-qualify the resource, " +#~ "version, and group.\n" +#~ "\n" +#~ "\t\tThe default format is YAML. To edit in JSON, specify \"-o json\".\n" +#~ "\n" +#~ "\t\tThe flag --windows-line-endings can be used to force Windows line " +#~ "endings,\n" +#~ "\t\totherwise the default for your operating system will be used.\n" +#~ "\n" +#~ "\t\tIn the event an error occurs while updating, a temporary file will be " +#~ "created on disk\n" +#~ "\t\tthat contains your unapplied changes. The most common error when " +#~ "updating a resource\n" +#~ "\t\tis another editor changing the resource on the server. When this " +#~ "occurs, you will have\n" +#~ "\t\tto apply your changes to the newer version of the resource, or update " +#~ "your temporary\n" +#~ "\t\tsaved copy to include the latest resource version." 
+#~ msgstr "" +#~ "\n" +#~ "\t\t使用默认的编辑器修改资源.\n" +#~ "\n" +#~ "\t\tedit 命令允许你通过命令行直接修改 API 资源.\n" +#~ "\t\t它会打开你在 KUBE_EDITOR 或者EDITOR 环境变量中定义的编辑器\n" +#~ "\t\t或者回滚到 Linux vi 编辑器或者 Windows notepad.\n" +#~ "\t\t你可以修改多个对象, 虽然每次只能修改一次. 这个命令\n" +#~ "\t\t同时也接受文件名作为命令行参数, 尽管这些文件你指出必须是\n" +#~ "\t\t你之前保存的资源版本.\n" +#~ "\n" +#~ "\t\tEditing 是通过用于获取资源的API版本完成的.\n" +#~ "\t\t为了能通过指定的 API 版本修改, 请完全限定 resource, version 和 group.\n" +#~ "\n" +#~ "\t\t默认是 YAML 格式. 想在 JSON 中修改, 指定 \"-o json\".\n" +#~ "\n" +#~ "\t\t--windows-line-endings 命令行参数可以用来强制使用 Windows line " +#~ "endings,\n" +#~ "\t\t否则会使用你操作系统的默认值.\n" +#~ "\n" +#~ "\t\t如果更新时发生错误,将在磁盘上创建一个临时文件\n" +#~ "\t\t里面包含您未应用的更改. 更新资源时最常见的错误\n" +#~ "\t\t是另一个编辑器也在服务器中修改这个资源. 当发生这种情况时, 你将\n" +#~ "\t\t需要应用你的修改到资源的最新版本, 或者更新你被保存的临时文件\n" +#~ "\t\t复制它并使用最新的版本." + +#~ msgid "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in versions " +#~ "of zsh >= 5.2" +#~ msgstr "" +#~ "\n" +#~ "\t\tOutput shell completion code for the specified shell (bash or zsh).\n" +#~ "\t\tThe shell code must be evaluated to provide interactive\n" +#~ "\t\tcompletion of kubectl commands. This can be done by sourcing it from\n" +#~ "\t\tthe .bash_profile.\n" +#~ "\n" +#~ "\t\tNote: this requires the bash-completion framework, which is not " +#~ "installed\n" +#~ "\t\tby default on Mac. This can be installed by using homebrew:\n" +#~ "\n" +#~ "\t\t $ brew install bash-completion\n" +#~ "\n" +#~ "\t\tOnce installed, bash_completion must be evaluated. This can be done " +#~ "by adding the\n" +#~ "\t\tfollowing line to the .bash_profile\n" +#~ "\n" +#~ "\t\t $ source $(brew --prefix)/etc/bash_completion\n" +#~ "\n" +#~ "\t\tNote for zsh users: [1] zsh completions are only supported in versions " +#~ "of zsh >= 5.2" + +#~ msgid "" +#~ "\n" +#~ "\t\tPerform a rolling update of the given ReplicationController.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" +#~ msgstr "" +#~ "\n" +#~ "\t\t完成指定的 ReplicationController 的滚动升级.\n" +#~ "\n" +#~ "\t\tReplaces the specified replication controller with a new replication " +#~ "controller by updating one pod at a time to use the\n" +#~ "\t\tnew PodTemplate. 
The new-controller.json must specify the same " +#~ "namespace as the\n" +#~ "\t\texisting replication controller and overwrite at least one (common) " +#~ "label in its replicaSelector.\n" +#~ "\n" +#~ "\t\t![Workflow](http://kubernetes.io/images/docs/kubectl_rollingupdate.svg)" + +#~ msgid "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +#~ "the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" +#~ msgstr "" +#~ "\n" +#~ "\t\tReplace a resource by filename or stdin.\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted. If replacing an existing resource, " +#~ "the\n" +#~ "\t\tcomplete resource spec must be provided. This can be obtained by\n" +#~ "\n" +#~ "\t\t $ kubectl get TYPE NAME -o yaml\n" + +#~ msgid "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." +#~ msgstr "" +#~ "\n" +#~ "\t\tSet a new size for a Deployment, ReplicaSet, Replication Controller, " +#~ "or Job.\n" +#~ "\n" +#~ "\t\tScale also allows users to specify one or more preconditions for the " +#~ "scale action.\n" +#~ "\n" +#~ "\t\tIf --current-replicas or --resource-version is specified, it is " +#~ "validated before the\n" +#~ "\t\tscale is attempted, and it is guaranteed that the precondition holds " +#~ "true when the\n" +#~ "\t\tscale is sent to the server." 
+ +#~ msgid "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" +#~ msgstr "" +#~ "\n" +#~ "\t\tTo proxy all of the kubernetes api and nothing else, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/\n" +#~ "\n" +#~ "\t\tTo proxy only part of the kubernetes api and also some static files:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --www=/my/files --www-prefix=/static/ --api-" +#~ "prefix=/api/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/api/v1/pods'.\n" +#~ "\n" +#~ "\t\tTo proxy the entire kubernetes api at a different root, use:\n" +#~ "\n" +#~ "\t\t $ kubectl proxy --api-prefix=/custom/\n" +#~ "\n" +#~ "\t\tThe above lets you 'curl localhost:8001/custom/api/v1/pods'" + +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate field(s) of a resource using strategic merge patch\n" +#~ "\n" +#~ "\t\tJSON and YAML formats are accepted.\n" + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." +#~ msgstr "" +#~ "\n" +#~ "\t\tUpdate the labels on a resource.\n" +#~ "\n" +#~ "\t\t* A label must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* If --overwrite is true, then existing labels can be overwritten, " +#~ "otherwise attempting to overwrite a label will result in an error.\n" +#~ "\t\t* If --resource-version is specified, then updates will use this " +#~ "resource version, otherwise the existing resource-version will be used." + +#, c-format +#~ msgid "" +#~ "\n" +#~ "\t\tUpdate the taints on one or more nodes.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." +#~ msgstr "" +#~ "\n" +#~ "\t\t更新一个或者多个 node 上的 taints.\n" +#~ "\n" +#~ "\t\t* A taint consists of a key, value, and effect. 
As an argument here, " +#~ "it is expressed as key=value:effect.\n" +#~ "\t\t* The key must begin with a letter or number, and may contain letters, " +#~ "numbers, hyphens, dots, and underscores, up to %[1]d characters.\n" +#~ "\t\t* The value must begin with a letter or number, and may contain " +#~ "letters, numbers, hyphens, dots, and underscores, up to %[2]d characters.\n" +#~ "\t\t* The effect must be NoSchedule, PreferNoSchedule or NoExecute.\n" +#~ "\t\t* Currently taint can only apply to node." + +#~ msgid "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name or " +#~ "file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." +#~ msgstr "" +#~ "\n" +#~ "\t\tView the latest last-applied-configuration annotations by type/name or " +#~ "file.\n" +#~ "\n" +#~ "\t\tThe default output will be printed to stdout in YAML format. One can " +#~ "use -o option\n" +#~ "\t\tto change output format." + +#~ msgid "" +#~ "\n" +#~ "\t # !!!Important Note!!!\n" +#~ "\t # Requires that the 'tar' binary is present in your container\n" +#~ "\t # image. If 'tar' is not present, 'kubectl cp' will fail.\n" +#~ "\n" +#~ "\t # Copy /tmp/foo_dir local directory to /tmp/bar_dir in a remote pod " +#~ "in the default namespace\n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # Copy /tmp/foo local file to /tmp/bar in a remote pod in a " +#~ "specific container\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo local file to /tmp/bar in a remote pod in namespace " +#~ "\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# Copy /tmp/foo from a remote pod to /tmp/bar locally\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" +#~ msgstr "" +#~ "\n" +#~ "\t # !!!注意!!!\n" +#~ "\t # 要求你的容器 image 中存在 'tar' 二进制文件.\n" +#~ "\t # 如果 'tar' 不存在, 'kubectl cp' 将会失败.\n" +#~ "\n" +#~ "\t # 复制本地目录 /tmp/foo_dir 到 default namespace 下的远程 pod 的 /" +#~ "tmp/bar_dir 路径 \n" +#~ "\t\tkubectl cp /tmp/foo_dir :/tmp/bar_dir\n" +#~ "\n" +#~ " # 复制 /tmp/foo 本地文件到指定远程 pod 的指定容器的 /tmp/bar " +#~ "路径\n" +#~ "\t\tkubectl cp /tmp/foo :/tmp/bar -c \n" +#~ "\n" +#~ "\t\t# 复制 /tmp/foo 本地文件到 namespace 下的某个 pod " +#~ "的 /tmp/bar 路径\n" +#~ "\t\tkubectl cp /tmp/foo /:/tmp/bar\n" +#~ "\n" +#~ "\t\t# 从一个远程的 pod 的 /tmp/foo 路径复制到本地 /tmp/bar 路径\n" +#~ "\t\tkubectl cp /:/tmp/foo /tmp/bar" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new TLS secret named tls-secret with the given key pair:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" +#~ msgstr "" +#~ "\n" +#~ "\t # 使用提供的 key pair 创建一个名称为 tls-secret 的 TLS secret:\n" +#~ "\t kubectl create secret tls tls-secret --cert=path/to/tls.cert --" +#~ "key=path/to/tls.key" + +#~ msgid "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/.
+#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" +#~ msgstr "" +#~ "\n" +#~ "\t # Create a new secret named my-secret with keys for each file in " +#~ "folder bar\n" +#~ "\t kubectl create secret generic my-secret --from-file=path/to/bar\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with specified keys instead of " +#~ "names on disk\n" +#~ "\t kubectl create secret generic my-secret --from-file=ssh-privatekey=~/." +#~ "ssh/id_rsa --from-file=ssh-publickey=~/.ssh/id_rsa.pub\n" +#~ "\n" +#~ "\t # Create a new secret named my-secret with key1=supersecret and " +#~ "key2=topsecret\n" +#~ "\t kubectl create secret generic my-secret --from-" +#~ "literal=key1=supersecret --from-literal=key2=topsecret" + +#~ msgid "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" +#~ msgstr "" +#~ "\n" +#~ "\t# Create a new ExternalName service named my-ns \n" +#~ "\tkubectl create service externalname my-ns --external-name bar.com" + +#~ msgid "" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # Create a new clusterIP service named my-cs (in headless mode)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" +#~ msgstr "" +#~ "\n" +#~ " # 创建一个名称为 my-cs 的 clusterIP service\n" +#~ " kubectl create service clusterip my-cs --tcp=5678:8080\n" +#~ "\n" +#~ " # 创建一个名称为 my-cs 的 clusterIP service (在 headless 模式)\n" +#~ " kubectl create service clusterip my-cs --clusterip=\"None\"" + +#~ msgid "" +#~ "\n" +#~ " # Create a new deployment named my-dep that runs the busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" +#~ msgstr "" +#~ "\n" +#~ " # 创建一个名称为 my-dep 的 deployment 并运行 busybox image.\n" +#~ " kubectl create deployment my-dep --image=busybox" + +#~ msgid "" +#~ "\n" +#~ " # Create a new nodeport service named my-ns\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" +#~ msgstr "" +#~ "\n" +#~ " # 创建一个名称为 my-ns 的 nodeport service\n" +#~ " kubectl create service nodeport my-ns --tcp=5678:8080" + +#~ msgid "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend running " +#~ "nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # Update pod 'foo' by removing an annotation named 'description' if it " +#~ "exists.\n" +#~ " # Does not require the --overwrite flag.\n" +#~ " kubectl 
annotate pods foo description-" +#~ msgstr "" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend'.\n" +#~ " # If the same annotation is set multiple times, only the last value " +#~ "will be applied\n" +#~ " kubectl annotate pods foo description='my frontend'\n" +#~ "\n" +#~ " # Update a pod identified by type and name in \"pod.json\"\n" +#~ " kubectl annotate -f pod.json description='my frontend'\n" +#~ "\n" +#~ " # Update pod 'foo' with the annotation 'description' and the value 'my " +#~ "frontend running nginx', overwriting any existing value.\n" +#~ " kubectl annotate --overwrite pods foo description='my frontend running " +#~ "nginx'\n" +#~ "\n" +#~ " # Update all pods in the namespace\n" +#~ " kubectl annotate pods --all description='my frontend running nginx'\n" +#~ "\n" +#~ " # Update pod 'foo' only if the resource is unchanged from version 1.\n" +#~ " kubectl annotate pods foo description='my frontend running nginx' --" +#~ "resource-version=1\n" +#~ "\n" +#~ " # 更新名称为 'foo' 的 pod, 删除一个名称为 'description' 的 annotation " +#~ "如果它存在. \n" +#~ " # 不要求使用 --overwrite flag.\n" +#~ " kubectl annotate pods foo description-" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_serviceaccount.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a clusterIP service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " 使用一个指定的名称创建一个 clusterIP service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a deployment with the specified name." +#~ msgstr "" +#~ "\n" +#~ " 使用一个指定的名称创建一个 deployment." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "" +#~ "\n" +#~ " Create a nodeport service with the specified name." +#~ msgstr "" +#~ "\n" +#~ " 使用一个指定的名称创建一个 nodeport service." + +#~ msgid "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or specify " +#~ "--all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." +#~ msgstr "" +#~ "\n" +#~ " Dumps cluster info out suitable for debugging and diagnosing cluster " +#~ "problems. By default, dumps everything to\n" +#~ " stdout. You can optionally specify a directory with --output-" +#~ "directory. If you specify a directory, kubernetes will\n" +#~ " build a set of files in that directory. By default only dumps things " +#~ "in the 'kube-system' namespace, but you can\n" +#~ " switch to a different namespace with the --namespaces flag, or specify " +#~ "--all-namespaces to dump all namespaces.\n" +#~ "\n" +#~ " The command also dumps the logs of all of the pods in the cluster, " +#~ "these logs are dumped into different directories\n" +#~ " based on namespace and pod name." 
+ +#~ msgid "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." +#~ msgstr "" +#~ "\n" +#~ " Display addresses of the master and services with label kubernetes.io/" +#~ "cluster-service=true\n" +#~ " To further debug and diagnose cluster problems, use 'kubectl cluster-" +#~ "info dump'." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L136 +#~ msgid "A schedule in the Cron format the job should be run with." +#~ msgstr "A schedule in the Cron format the job should be run with." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L134 +#~ msgid "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." +#~ msgstr "" +#~ "An inline JSON override for the generated service object. If this is non-" +#~ "empty, it is used to override the generated object. Requires that the " +#~ "object supply a valid apiVersion field. Only used if --expose is true." + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "通过文件名或标准输入流(stdin)对资源进行配置" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L55 +#~ msgid "Auto-scale a Deployment, ReplicaSet, or ReplicationController" +#~ msgstr "" +#~ "自动调整一个 Deployment, ReplicaSet, 或者 ReplicationController 的副本数量" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L101 +#~ msgid "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" +#~ msgstr "" +#~ "Container name which will have its image upgraded. Only relevant when --" +#~ "image is specified, ignored otherwise. Required when using --image on a " +#~ "multi-container pod" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_clusterrolebinding.go#L43 +#~ msgid "Create a ClusterRoleBinding for a particular ClusterRole" +#~ msgstr "为一个指定的 ClusterRole 创建一个 ClusterRoleBinding" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L181 +#~ msgid "Create a LoadBalancer service." +#~ msgstr "创建一个 LoadBalancer service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L124 +#~ msgid "Create a NodePort service." +#~ msgstr "创建一个 NodePort service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_rolebinding.go#L43 +#~ msgid "Create a RoleBinding for a particular Role or ClusterRole" +#~ msgstr "为一个指定的 Role 或者 ClusterRole创建一个 RoleBinding" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L68 +#~ msgid "Create a clusterIP service." +#~ msgstr "创建一个 clusterIP service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_configmap.go#L59 +#~ msgid "Create a configmap from a local file, directory or literal value" +#~ msgstr "从本地 file, directory 或者 literal value 创建一个 configmap" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_deployment.go#L44 +#~ msgid "Create a deployment with the specified name." +#~ msgstr "创建一个指定名称的 deployment." 
+ +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_pdb.go#L49 +#~ msgid "Create a pod disruption budget with the specified name." +#~ msgstr "创建一个指定名称的 pod disruption budget." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_quota.go#L47 +#~ msgid "Create a quota with the specified name." +#~ msgstr "创建一个指定名称的 quota." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create.go#L56 +#~ msgid "Create a resource by filename or stdin" +#~ msgstr "通过文件名或者标准输入流(stdin)创建一个资源" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_secret.go#L73 +#~ msgid "Create a secret from a local file, directory or literal value" +#~ msgstr "从本地 file, directory 或者 literal value 创建一个 secret" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L36 +#~ msgid "Create a service using specified subcommand." +#~ msgstr "使用指定的 subcommand 创建一个 service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L240 +#~ msgid "Create an ExternalName service." +#~ msgstr "Create an ExternalName service." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/delete.go#L130 +#~ msgid "" +#~ "Delete resources by filenames, stdin, resources and names, or by resources " +#~ "and label selector" +#~ msgstr "" +#~ "Delete resources by filenames, stdin, resources and names, or by resources " +#~ "and label selector" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/stop.go#L58 +#~ msgid "Deprecated: Gracefully shut down a resource by name or filename" +#~ msgstr "Deprecated: Gracefully shut down a resource by name or filename" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_node.go#L77 +#~ msgid "Display Resource (CPU/Memory) usage of nodes" +#~ msgstr "显示 nodes 的 Resource (CPU/Memory) 使用" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top_pod.go#L79 +#~ msgid "Display Resource (CPU/Memory) usage of pods" +#~ msgstr "显示 pods 的 Resource (CPU/Memory) 使用" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/top.go#L43 +#~ msgid "Display Resource (CPU/Memory) usage." +#~ msgstr "显示 Resource (CPU/Memory) 使用." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo.go#L49 +#~ msgid "Display cluster info" +#~ msgstr "显示集群信息" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/current_context.go#L48 +#~ msgid "Displays the current-context" +#~ msgstr "显示当前的 context" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/explain.go#L50 +#~ msgid "Documentation of resources" +#~ msgstr "查看资源的文档" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/clusterinfo_dump.go#L37 +#~ msgid "Dump lots of relevant info for debugging and diagnosis" +#~ msgstr "Dump lots of relevant info for debugging and diagnosis" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L102 +#~ msgid "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." +#~ msgstr "" +#~ "Explicit policy for when to pull container images. Required when --image " +#~ "is same as existing image, ignored otherwise." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L105 +#~ msgid "" +#~ "IP to assign to the Load Balancer. 
If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." +#~ msgstr "" +#~ "IP to assign to the Load Balancer. If empty, an ephemeral IP will be " +#~ "created and used (cloud-provider specific)." + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L98 +#~ msgid "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" +#~ msgstr "" +#~ "Image to use for upgrading the replication controller. Must be distinct " +#~ "from the existing image (either new image or new image tag). Can not be " +#~ "used with --filename/-f" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollout/rollout.go#L46 +#~ msgid "Manage a deployment rollout" +#~ msgstr "管理一个 deployment 的 rollout" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/edit.go#L115 +#~ msgid "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" +#~ msgstr "" +#~ "Output the formatted object with the given group version (for ex: " +#~ "'extensions/v1beta1').)" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L84 +#~ msgid "Perform a rolling update of the given ReplicationController" +#~ msgstr "完成指定的 ReplicationController 的滚动升级" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/replace.go#L70 +#~ msgid "Replace a resource by filename or stdin" +#~ msgstr "通过 filename 或者 stdin替换一个资源" + +# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/scale.go#L71 +#~ msgid "" +#~ "Set a new size for a Deployment, ReplicaSet, Replication Controller, or Job" +#~ msgstr "" +#~ "为 Deployment, ReplicaSet, Replication Controller 或者 Job 设置一个新的副本" +#~ "数量" + +#~ msgid "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." +#~ msgstr "" +#~ "Set the last-applied-configuration annotation on a live object to match " +#~ "the contents of a file." 
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_cluster.go#L67
+#~ msgid "Sets a cluster entry in kubeconfig"
+#~ msgstr "设置 kubeconfig 文件中的一个集群条目"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_context.go#L57
+#~ msgid "Sets a context entry in kubeconfig"
+#~ msgstr "设置 kubeconfig 文件中的一个 context 条目"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/create_authinfo.go#L103
+#~ msgid "Sets a user entry in kubeconfig"
+#~ msgstr "设置 kubeconfig 文件中的一个用户条目"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/set.go#L59
+#~ msgid "Sets an individual value in a kubeconfig file"
+#~ msgstr "设置 kubeconfig 文件中的一个单个值"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/use_context.go#L48
+#~ msgid "Sets the current-context in a kubeconfig file"
+#~ msgstr "设置 kubeconfig 文件中的当前上下文"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L87
+#~ msgid ""
+#~ "Take a replication controller, service, deployment or pod and expose it as "
+#~ "a new Kubernetes Service"
+#~ msgstr ""
+#~ "将一个 replication controller, service, deployment 或者 pod 暴露为一个新"
+#~ "的 Kubernetes Service"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/rollingupdate.go#L100
+#~ msgid ""
+#~ "The key to use to differentiate between two different controllers, default "
+#~ "'deployment'. Only relevant when --image is specified, ignored otherwise"
+#~ msgstr ""
+#~ "用于区分两个不同 controller 的 key, 默认为 'deployment'. 只有在指定 --"
+#~ "image 时才有意义, 否则忽略"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L113
+#~ msgid ""
+#~ "The name of the API generator to use, see http://kubernetes.io/docs/user-"
+#~ "guide/kubectl-conventions/#generators for a list."
+#~ msgstr ""
+#~ "使用 API generator 的名字, 在 http://kubernetes.io/docs/user-guide/kubectl-"
+#~ "conventions/#generators 查看列表."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/autoscale.go#L66
+#~ msgid ""
+#~ "The name of the API generator to use. Currently there is only 1 generator."
+#~ msgstr "使用 API generator 的名字. 目前只有 1 个 generator."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L133
+#~ msgid ""
+#~ "The name of the generator to use for creating a service. Only used if --"
+#~ "expose is true"
+#~ msgstr ""
+#~ "使用 generator 的名称创建一个 service. 只有在 --expose 为 true 的时候使用"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L121
+#~ msgid ""
+#~ "The port that this container exposes. If --expose is true, this is also "
+#~ "the port used by the service that is created."
+#~ msgstr ""
+#~ "The port that this container exposes. If --expose is true, this is also "
+#~ "the port used by the service that is created."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/run.go#L128
+#~ msgid ""
+#~ "The restart policy for this Pod. Legal values [Always, OnFailure, "
+#~ "Never]. If set to 'Always' a deployment is created, if set to 'OnFailure' "
+#~ "a job is created, if set to 'Never', a regular pod is created. For the "
+#~ "latter two --replicas must be 1. Default 'Always', for CronJobs `Never`."
+#~ msgstr ""
+#~ "这个 Pod 的 restart policy. Legal values [Always, OnFailure, Never]. 如果"
+#~ "设置为 'Always' 一个 deployment 被创建, 如果设置为 'OnFailure' 一个 job 被"
+#~ "创建, 如果设置为 'Never', 一个普通的 pod 被创建. 对于后面两个 --replicas 必"
+#~ "须为 1. 默认 'Always', 为 CronJobs 设置为 `Never`."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/expose.go#L101
+#~ msgid ""
+#~ "Type for this service: ClusterIP, NodePort, or LoadBalancer. Default is "
+#~ "'ClusterIP'."
+#~ msgstr ""
+#~ "对于服务的类型: ClusterIP, NodePort, 或者 LoadBalancer. 默认是 'ClusterIP'."
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/config/unset.go#L47
+#~ msgid "Unsets an individual value in a kubeconfig file"
+#~ msgstr "取消设置 kubeconfig 文件中的一个单个值"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/patch.go#L91
+#~ msgid "Update field(s) of a resource using strategic merge patch"
+#~ msgstr "使用 strategic merge patch 更新一个资源的 field(s)"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/set/set_image.go#L94
+#~ msgid "Update image of a pod template"
+#~ msgstr "更新一个 pod template 的镜像"
+
+#~ msgid ""
+#~ "View latest last-applied-configuration annotations of a resource/object"
+#~ msgstr "显示一个 resource/object 的最新 last-applied-configuration annotations"
+
+# https://github.com/kubernetes/kubernetes/blob/master/pkg/kubectl/cmd/create_service.go#L253
+#~ msgid "external name of service"
+#~ msgstr "服务的外部名称"
+
+#~ msgid ""
+#~ "watch is only supported on individual resources and resource collections - "
+#~ "%d resources were found"
+#~ msgid_plural ""
+#~ "watch is only supported on individual resources and resource collections - "
+#~ "%d resources were found"
+#~ msgstr[0] ""
+#~ "watch 仅支持单独的资源或者资源集合 - 找到了 %d 个资源"
+#~ msgstr[1] ""
+#~ "watch 仅支持单独的资源或者资源集合 - 找到了 %d 个资源"
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.mo
new file mode 100644
index 0000000000..9be9a30d32
Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.mo differ
diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.po
new file mode 100644
index 0000000000..58fd63a9ff
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/zh_TW/LC_MESSAGES/k8s.po
@@ -0,0 +1,81 @@
+# Test translations for unit tests.
+# Copyright (C) 2017
+# This file is distributed under the same license as the Kubernetes package.
+# FIRST AUTHOR <warmchang@outlook.com>, 2017.
+# +msgid "" +msgstr "" +"Project-Id-Version: hello-world\n" +"Report-Msgid-Bugs-To: EMAIL\n" +"POT-Creation-Date: 2021-07-07 20:15+0200\n" +"PO-Revision-Date: 2017-06-02 09:13+0800\n" +"Last-Translator: William Chang \n" +"Language-Team: \n" +"Language: zh\n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 2.0.2\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Plural-Forms: nplurals=2; plural=(n > 1);\n" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_cluster.go:42 +msgid "Delete the specified cluster from the kubeconfig" +msgstr "刪除 kubeconfig 檔案中指定的叢集(cluster)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/delete_context.go:42 +msgid "Delete the specified context from the kubeconfig" +msgstr "刪除 kubeconfig 檔案中指定的 context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_contexts.go:72 +msgid "Describe one or many contexts" +msgstr "描述一個或多個 context" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/get_clusters.go:41 +msgid "Display clusters defined in the kubeconfig" +msgstr "顯示 kubeconfig 檔案中定義的叢集(cluster)" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:81 +msgid "Display merged kubeconfig settings or a specified kubeconfig file" +msgstr "顯示合併的 kubeconfig 配置或一個指定的 kubeconfig 檔案" + +#: staging/src/k8s.io/kubectl/pkg/cmd/config/config.go:42 +msgid "Modify kubeconfig files" +msgstr "修改 kubeconfig 檔案" + +#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135 +msgid "Update the annotations on a resource" +msgstr "更新一個資源的注解(annotations)" + +#~ msgid "Apply a configuration to a resource by filename or stdin" +#~ msgstr "通過檔案名或標準輸入流(stdin)對資源進行配置" + +#~ msgid "Displays the current-context" +#~ msgstr "顯示目前的 context" + +#~ msgid "Sets a cluster entry in kubeconfig" +#~ msgstr "設置 kubeconfig 檔案中的一個叢集(cluster)條目" + +#~ msgid "Sets a context entry in kubeconfig" +#~ msgstr "設置 kubeconfig 檔案中的一個 context 條目" + +#~ msgid "Sets a user entry in kubeconfig" +#~ msgstr "設置 kubeconfig 檔案中的一個使用者條目" + +#~ msgid "Sets an individual value in a kubeconfig file" +#~ msgstr "設置 kubeconfig 檔案中的一個值" + +#~ msgid "Sets the current-context in a kubeconfig file" +#~ msgstr "設置 kubeconfig 檔案中的目前 context" + +#~ msgid "Unsets an individual value in a kubeconfig file" +#~ msgstr "取消設置 kubeconfig 檔案中的一個值" + +#~ msgid "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgid_plural "" +#~ "watch is only supported on individual resources and resource collections " +#~ "- %d resources were found" +#~ msgstr[0] "一次只能 watch 一個資源或資料集合 - 找到了 %d 個資源" +#~ msgstr[1] "一次只能 watch 一個資源或資料集合 - 找到了 %d 個資源" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..5487fb8ccf Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..7e77c43009 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/default/LC_MESSAGES/k8s.po @@ -0,0 +1,28 @@ +# Test translations for unit tests. +# Copyright (C) 2016 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR brendan.d.burns@gmail.com, 2016. 
+# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-12-12 20:03+0000\n" +"PO-Revision-Date: 2016-12-13 21:35-0800\n" +"Last-Translator: Brendan Burns \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.6.10\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Language-Team: \n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"Language: en\n" + +msgid "test_plural" +msgid_plural "test_plural" +msgstr[0] "there was %d item" +msgstr[1] "there were %d items" + +msgid "test_string" +msgstr "foo" diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.mo b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.mo new file mode 100644 index 0000000000..44fe7b5d96 Binary files /dev/null and b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.mo differ diff --git a/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.po b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.po new file mode 100644 index 0000000000..9944046aeb --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/i18n/translations/test/en_US/LC_MESSAGES/k8s.po @@ -0,0 +1,28 @@ +# Test translations for unit tests. +# Copyright (C) 2016 +# This file is distributed under the same license as the Kubernetes package. +# FIRST AUTHOR brendan.d.burns@gmail.com, 2016. +# +msgid "" +msgstr "" +"Project-Id-Version: gettext-go-examples-hello\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2013-12-12 20:03+0000\n" +"PO-Revision-Date: 2016-12-13 22:12-0800\n" +"Last-Translator: Brendan Burns \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"X-Generator: Poedit 1.6.10\n" +"X-Poedit-SourceCharset: UTF-8\n" +"Language-Team: \n" +"Plural-Forms: nplurals=2; plural=(n != 1);\n" +"Language: en\n" + +msgid "test_plural" +msgid_plural "test_plural" +msgstr[0] "there was %d item" +msgstr[1] "there were %d items" + +msgid "test_string" +msgstr "baz" diff --git a/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go b/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go new file mode 100644 index 0000000000..0265b9fb17 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/interrupt/interrupt.go @@ -0,0 +1,104 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package interrupt + +import ( + "os" + "os/signal" + "sync" + "syscall" +) + +// terminationSignals are signals that cause the program to exit in the +// supported platforms (linux, darwin, windows). +var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT} + +// Handler guarantees execution of notifications after a critical section (the function passed +// to a Run method), even in the presence of process termination. It guarantees exactly once +// invocation of the provided notify functions. 
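+//
+// A minimal usage sketch (hypothetical; cleanupTempFiles and doWork are
+// placeholder names, not part of this package):
+//
+//	h := interrupt.New(nil, cleanupTempFiles)
+//	err := h.Run(func() error {
+//		return doWork() // cleanupTempFiles runs afterwards, or on SIGINT/SIGTERM
+//	})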
+type Handler struct { + notify []func() + final func(os.Signal) + once sync.Once +} + +// Chain creates a new handler that invokes all notify functions when the critical section exits +// and then invokes the optional handler's notifications. This allows critical sections to be +// nested without losing exactly once invocations. Notify functions can invoke any cleanup needed +// but should not exit (which is the responsibility of the parent handler). +func Chain(handler *Handler, notify ...func()) *Handler { + if handler == nil { + return New(nil, notify...) + } + return New(handler.Signal, append(notify, handler.Close)...) +} + +// New creates a new handler that guarantees all notify functions are run after the critical +// section exits (or is interrupted by the OS), then invokes the final handler. If no final +// handler is specified, the default final is `os.Exit(1)`. A handler can only be used for +// one critical section. +func New(final func(os.Signal), notify ...func()) *Handler { + return &Handler{ + final: final, + notify: notify, + } +} + +// Close executes all the notification handlers if they have not yet been executed. +func (h *Handler) Close() { + h.once.Do(func() { + for _, fn := range h.notify { + fn() + } + }) +} + +// Signal is called when an os.Signal is received, and guarantees that all notifications +// are executed, then the final handler is executed. This function should only be called once +// per Handler instance. +func (h *Handler) Signal(s os.Signal) { + h.once.Do(func() { + for _, fn := range h.notify { + fn() + } + if h.final == nil { + os.Exit(1) + } + h.final(s) + }) +} + +// Run ensures that any notifications are invoked after the provided fn exits (even if the +// process is interrupted by an OS termination signal). Notifications are only invoked once +// per Handler instance, so calling Run more than once will not behave as the user expects. +func (h *Handler) Run(fn func() error) error { + ch := make(chan os.Signal, 1) + signal.Notify(ch, terminationSignals...) + defer func() { + signal.Stop(ch) + close(ch) + }() + go func() { + sig, ok := <-ch + if !ok { + return + } + h.Signal(sig) + }() + defer h.Close() + return fn() +} diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS new file mode 100644 index 0000000000..cb873612b1 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/OWNERS @@ -0,0 +1,6 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: + - apelisse +reviewers: + - apelisse diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go new file mode 100644 index 0000000000..08194d5808 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package openapi is a collection of libraries for fetching the openapi spec +// from a Kubernetes server and then indexing the type definitions. 
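+//
+// A hypothetical end-to-end sketch (the discovery client dc and the gvk value
+// are assumptions, not defined in this package):
+//
+//	getter := NewOpenAPIGetter(dc)                     // caches the raw schema in memory
+//	resources, err := NewOpenAPIParser(getter).Parse() // parses it into Resources once
+//	if err == nil {
+//		s := resources.LookupResource(gvk) // proto.Schema for the kind, or nil
+//		_ = s
+//	}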
+// The openapi spec contains the object model definitions and extensions metadata
+// such as the patchStrategy and patchMergeKey for creating patches.
+package openapi // k8s.io/kubectl/pkg/util/openapi
diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go
new file mode 100644
index 0000000000..74955da367
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi.go
@@ -0,0 +1,177 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import (
+	openapi_v2 "github.com/google/gnostic-models/openapiv2"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/kube-openapi/pkg/util/proto"
+	"sigs.k8s.io/yaml"
+)
+
+// OpenAPIResourcesGetter is a source of OpenAPI V2 resource specifications,
+// used to lazy-load them.
+type OpenAPIResourcesGetter interface {
+	OpenAPISchema() (Resources, error)
+}
+
+// Resources describes a resource provider that can return a resource's
+// schema and request metadata for a given group-version-kind.
+type Resources interface {
+	LookupResource(gvk schema.GroupVersionKind) proto.Schema
+	GetConsumes(gvk schema.GroupVersionKind, operation string) []string
+}
+
+// groupVersionKindExtensionKey is the key used to lookup the
+// GroupVersionKind value for an object definition from the
+// definition's "extensions" map.
+const groupVersionKindExtensionKey = "x-kubernetes-group-version-kind"
+
+// document is an implementation of `Resources`. It looks for
+// resources in an openapi Schema.
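+//
+// Lookups are keyed off the x-kubernetes-group-version-kind extension; a
+// sketch (the Deployment GVK is only an illustrative value):
+//
+//	s := d.LookupResource(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
+//	// s is nil when the kind is absent from the spec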
+type document struct { + // Maps gvk to model name + resources map[schema.GroupVersionKind]string + models proto.Models + doc *openapi_v2.Document +} + +var _ Resources = &document{} + +// NewOpenAPIData creates a new `Resources` out of the openapi document +func NewOpenAPIData(doc *openapi_v2.Document) (Resources, error) { + models, err := proto.NewOpenAPIData(doc) + if err != nil { + return nil, err + } + + resources := map[schema.GroupVersionKind]string{} + for _, modelName := range models.ListModels() { + model := models.LookupModel(modelName) + if model == nil { + panic("ListModels returns a model that can't be looked-up.") + } + gvkList := parseGroupVersionKind(model) + for _, gvk := range gvkList { + if len(gvk.Kind) > 0 { + resources[gvk] = modelName + } + } + } + + return &document{ + resources: resources, + models: models, + doc: doc, + }, nil +} + +func (d *document) LookupResource(gvk schema.GroupVersionKind) proto.Schema { + modelName, found := d.resources[gvk] + if !found { + return nil + } + return d.models.LookupModel(modelName) +} + +func (d *document) GetConsumes(gvk schema.GroupVersionKind, operation string) []string { + for _, path := range d.doc.GetPaths().GetPath() { + for _, ex := range path.GetValue().GetPatch().GetVendorExtension() { + if ex.GetValue().GetYaml() == "" || + ex.GetName() != "x-kubernetes-group-version-kind" { + continue + } + + var value map[string]string + err := yaml.Unmarshal([]byte(ex.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + if value["group"] == gvk.Group && value["kind"] == gvk.Kind && value["version"] == gvk.Version { + switch operation { + case "GET": + return path.GetValue().GetGet().GetConsumes() + case "PATCH": + return path.GetValue().GetPatch().GetConsumes() + case "HEAD": + return path.GetValue().GetHead().GetConsumes() + case "PUT": + return path.GetValue().GetPut().GetConsumes() + case "POST": + return path.GetValue().GetPost().GetConsumes() + case "OPTIONS": + return path.GetValue().GetOptions().GetConsumes() + case "DELETE": + return path.GetValue().GetDelete().GetConsumes() + } + } + } + } + + return nil +} + +// Get and parse GroupVersionKind from the extension. Returns empty if it doesn't have one. +func parseGroupVersionKind(s proto.Schema) []schema.GroupVersionKind { + extensions := s.GetExtensions() + + gvkListResult := []schema.GroupVersionKind{} + + // Get the extensions + gvkExtension, ok := extensions[groupVersionKindExtensionKey] + if !ok { + return []schema.GroupVersionKind{} + } + + // gvk extension must be a list of at least 1 element. + gvkList, ok := gvkExtension.([]interface{}) + if !ok { + return []schema.GroupVersionKind{} + } + + for _, gvk := range gvkList { + // gvk extension list must be a map with group, version, and + // kind fields + gvkMap, ok := gvk.(map[interface{}]interface{}) + if !ok { + continue + } + group, ok := gvkMap["group"].(string) + if !ok { + continue + } + version, ok := gvkMap["version"].(string) + if !ok { + continue + } + kind, ok := gvkMap["kind"].(string) + if !ok { + continue + } + + gvkListResult = append(gvkListResult, schema.GroupVersionKind{ + Group: group, + Version: version, + Kind: kind, + }) + } + + return gvkListResult +} diff --git a/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go new file mode 100644 index 0000000000..3179161ee0 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/openapi/openapi_getter.go @@ -0,0 +1,82 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package openapi
+
+import (
+	"sync"
+
+	openapi_v2 "github.com/google/gnostic-models/openapiv2"
+	"k8s.io/client-go/discovery"
+)
+
+// CachedOpenAPIGetter fetches the openapi schema once and then caches it in memory
+type CachedOpenAPIGetter struct {
+	openAPIClient discovery.OpenAPISchemaInterface
+
+	// Cached results
+	sync.Once
+	openAPISchema *openapi_v2.Document
+	err           error
+}
+
+var _ discovery.OpenAPISchemaInterface = &CachedOpenAPIGetter{}
+
+// NewOpenAPIGetter returns an object that fetches the OpenAPI schema from a
+// server once and then serves it from memory for subsequent invocations.
+func NewOpenAPIGetter(openAPIClient discovery.OpenAPISchemaInterface) *CachedOpenAPIGetter {
+	return &CachedOpenAPIGetter{
+		openAPIClient: openAPIClient,
+	}
+}
+
+// OpenAPISchema implements OpenAPISchemaInterface.
+func (g *CachedOpenAPIGetter) OpenAPISchema() (*openapi_v2.Document, error) {
+	g.Do(func() {
+		g.openAPISchema, g.err = g.openAPIClient.OpenAPISchema()
+	})
+
+	// Return the saved result.
+	return g.openAPISchema, g.err
+}
+
+// CachedOpenAPIParser parses the raw OpenAPI schema into Resources once and
+// caches the result in memory.
+type CachedOpenAPIParser struct {
+	openAPIClient discovery.OpenAPISchemaInterface
+
+	// Cached results
+	sync.Once
+	openAPIResources Resources
+	err              error
+}
+
+// NewOpenAPIParser returns a parser that lazily fetches and parses the schema.
+func NewOpenAPIParser(openAPIClient discovery.OpenAPISchemaInterface) *CachedOpenAPIParser {
+	return &CachedOpenAPIParser{
+		openAPIClient: openAPIClient,
+	}
+}
+
+// Parse fetches the schema on first use and converts it into Resources.
+func (p *CachedOpenAPIParser) Parse() (Resources, error) {
+	p.Do(func() {
+		oapi, err := p.openAPIClient.OpenAPISchema()
+		if err != nil {
+			p.err = err
+			return
+		}
+		p.openAPIResources, p.err = NewOpenAPIData(oapi)
+	})
+
+	return p.openAPIResources, p.err
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/slice/slice.go b/vendor/k8s.io/kubectl/pkg/util/slice/slice.go
new file mode 100644
index 0000000000..d02bb3458b
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/slice/slice.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package slice
+
+import (
+	"sort"
+)
+
+// SortInts64 sorts []int64 in increasing order
+func SortInts64(a []int64) { sort.Slice(a, func(i, j int) bool { return a[i] < a[j] }) }
+
+// ContainsString checks if a given slice of strings contains the provided string.
+// If a modifier func is provided, it is called with the slice item before the comparison.
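+//
+// A sketch of the modifier (the values are illustrative only):
+//
+//	// true: the modifier strips the trailing colon before comparing
+//	ContainsString([]string{"kube-system:"}, "kube-system",
+//		func(s string) string { return strings.TrimSuffix(s, ":") })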
+func ContainsString(slice []string, s string, modifier func(s string) string) bool { + for _, item := range slice { + if item == s { + return true + } + if modifier != nil && modifier(item) == s { + return true + } + } + return false +} + +// ToSet returns a single slice containing the unique values from one or more slices. The order of the items in the +// result is not guaranteed. +func ToSet[T comparable](slices ...[]T) []T { + if len(slices) == 0 { + return nil + } + m := map[T]struct{}{} + for _, slice := range slices { + for _, value := range slice { + m[value] = struct{}{} + } + } + result := []T{} + for k := range m { + result = append(result, k) + } + return result +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go b/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go new file mode 100644 index 0000000000..447a39621f --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/command_groups.go @@ -0,0 +1,59 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "github.com/spf13/cobra" +) + +type CommandGroup struct { + Message string + Commands []*cobra.Command +} + +type CommandGroups []CommandGroup + +func (g CommandGroups) Add(c *cobra.Command) { + for _, group := range g { + c.AddCommand(group.Commands...) + } +} + +func (g CommandGroups) Has(c *cobra.Command) bool { + for _, group := range g { + for _, command := range group.Commands { + if command == c { + return true + } + } + } + return false +} + +func AddAdditionalCommands(g CommandGroups, message string, cmds []*cobra.Command) CommandGroups { + group := CommandGroup{Message: message} + for _, c := range cmds { + // Don't show commands that have no short description + if !g.Has(c) && len(c.Short) != 0 { + group.Commands = append(group.Commands, c) + } + } + if len(group.Commands) == 0 { + return g + } + return append(g, group) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go b/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go new file mode 100644 index 0000000000..fdfdf08eeb --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/help_flags_printer.go @@ -0,0 +1,76 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package templates
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/mitchellh/go-wordwrap"
+	flag "github.com/spf13/pflag"
+)
+
+const offset = 10
+
+// HelpFlagPrinter is a printer that processes the help flag and prints
+// it to the i/o writer.
+type HelpFlagPrinter struct {
+	wrapLimit uint
+	out       io.Writer
+}
+
+// NewHelpFlagPrinter will initialize a HelpFlagPrinter given the
+// i/o writer and the wrap limit.
+func NewHelpFlagPrinter(out io.Writer, wrapLimit uint) *HelpFlagPrinter {
+	return &HelpFlagPrinter{
+		wrapLimit: wrapLimit,
+		out:       out,
+	}
+}
+
+// PrintHelpFlag will beautify the help flag and print it out to p.out
+func (p *HelpFlagPrinter) PrintHelpFlag(flag *flag.Flag) {
+	formatBuf := new(bytes.Buffer)
+	writeFlag(formatBuf, flag)
+
+	wrappedStr := formatBuf.String()
+	flagAndUsage := strings.Split(formatBuf.String(), "\n")
+	flagStr := flagAndUsage[0]
+
+	// if the flag usage is longer than one line, wrap it again
+	if len(flagAndUsage) > 1 {
+		nextLines := strings.Join(flagAndUsage[1:], " ")
+		wrappedUsages := wordwrap.WrapString(nextLines, p.wrapLimit-offset)
+		wrappedStr = flagStr + "\n" + wrappedUsages
+	}
+	appendTabStr := strings.ReplaceAll(wrappedStr, "\n", "\n\t")
+
+	// Fprint rather than Fprintf: the assembled help text may contain '%'
+	// and must not be interpreted as a format string.
+	fmt.Fprint(p.out, appendTabStr+"\n\n")
+}
+
+// writeFlag will output the help flag based
+// on the format provided by getFlagFormat to i/o writer
+func writeFlag(out io.Writer, f *flag.Flag) {
+	deprecated := ""
+	if f.Deprecated != "" {
+		deprecated = fmt.Sprintf(" (DEPRECATED: %s)", f.Deprecated)
+	}
+	fmt.Fprintf(out, getFlagFormat(f), f.Shorthand, f.Name, f.DefValue, f.Usage, deprecated)
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go b/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go
new file mode 100644
index 0000000000..962cd9eec9
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/markdown.go
@@ -0,0 +1,116 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package templates
+
+import (
+	"fmt"
+	"io"
+	"strings"
+
+	"github.com/russross/blackfriday/v2"
+)
+
+const linebreak = "\n"
+
+// ASCIIRenderer implements blackfriday.Renderer
+var _ blackfriday.Renderer = &ASCIIRenderer{}
+
+// ASCIIRenderer is a blackfriday.Renderer intended for rendering markdown
+// documents as plain text, well suited for human reading on terminals.
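+//
+// It is installed as the blackfriday renderer, as normalizers.go below does, e.g.:
+//
+//	out := blackfriday.Run(in, blackfriday.WithRenderer(&ASCIIRenderer{Indentation: Indentation}))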
+type ASCIIRenderer struct {
+	Indentation string
+
+	listItemCount uint
+	listLevel     uint
+}
+
+// RenderNode renders a single markdown node as plain text.
+func (r *ASCIIRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
+	switch node.Type {
+	case blackfriday.Text:
+		raw := string(node.Literal)
+		lines := strings.Split(raw, linebreak)
+		for _, line := range lines {
+			trimmed := strings.Trim(line, " \n\t")
+			if len(trimmed) > 0 && trimmed[0] != '_' {
+				w.Write([]byte(" "))
+			}
+			w.Write([]byte(trimmed))
+		}
+	case blackfriday.HorizontalRule, blackfriday.Hardbreak:
+		w.Write([]byte(linebreak + "----------" + linebreak))
+	case blackfriday.Code, blackfriday.CodeBlock:
+		w.Write([]byte(linebreak))
+		lines := []string{}
+		for _, line := range strings.Split(string(node.Literal), linebreak) {
+			trimmed := strings.Trim(line, " \t")
+			// Indent four times so that blackfriday accepts this literal as a
+			// Code or CodeBlock again on the next invocation.
+			indented := strings.Repeat(r.Indentation, 4) + trimmed
+			lines = append(lines, indented)
+		}
+		w.Write([]byte(strings.Join(lines, linebreak)))
+	case blackfriday.Image:
+		w.Write(node.LinkData.Destination)
+	case blackfriday.Link:
+		w.Write([]byte(" "))
+		w.Write(node.LinkData.Destination)
+	case blackfriday.Paragraph:
+		if r.listLevel == 0 {
+			w.Write([]byte(linebreak))
+		}
+	case blackfriday.List:
+		if entering {
+			w.Write([]byte(linebreak))
+			r.listLevel++
+		} else {
+			r.listLevel--
+			r.listItemCount = 0
+		}
+	case blackfriday.Item:
+		if entering {
+			r.listItemCount++
+			for i := 0; uint(i) < r.listLevel; i++ {
+				w.Write([]byte(r.Indentation))
+			}
+			if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
+				w.Write([]byte(fmt.Sprintf("%d. ", r.listItemCount)))
+			} else {
+				w.Write([]byte("* "))
+			}
+		} else {
+			w.Write([]byte(linebreak))
+		}
+	default:
+		normalText(w, node.Literal)
+	}
+	return blackfriday.GoToNext
+}
+
+func normalText(w io.Writer, text []byte) {
+	w.Write([]byte(strings.Trim(string(text), " \n\t")))
+}
+
+// RenderHeader writes document preamble and TOC if requested.
+func (r *ASCIIRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
+
+}
+
+// RenderFooter writes document footer.
+func (r *ASCIIRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
+	io.WriteString(w, "\n")
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go b/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go
new file mode 100644
index 0000000000..09094ffdfe
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/normalizers.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package templates
+
+import (
+	"strings"
+
+	"github.com/MakeNowJust/heredoc"
+	"github.com/russross/blackfriday/v2"
+	"github.com/spf13/cobra"
+)
+
+const Indentation = `  `
+
+// LongDesc normalizes a command's long description to follow the conventions.
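+//
+// An illustrative sketch (the resulting string shape is an assumption, not a
+// quoted test fixture):
+//
+//	long := LongDesc(`
+//		Create a resource from a file or from stdin.`)
+//	// heredoc strips the common indent, then markdown() reflows the text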
+func LongDesc(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	return normalizer{s}.heredoc().markdown().trim().string
+}
+
+// Examples normalizes a command's examples to follow the conventions.
+func Examples(s string) string {
+	if len(s) == 0 {
+		return s
+	}
+	return normalizer{s}.trim().indent().string
+}
+
+// Normalize performs all required normalizations on a given command.
+func Normalize(cmd *cobra.Command) *cobra.Command {
+	if len(cmd.Long) > 0 {
+		cmd.Long = LongDesc(cmd.Long)
+	}
+	if len(cmd.Example) > 0 {
+		cmd.Example = Examples(cmd.Example)
+	}
+	return cmd
+}
+
+// NormalizeAll performs all required normalizations in the entire command tree.
+func NormalizeAll(cmd *cobra.Command) *cobra.Command {
+	if cmd.HasSubCommands() {
+		for _, subCmd := range cmd.Commands() {
+			NormalizeAll(subCmd)
+		}
+	}
+	Normalize(cmd)
+	return cmd
+}
+
+// normalizer wraps a string and chains the individual normalization steps.
+type normalizer struct {
+	string
+}
+
+func (s normalizer) markdown() normalizer {
+	bytes := []byte(s.string)
+	formatted := blackfriday.Run(bytes, blackfriday.WithExtensions(blackfriday.NoIntraEmphasis), blackfriday.WithRenderer(&ASCIIRenderer{Indentation: Indentation}))
+	s.string = string(formatted)
+	return s
+}
+
+func (s normalizer) heredoc() normalizer {
+	s.string = heredoc.Doc(s.string)
+	return s
+}
+
+func (s normalizer) trim() normalizer {
+	s.string = strings.TrimSpace(s.string)
+	return s
+}
+
+func (s normalizer) indent() normalizer {
+	indentedLines := []string{}
+	for _, line := range strings.Split(s.string, "\n") {
+		trimmed := strings.TrimSpace(line)
+		indented := Indentation + trimmed
+		indentedLines = append(indentedLines, indented)
+	}
+	s.string = strings.Join(indentedLines, "\n")
+	return s
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/templater.go b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go
new file mode 100644
index 0000000000..8fe181a050
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/templates/templater.go
@@ -0,0 +1,319 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/ + +package templates + +import ( + "bytes" + "fmt" + "strings" + "text/template" + "unicode" + + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" + + "k8s.io/kubectl/pkg/util/term" +) + +type FlagExposer interface { + ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer +} + +func ActsAsRootCommand(cmd *cobra.Command, filters []string, groups ...CommandGroup) FlagExposer { + if cmd == nil { + panic("nil root command") + } + templater := &templater{ + RootCmd: cmd, + UsageTemplate: MainUsageTemplate(), + HelpTemplate: MainHelpTemplate(), + CommandGroups: groups, + Filtered: filters, + } + cmd.SetFlagErrorFunc(templater.FlagErrorFunc()) + cmd.SilenceUsage = true + cmd.SetUsageFunc(templater.UsageFunc()) + cmd.SetHelpFunc(templater.HelpFunc()) + return templater +} + +func UseOptionsTemplates(cmd *cobra.Command) { + templater := &templater{ + UsageTemplate: OptionsUsageTemplate(), + HelpTemplate: OptionsHelpTemplate(), + } + cmd.SetUsageFunc(templater.UsageFunc()) + cmd.SetHelpFunc(templater.HelpFunc()) +} + +type templater struct { + UsageTemplate string + HelpTemplate string + RootCmd *cobra.Command + CommandGroups + Filtered []string +} + +func (templater *templater) FlagErrorFunc(exposedFlags ...string) func(*cobra.Command, error) error { + return func(c *cobra.Command, err error) error { + c.SilenceUsage = true + switch c.CalledAs() { + case "options": + return fmt.Errorf("%s\nRun '%s' without flags.", err, c.CommandPath()) + default: + return fmt.Errorf("%s\nSee '%s --help' for usage.", err, c.CommandPath()) + } + } +} + +func (templater *templater) ExposeFlags(cmd *cobra.Command, flags ...string) FlagExposer { + cmd.SetUsageFunc(templater.UsageFunc(flags...)) + return templater +} + +func (templater *templater) HelpFunc() func(*cobra.Command, []string) { + return func(c *cobra.Command, s []string) { + t := template.New("help") + t.Funcs(templater.templateFuncs()) + template.Must(t.Parse(templater.HelpTemplate)) + out := term.NewResponsiveWriter(c.OutOrStdout()) + err := t.Execute(out, c) + if err != nil { + c.Println(err) + } + } +} + +func (templater *templater) UsageFunc(exposedFlags ...string) func(*cobra.Command) error { + return func(c *cobra.Command) error { + t := template.New("usage") + t.Funcs(templater.templateFuncs(exposedFlags...)) + template.Must(t.Parse(templater.UsageTemplate)) + out := term.NewResponsiveWriter(c.OutOrStderr()) + return t.Execute(out, c) + } +} + +func (templater *templater) templateFuncs(exposedFlags ...string) template.FuncMap { + return template.FuncMap{ + "trim": strings.TrimSpace, + "trimRight": func(s string) string { return strings.TrimRightFunc(s, unicode.IsSpace) }, + "trimLeft": func(s string) string { return strings.TrimLeftFunc(s, unicode.IsSpace) }, + "gt": cobra.Gt, + "eq": cobra.Eq, + "rpad": rpad, + "appendIfNotPresent": appendIfNotPresent, + "flagsNotIntersected": flagsNotIntersected, + "visibleFlags": visibleFlags, + "flagsUsages": flagsUsages, + "cmdGroups": templater.cmdGroups, + "cmdGroupsString": templater.cmdGroupsString, + "rootCmd": templater.rootCmdName, + "isRootCmd": templater.isRootCmd, + "optionsCmdFor": templater.optionsCmdFor, + "usageLine": templater.usageLine, + "reverseParentsNames": templater.reverseParentsNames, + "exposed": func(c *cobra.Command) *flag.FlagSet { + exposed := flag.NewFlagSet("exposed", flag.ContinueOnError) + if len(exposedFlags) > 0 { + for _, name := range exposedFlags { + if flag := c.Flags().Lookup(name); flag != nil { + exposed.AddFlag(flag) + } + } + } + return exposed + }, + 
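+		// The helpers above are referenced by name from the section templates in
+		// templates.go: SectionVars binds {{$explicitlyExposedFlags := exposed .}},
+		// and SectionFlags then renders that set via flagsUsages.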
} +} + +func (templater *templater) cmdGroups(c *cobra.Command, all []*cobra.Command) []CommandGroup { + if len(templater.CommandGroups) > 0 && c == templater.RootCmd { + all = filter(all, templater.Filtered...) + return AddAdditionalCommands(templater.CommandGroups, "Other Commands:", all) + } + all = filter(all, "options") + return []CommandGroup{ + { + Message: "Available Commands:", + Commands: all, + }, + } +} + +func (t *templater) cmdGroupsString(c *cobra.Command) string { + groups := []string{} + for _, cmdGroup := range t.cmdGroups(c, c.Commands()) { + cmds := []string{cmdGroup.Message} + for _, cmd := range cmdGroup.Commands { + if cmd.IsAvailableCommand() { + cmds = append(cmds, " "+rpad(cmd.Name(), cmd.NamePadding())+" "+cmd.Short) + } + } + groups = append(groups, strings.Join(cmds, "\n")) + } + return strings.Join(groups, "\n\n") +} + +func (t *templater) rootCmdName(c *cobra.Command) string { + return t.rootCmd(c).CommandPath() +} + +func (t *templater) reverseParentsNames(c *cobra.Command) []string { + reverseParentsNames := []string{} + parents := t.parents(c) + for i := len(parents) - 1; i >= 0; i-- { + reverseParentsNames = append(reverseParentsNames, parents[i].Name()) + } + return reverseParentsNames +} + +func (t *templater) isRootCmd(c *cobra.Command) bool { + return t.rootCmd(c) == c +} + +func (t *templater) parents(c *cobra.Command) []*cobra.Command { + parents := []*cobra.Command{c} + for current := c; !t.isRootCmd(current) && current.HasParent(); { + current = current.Parent() + parents = append(parents, current) + } + return parents +} + +func (t *templater) rootCmd(c *cobra.Command) *cobra.Command { + if c != nil && !c.HasParent() { + return c + } + if t.RootCmd == nil { + panic("nil root cmd") + } + return t.RootCmd +} + +func (t *templater) optionsCmdFor(c *cobra.Command) string { + if !c.Runnable() { + return "" + } + rootCmdStructure := t.parents(c) + for i := len(rootCmdStructure) - 1; i >= 0; i-- { + cmd := rootCmdStructure[i] + if _, _, err := cmd.Find([]string{"options"}); err == nil { + return cmd.CommandPath() + " options" + } + } + return "" +} + +func (t *templater) usageLine(c *cobra.Command) string { + usage := c.UseLine() + suffix := "[options]" + if c.HasFlags() && !strings.Contains(usage, suffix) { + usage += " " + suffix + } + return usage +} + +// flagsUsages will print out the kubectl help flags +func flagsUsages(f *flag.FlagSet) (string, error) { + flagBuf := new(bytes.Buffer) + wrapLimit, err := term.GetWordWrapperLimit() + if err != nil { + wrapLimit = 0 + } + printer := NewHelpFlagPrinter(flagBuf, wrapLimit) + + f.VisitAll(func(flag *flag.Flag) { + if flag.Hidden { + return + } + printer.PrintHelpFlag(flag) + }) + + return flagBuf.String(), nil +} + +// getFlagFormat will output the flag format +func getFlagFormat(f *flag.Flag) string { + var format string + format = "--%s=%s:\n%s%s" + if f.Value.Type() == "string" { + format = "--%s='%s':\n%s%s" + } + + if len(f.Shorthand) > 0 { + format = " -%s, " + format + } else { + format = " %s" + format + } + + return format +} + +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +func flagsNotIntersected(l *flag.FlagSet, r *flag.FlagSet) *flag.FlagSet { + f := flag.NewFlagSet("notIntersected", flag.ContinueOnError) + l.VisitAll(func(flag *flag.Flag) { + if 
r.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) + return f +} + +func visibleFlags(l *flag.FlagSet) *flag.FlagSet { + hidden := "help" + f := flag.NewFlagSet("visible", flag.ContinueOnError) + l.VisitAll(func(flag *flag.Flag) { + if flag.Name != hidden { + f.AddFlag(flag) + } + }) + return f +} + +func filter(cmds []*cobra.Command, names ...string) []*cobra.Command { + out := []*cobra.Command{} + for _, c := range cmds { + if c.Hidden { + continue + } + skip := false + for _, name := range names { + if name == c.Name() { + skip = true + break + } + } + if skip { + continue + } + out = append(out, c) + } + return out +} diff --git a/vendor/k8s.io/kubectl/pkg/util/templates/templates.go b/vendor/k8s.io/kubectl/pkg/util/templates/templates.go new file mode 100644 index 0000000000..454695c0b2 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/templates/templates.go @@ -0,0 +1,104 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package templates + +import ( + "strings" + "unicode" +) + +const ( + // SectionVars is the help template section that declares variables to be used in the template. + SectionVars = `{{$isRootCmd := isRootCmd .}}` + + `{{$rootCmd := rootCmd .}}` + + `{{$visibleFlags := visibleFlags (flagsNotIntersected .LocalFlags .PersistentFlags)}}` + + `{{$explicitlyExposedFlags := exposed .}}` + + `{{$optionsCmdFor := optionsCmdFor .}}` + + `{{$usageLine := usageLine .}}` + + `{{$reverseParentsNames := reverseParentsNames .}}` + + // SectionAliases is the help template section that displays command aliases. + SectionAliases = `{{if gt .Aliases 0}}Aliases: +{{.NameAndAliases}} + +{{end}}` + + // SectionExamples is the help template section that displays command examples. + SectionExamples = `{{if .HasExample}}Examples: +{{trimRight .Example}} + +{{end}}` + + // SectionSubcommands is the help template section that displays the command's subcommands. + SectionSubcommands = `{{if .HasAvailableSubCommands}}{{cmdGroupsString .}} + +{{end}}` + + // SectionFlags is the help template section that displays the command's flags. + SectionFlags = `{{ if or $visibleFlags.HasFlags $explicitlyExposedFlags.HasFlags}}Options: +{{ if $visibleFlags.HasFlags}}{{trimRight (flagsUsages $visibleFlags)}}{{end}}{{ if $explicitlyExposedFlags.HasFlags}}{{ if $visibleFlags.HasFlags}} +{{end}}{{trimRight (flagsUsages $explicitlyExposedFlags)}}{{end}} + +{{end}}` + + // SectionUsage is the help template section that displays the command's usage. + SectionUsage = `{{if and .Runnable (ne .UseLine "") (ne .UseLine $rootCmd)}}Usage: + {{$usageLine}} + +{{end}}` + + // SectionTipsHelp is the help template section that displays the '--help' hint. + SectionTipsHelp = `{{if .HasSubCommands}}Use "{{range $reverseParentsNames}}{{.}} {{end}} --help" for more information about a given command. +{{end}}` + + // SectionTipsGlobalOptions is the help template section that displays the 'options' hint for displaying global flags. 
+	SectionTipsGlobalOptions = `{{if $optionsCmdFor}}Use "{{$optionsCmdFor}}" for a list of global command-line options (applies to all commands).
+{{end}}`
+)
+
+// MainHelpTemplate is the template for 'help' used by most commands.
+func MainHelpTemplate() string {
+	return `{{with or .Long .Short }}{{. | trim}}{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
+}
+
+// MainUsageTemplate is the template for 'usage' used by most commands.
+func MainUsageTemplate() string {
+	sections := []string{
+		"\n\n",
+		SectionVars,
+		SectionAliases,
+		SectionExamples,
+		SectionSubcommands,
+		SectionFlags,
+		SectionUsage,
+		SectionTipsHelp,
+		SectionTipsGlobalOptions,
+	}
+	return strings.TrimRightFunc(strings.Join(sections, ""), unicode.IsSpace)
+}
+
+// OptionsHelpTemplate is the template for 'help' used by the 'options' command.
+func OptionsHelpTemplate() string {
+	return ""
+}
+
+// OptionsUsageTemplate is the template for 'usage' used by the 'options' command.
+func OptionsUsageTemplate() string {
+	return `{{ if .HasInheritedFlags}}The following options can be passed to any command:
+
+{{flagsUsages .InheritedFlags}}{{end}}`
+}
diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resize.go b/vendor/k8s.io/kubectl/pkg/util/term/resize.go
new file mode 100644
index 0000000000..636b8bef45
--- /dev/null
+++ b/vendor/k8s.io/kubectl/pkg/util/term/resize.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package term
+
+import (
+	"fmt"
+
+	"github.com/moby/term"
+	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/client-go/tools/remotecommand"
+)
+
+// GetSize returns the current size of the user's terminal. If it isn't a terminal,
+// nil is returned.
+func (t TTY) GetSize() *remotecommand.TerminalSize {
+	outFd, isTerminal := term.GetFdInfo(t.Out)
+	if !isTerminal {
+		return nil
+	}
+	return GetSize(outFd)
+}
+
+// GetSize returns the current size of the terminal associated with fd.
+func GetSize(fd uintptr) *remotecommand.TerminalSize {
+	winsize, err := term.GetWinsize(fd)
+	if err != nil {
+		runtime.HandleError(fmt.Errorf("unable to get terminal size: %v", err))
+		return nil
+	}
+
+	return &remotecommand.TerminalSize{Width: winsize.Width, Height: winsize.Height}
+}
+
+// MonitorSize monitors the terminal's size. It returns a TerminalSizeQueue primed with
+// initialSizes, or nil if there's no TTY present.
+func (t *TTY) MonitorSize(initialSizes ...*remotecommand.TerminalSize) remotecommand.TerminalSizeQueue {
+	outFd, isTerminal := term.GetFdInfo(t.Out)
+	if !isTerminal {
+		return nil
+	}
+
+	t.sizeQueue = &sizeQueue{
+		t: *t,
+		// make it buffered so we can send the initial terminal sizes without blocking, prior to starting
+		// the streaming below
+		resizeChan:   make(chan remotecommand.TerminalSize, len(initialSizes)),
+		stopResizing: make(chan struct{}),
+	}
+
+	t.sizeQueue.monitorSize(outFd, initialSizes...)
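+	// monitorSize primes resizeChan with the initial sizes and then streams
+	// resize events from the platform-specific monitor; consumers drain them
+	// through sizeQueue.Next.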
+ + return t.sizeQueue +} + +// sizeQueue implements remotecommand.TerminalSizeQueue +type sizeQueue struct { + t TTY + // resizeChan receives a Size each time the user's terminal is resized. + resizeChan chan remotecommand.TerminalSize + stopResizing chan struct{} +} + +// make sure sizeQueue implements the resize.TerminalSizeQueue interface +var _ remotecommand.TerminalSizeQueue = &sizeQueue{} + +// monitorSize primes resizeChan with initialSizes and then monitors for resize events. With each +// new event, it sends the current terminal size to resizeChan. +func (s *sizeQueue) monitorSize(outFd uintptr, initialSizes ...*remotecommand.TerminalSize) { + // send the initial sizes + for i := range initialSizes { + if initialSizes[i] != nil { + s.resizeChan <- *initialSizes[i] + } + } + + resizeEvents := make(chan remotecommand.TerminalSize, 1) + + monitorResizeEvents(outFd, resizeEvents, s.stopResizing) + + // listen for resize events in the background + go func() { + defer runtime.HandleCrash() + + for { + select { + case size, ok := <-resizeEvents: + if !ok { + return + } + + select { + // try to send the size to resizeChan, but don't block + case s.resizeChan <- size: + // send successful + default: + // unable to send / no-op + } + case <-s.stopResizing: + return + } + } + }() +} + +// Next returns the new terminal size after the terminal has been resized. It returns nil when +// monitoring has been stopped. +func (s *sizeQueue) Next() *remotecommand.TerminalSize { + size, ok := <-s.resizeChan + if !ok { + return nil + } + return &size +} + +// stop stops the background goroutine that is monitoring for terminal resizes. +func (s *sizeQueue) stop() { + close(s.stopResizing) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go new file mode 100644 index 0000000000..e361b1adb3 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents.go @@ -0,0 +1,62 @@ +//go:build !windows +// +build !windows + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "os" + "os/signal" + + "golang.org/x/sys/unix" + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// monitorResizeEvents spawns a goroutine that waits for SIGWINCH signals (these indicate the +// terminal has resized). After receiving a SIGWINCH, this gets the terminal size and tries to send +// it to the resizeEvents channel. The goroutine stops when the stop channel is closed. 
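The sizeQueue just defined is the producer half of remotecommand.TerminalSizeQueue: resize events are dropped rather than queued when the consumer lags, and Next returns nil once monitoring stops. A sketch of the consumer side, with a toy staticQueue (an illustrative type, not part of the vendored code) standing in for a live terminal; the Unix SIGWINCH producer follows below:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/remotecommand"
)

// drainSizes shows the consumer side of the contract: Next blocks until a
// resize arrives and returns nil once monitoring stops, which ends the loop.
func drainSizes(q remotecommand.TerminalSizeQueue) {
	for {
		size := q.Next()
		if size == nil {
			return
		}
		fmt.Printf("terminal resized to %dx%d\n", size.Width, size.Height)
	}
}

// staticQueue is a toy TerminalSizeQueue used in place of the real sizeQueue.
type staticQueue struct{ sizes []remotecommand.TerminalSize }

func (s *staticQueue) Next() *remotecommand.TerminalSize {
	if len(s.sizes) == 0 {
		return nil
	}
	next := s.sizes[0]
	s.sizes = s.sizes[1:]
	return &next
}

func main() {
	drainSizes(&staticQueue{sizes: []remotecommand.TerminalSize{{Width: 120, Height: 40}}})
}
```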
+func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) { + go func() { + defer runtime.HandleCrash() + + winch := make(chan os.Signal, 1) + signal.Notify(winch, unix.SIGWINCH) + defer signal.Stop(winch) + + for { + select { + case <-winch: + size := GetSize(fd) + if size == nil { + return + } + + // try to send size + select { + case resizeEvents <- *size: + // success + default: + // not sent + } + case <-stop: + return + } + } + }() +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go new file mode 100644 index 0000000000..adccf87346 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/resizeevents_windows.go @@ -0,0 +1,62 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "time" + + "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/remotecommand" +) + +// monitorResizeEvents spawns a goroutine that periodically gets the terminal size and tries to send +// it to the resizeEvents channel if the size has changed. The goroutine stops when the stop channel +// is closed. +func monitorResizeEvents(fd uintptr, resizeEvents chan<- remotecommand.TerminalSize, stop chan struct{}) { + go func() { + defer runtime.HandleCrash() + + size := GetSize(fd) + if size == nil { + return + } + lastSize := *size + + for { + // see if we need to stop running + select { + case <-stop: + return + default: + } + + size := GetSize(fd) + if size == nil { + return + } + + if size.Height != lastSize.Height || size.Width != lastSize.Width { + lastSize.Height = size.Height + lastSize.Width = size.Width + resizeEvents <- *size + } + + // sleep to avoid hot looping + time.Sleep(250 * time.Millisecond) + } + }() +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/term.go b/vendor/k8s.io/kubectl/pkg/util/term/term.go new file mode 100644 index 0000000000..93a992fe31 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/term.go @@ -0,0 +1,115 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "io" + "os" + + "k8s.io/cli-runtime/pkg/printers" + + "github.com/moby/term" + + "k8s.io/kubectl/pkg/util/interrupt" +) + +// SafeFunc is a function to be invoked by TTY. +type SafeFunc func() error + +// TTY helps invoke a function and preserve the state of the terminal, even if the process is +// terminated during execution. 
It also provides support for terminal resizing for remote command +// execution/attachment. +type TTY struct { + // In is a reader representing stdin. It is a required field. + In io.Reader + // Out is a writer representing stdout. It must be set to support terminal resizing. It is an + // optional field. + Out io.Writer + // Raw is true if the terminal should be set raw. + Raw bool + // TryDev indicates the TTY should try to open /dev/tty if the provided input + // is not a file descriptor. + TryDev bool + // Parent is an optional interrupt handler provided to this function - if provided + // it will be invoked after the terminal state is restored. If it is not provided, + // a signal received during the TTY will result in os.Exit(0) being invoked. + Parent *interrupt.Handler + + // sizeQueue is set after a call to MonitorSize() and is used to monitor SIGWINCH signals when the + // user's terminal resizes. + sizeQueue *sizeQueue +} + +// IsTerminalIn returns true if t.In is a terminal. Does not check /dev/tty +// even if TryDev is set. +func (t TTY) IsTerminalIn() bool { + return printers.IsTerminal(t.In) +} + +// IsTerminalOut returns true if t.Out is a terminal. Does not check /dev/tty +// even if TryDev is set. +func (t TTY) IsTerminalOut() bool { + return printers.IsTerminal(t.Out) +} + +// IsTerminal returns whether the passed object is a terminal or not. +// Deprecated: use printers.IsTerminal instead. +var IsTerminal = printers.IsTerminal + +// AllowsColorOutput returns true if the specified writer is a terminal and +// the process environment indicates color output is supported and desired. +// Deprecated: use printers.AllowsColorOutput instead. +var AllowsColorOutput = printers.AllowsColorOutput + +// Safe invokes the provided function and will attempt to ensure that when the +// function returns (or a termination signal is sent) that the terminal state +// is reset to the condition it was in prior to the function being invoked. If +// t.Raw is true the terminal will be put into raw mode prior to calling the function. +// If the input file descriptor is not a TTY and TryDev is true, the /dev/tty file +// will be opened (if available). +func (t TTY) Safe(fn SafeFunc) error { + inFd, isTerminal := term.GetFdInfo(t.In) + + if !isTerminal && t.TryDev { + if f, err := os.Open("/dev/tty"); err == nil { + defer f.Close() + inFd = f.Fd() + isTerminal = term.IsTerminal(inFd) + } + } + if !isTerminal { + return fn() + } + + var state *term.State + var err error + if t.Raw { + state, err = term.MakeRaw(inFd) + } else { + state, err = term.SaveState(inFd) + } + if err != nil { + return err + } + return interrupt.Chain(t.Parent, func() { + if t.sizeQueue != nil { + t.sizeQueue.stop() + } + + term.RestoreTerminal(inFd, state) + }).Run(fn) +} diff --git a/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go b/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go new file mode 100644 index 0000000000..e3f6008802 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/util/term/term_writer.go @@ -0,0 +1,146 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
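Briefly interrupting the next license header: the term.go file above is consumed roughly as follows. Safe saves the terminal state (raw mode when Raw is true), runs the callback, and restores the state even on error or interrupt; when stdin is not a terminal, the callback simply runs directly. A hedged usage sketch, where the flag choices are only an example:

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/kubectl/pkg/util/term"
)

func main() {
	t := term.TTY{
		In:  os.Stdin,
		Out: os.Stdout,
		Raw: true, // put the terminal into raw mode for the duration of fn
	}

	// Safe restores the saved terminal state even if fn fails or a signal
	// arrives; when stdin is not a TTY it invokes fn directly.
	err := t.Safe(func() error {
		fmt.Fprintln(t.Out, "running with the terminal in raw mode")
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```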
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package term + +import ( + "errors" + "io" + "os" + + wordwrap "github.com/mitchellh/go-wordwrap" + "github.com/moby/term" + + "k8s.io/client-go/tools/remotecommand" +) + +type wordWrapWriter struct { + limit uint + writer io.Writer +} + +// NewResponsiveWriter creates a Writer that detects the column width of the +// terminal we are in, and adjusts every line width to fit and use recommended +// terminal sizes for better readability. Does proper word wrapping automatically. +// +// if terminal width >= 120 columns use 120 columns +// if terminal width >= 100 columns use 100 columns +// if terminal width >= 80 columns use 80 columns +// +// In case we're not in a terminal or if it's smaller than 80 columns width, +// doesn't do any wrapping. +func NewResponsiveWriter(w io.Writer) io.Writer { + file, ok := w.(*os.File) + if !ok { + return w + } + fd := file.Fd() + if !term.IsTerminal(fd) { + return w + } + + terminalSize := GetSize(fd) + if terminalSize == nil { + return w + } + limit := getTerminalLimitWidth(terminalSize) + + return NewWordWrapWriter(w, limit) +} + +// NewWordWrapWriter is a Writer that supports a limit of characters on every line +// and does auto word wrapping that respects that limit. +func NewWordWrapWriter(w io.Writer, limit uint) io.Writer { + return &wordWrapWriter{ + limit: limit, + writer: w, + } +} + +func getTerminalLimitWidth(terminalSize *remotecommand.TerminalSize) uint { + var limit uint + switch { + case terminalSize.Width >= 120: + limit = 120 + case terminalSize.Width >= 100: + limit = 100 + case terminalSize.Width >= 80: + limit = 80 + } + return limit +} + +func GetWordWrapperLimit() (uint, error) { + stdout := os.Stdout + fd := stdout.Fd() + if !term.IsTerminal(fd) { + return 0, errors.New("file descriptor is not a terminal") + } + terminalSize := GetSize(fd) + if terminalSize == nil { + return 0, errors.New("terminal size is nil") + } + return getTerminalLimitWidth(terminalSize), nil +} + +func (w wordWrapWriter) Write(p []byte) (nn int, err error) { + if w.limit == 0 { + return w.writer.Write(p) + } + original := string(p) + wrapped := wordwrap.WrapString(original, w.limit) + return w.writer.Write([]byte(wrapped)) +} + +// NewPunchCardWriter is a NewWordWrapWriter that limits the line width to 80 columns. +func NewPunchCardWriter(w io.Writer) io.Writer { + return NewWordWrapWriter(w, 80) +} + +type maxWidthWriter struct { + maxWidth uint + currentWidth uint + written uint + writer io.Writer +} + +// NewMaxWidthWriter is a Writer that supports a limit of characters on every +// line, but doesn't do any word wrapping automatically. +func NewMaxWidthWriter(w io.Writer, maxWidth uint) io.Writer { + return &maxWidthWriter{ + maxWidth: maxWidth, + writer: w, + } +} + +func (m maxWidthWriter) Write(p []byte) (nn int, err error) { + for _, b := range p { + if m.currentWidth == m.maxWidth { + m.writer.Write([]byte{'\n'}) + m.currentWidth = 0 + } + if b == '\n' { + m.currentWidth = 0 + } + _, err := m.writer.Write([]byte{b}) + if err != nil { + return int(m.written), err + } + m.written++ + m.currentWidth++ + } + return len(p), nil +} diff --git a/vendor/k8s.io/kubectl/pkg/validation/schema.go b/vendor/k8s.io/kubectl/pkg/validation/schema.go new file mode 100644 index 0000000000..fb8841b9c4 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/validation/schema.go @@ -0,0 +1,153 @@ +/* +Copyright 2014 The Kubernetes Authors. 
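The word-wrap writers above keep long help text readable: NewResponsiveWriter snaps the wrap limit to 120, 100, or 80 columns based on the detected terminal width, and returns the writer unchanged when stdout is not a terminal or is narrower than 80 columns. A small usage sketch (the sample strings are illustrative):

```go
package main

import (
	"fmt"
	"os"

	"k8s.io/kubectl/pkg/util/term"
)

func main() {
	// When stdout is a terminal of at least 80 columns, output is wrapped to
	// the nearest of 120/100/80; otherwise writes pass through untouched.
	out := term.NewResponsiveWriter(os.Stdout)

	// A fixed limit can also be requested directly.
	narrow := term.NewWordWrapWriter(os.Stdout, 40)

	fmt.Fprintln(out, "kueuectl help text flows through here and is wrapped to a readable width")
	fmt.Fprintln(narrow, "this long sentence is wrapped at forty columns for demonstration purposes")
}
```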
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "bytes" + "encoding/json" + "fmt" + + ejson "github.com/exponent-io/jsonpath" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/cli-runtime/pkg/resource" + "k8s.io/klog/v2" +) + +// Schema is an interface that knows how to validate an API object serialized to a byte array. +type Schema interface { + ValidateBytes(data []byte) error +} + +// NullSchema always validates bytes. +type NullSchema struct{} + +// ValidateBytes never fails for NullSchema. +func (NullSchema) ValidateBytes(data []byte) error { return nil } + +// NoDoubleKeySchema is a schema that disallows double keys. +type NoDoubleKeySchema struct{} + +// ValidateBytes validates bytes. +func (NoDoubleKeySchema) ValidateBytes(data []byte) error { + var list []error + if err := validateNoDuplicateKeys(data, "metadata", "labels"); err != nil { + list = append(list, err) + } + if err := validateNoDuplicateKeys(data, "metadata", "annotations"); err != nil { + list = append(list, err) + } + return utilerrors.NewAggregate(list) +} + +func validateNoDuplicateKeys(data []byte, path ...string) error { + r := ejson.NewDecoder(bytes.NewReader(data)) + // This is Go being unfriendly. The 'path ...string' comes in as a + // []string, and SeekTo takes ...interface{}, so we can't just pass + // the path straight in, we have to copy it. *sigh* + ifacePath := []interface{}{} + for ix := range path { + ifacePath = append(ifacePath, path[ix]) + } + found, err := r.SeekTo(ifacePath...) + if err != nil { + return err + } + if !found { + return nil + } + seen := map[string]bool{} + for { + tok, err := r.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case json.Delim: + if t.String() == "}" { + return nil + } + case ejson.KeyString: + if seen[string(t)] { + return fmt.Errorf("duplicate key: %s", string(t)) + } + seen[string(t)] = true + } + } +} + +// ConjunctiveSchema encapsulates a schema list. +type ConjunctiveSchema []Schema + +// ValidateBytes validates bytes per a ConjunctiveSchema. +func (c ConjunctiveSchema) ValidateBytes(data []byte) error { + var list []error + schemas := []Schema(c) + for ix := range schemas { + if err := schemas[ix].ValidateBytes(data); err != nil { + list = append(list, err) + } + } + return utilerrors.NewAggregate(list) +} + +func NewParamVerifyingSchema(s Schema, verifier resource.Verifier, directive string) Schema { + return &paramVerifyingSchema{ + schema: s, + verifier: verifier, + directive: directive, + } +} + +// paramVerifyingSchema only performs validation +// based on the fieldValidation query param +// being unsupported by the apiserver, because +// server-side validation will be performed instead +// of client-side validation.
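Before the paramVerifyingSchema definition that follows, note how the pieces above compose: ConjunctiveSchema runs every member schema and aggregates all their errors, so the duplicate-key check can be chained with any other Schema. A sketch of that composition (the manifest literal is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/kubectl/pkg/validation"
)

func main() {
	// Chain the duplicate-key check with a no-op schema; ValidateBytes
	// aggregates whatever errors the members report.
	schema := validation.ConjunctiveSchema{
		validation.NoDoubleKeySchema{},
		validation.NullSchema{},
	}

	manifest := []byte(`{"metadata":{"labels":{"app":"a","app":"b"}}}`)
	if err := schema.ValidateBytes(manifest); err != nil {
		fmt.Println("validation failed:", err) // duplicate key: app
	}
}
```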
+type paramVerifyingSchema struct { + schema Schema + verifier resource.Verifier + directive string +} + +// ValidateBytes validates bytes per a ParamVerifyingSchema +func (c *paramVerifyingSchema) ValidateBytes(data []byte) error { + obj, err := parse(data) + if err != nil { + return err + } + + gvk, errs := getObjectKind(obj) + if errs != nil { + return utilerrors.NewAggregate(errs) + } + + err = c.verifier.HasSupport(gvk) + if resource.IsParamUnsupportedError(err) { + switch c.directive { + case metav1.FieldValidationStrict: + return c.schema.ValidateBytes(data) + case metav1.FieldValidationWarn: + klog.Warningf("cannot perform warn validation if server-side field validation is unsupported, skipping validation") + default: + // can't be reached + klog.Warningf("unexpected field validation directive: %s, skipping validation", c.directive) + } + return nil + } + return err +} diff --git a/vendor/k8s.io/kubectl/pkg/validation/validation.go b/vendor/k8s.io/kubectl/pkg/validation/validation.go new file mode 100644 index 0000000000..47c74e5b85 --- /dev/null +++ b/vendor/k8s.io/kubectl/pkg/validation/validation.go @@ -0,0 +1,144 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "errors" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/kube-openapi/pkg/util/proto/validation" + "k8s.io/kubectl/pkg/util/openapi" +) + +// schemaValidation validates the object against an OpenAPI schema. +type schemaValidation struct { + resourcesGetter openapi.OpenAPIResourcesGetter +} + +// NewSchemaValidation creates a new Schema that can be used +// to validate objects. +func NewSchemaValidation(resourcesGetter openapi.OpenAPIResourcesGetter) Schema { + return &schemaValidation{ + resourcesGetter: resourcesGetter, + } +} + +// ValidateBytes validates the object using the Resources +// object. +func (v *schemaValidation) ValidateBytes(data []byte) error { + obj, err := parse(data) + if err != nil { + return err + } + + gvk, errs := getObjectKind(obj) + if errs != nil { + return utilerrors.NewAggregate(errs) + } + + if (gvk == schema.GroupVersionKind{Version: "v1", Kind: "List"}) { + return utilerrors.NewAggregate(v.validateList(obj)) + } + return utilerrors.NewAggregate(v.validateResource(obj, gvk)) +} + +func (v *schemaValidation) validateList(object interface{}) []error { + fields, ok := object.(map[string]interface{}) + if !ok || fields == nil { + return []error{errors.New("invalid object to validate")} + } + + allErrors := []error{} + if _, ok := fields["items"].([]interface{}); !ok { + return []error{errors.New("invalid object to validate")} + } + for _, item := range fields["items"].([]interface{}) { + if gvk, errs := getObjectKind(item); errs != nil { + allErrors = append(allErrors, errs...) + } else { + allErrors = append(allErrors, v.validateResource(item, gvk)...)
+ } + } + return allErrors +} + +func (v *schemaValidation) validateResource(obj interface{}, gvk schema.GroupVersionKind) []error { + // This lazy-loads the OpenAPI V2 specifications, caching the specs. + resources, err := v.resourcesGetter.OpenAPISchema() + if err != nil { + return []error{err} + } + resource := resources.LookupResource(gvk) + if resource == nil { + // resource is not present, let's just skip validation. + return nil + } + + return validation.ValidateModel(obj, resource, gvk.Kind) +} + +func parse(data []byte) (interface{}, error) { + var obj interface{} + out, err := yaml.ToJSON(data) + if err != nil { + return nil, err + } + if err := json.Unmarshal(out, &obj); err != nil { + return nil, err + } + return obj, nil +} + +func getObjectKind(object interface{}) (schema.GroupVersionKind, []error) { + var listErrors []error + fields, ok := object.(map[string]interface{}) + if !ok || fields == nil { + listErrors = append(listErrors, errors.New("invalid object to validate")) + return schema.GroupVersionKind{}, listErrors + } + + var group string + var version string + apiVersion := fields["apiVersion"] + if apiVersion == nil { + listErrors = append(listErrors, errors.New("apiVersion not set")) + } else if _, ok := apiVersion.(string); !ok { + listErrors = append(listErrors, errors.New("apiVersion isn't string type")) + } else { + gv, err := schema.ParseGroupVersion(apiVersion.(string)) + if err != nil { + listErrors = append(listErrors, err) + } else { + group = gv.Group + version = gv.Version + } + } + kind := fields["kind"] + if kind == nil { + listErrors = append(listErrors, errors.New("kind not set")) + } else if _, ok := kind.(string); !ok { + listErrors = append(listErrors, errors.New("kind isn't string type")) + } + if listErrors != nil { + return schema.GroupVersionKind{}, listErrors + } + + return schema.GroupVersionKind{Group: group, Version: version, Kind: kind.(string)}, nil +} diff --git a/vendor/k8s.io/utils/exec/README.md b/vendor/k8s.io/utils/exec/README.md new file mode 100644 index 0000000000..7944e8dd3b --- /dev/null +++ b/vendor/k8s.io/utils/exec/README.md @@ -0,0 +1,5 @@ +# Exec + +This package provides an interface for `os/exec`. It makes it easier to mock +and replace in tests, especially with the [FakeExec](testing/fake_exec.go) +struct. diff --git a/vendor/k8s.io/utils/exec/doc.go b/vendor/k8s.io/utils/exec/doc.go new file mode 100644 index 0000000000..cbb44bdb5d --- /dev/null +++ b/vendor/k8s.io/utils/exec/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package exec provides an injectable interface and implementations for running commands. +package exec // import "k8s.io/utils/exec" diff --git a/vendor/k8s.io/utils/exec/exec.go b/vendor/k8s.io/utils/exec/exec.go new file mode 100644 index 0000000000..d9c91e3ca3 --- /dev/null +++ b/vendor/k8s.io/utils/exec/exec.go @@ -0,0 +1,256 @@ +/* +Copyright 2017 The Kubernetes Authors. 
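Before moving on to the exec package that begins below, one note on validation.go above: parse and getObjectKind are unexported, but their YAML-to-JSON-to-GVK pipeline can be mirrored with the same apimachinery helpers. A simplified, hedged sketch (gvkOf is an illustrative name, not part of the vendored package):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/yaml"
)

// gvkOf mirrors the parse/getObjectKind pair in simplified form: YAML is
// converted to JSON, decoded generically, and apiVersion/kind are pulled
// out into a GroupVersionKind.
func gvkOf(data []byte) (schema.GroupVersionKind, error) {
	out, err := yaml.ToJSON(data)
	if err != nil {
		return schema.GroupVersionKind{}, err
	}
	var obj map[string]interface{}
	if err := json.Unmarshal(out, &obj); err != nil {
		return schema.GroupVersionKind{}, err
	}
	apiVersion, _ := obj["apiVersion"].(string)
	kind, _ := obj["kind"].(string)
	gv, err := schema.ParseGroupVersion(apiVersion)
	if err != nil {
		return schema.GroupVersionKind{}, err
	}
	return gv.WithKind(kind), nil
}

func main() {
	gvk, err := gvkOf([]byte("apiVersion: kueue.x-k8s.io/v1beta1\nkind: LocalQueue\n"))
	fmt.Println(gvk, err) // kueue.x-k8s.io/v1beta1, Kind=LocalQueue <nil>
}
```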
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "context" + "io" + "io/fs" + osexec "os/exec" + "syscall" + "time" +) + +// ErrExecutableNotFound is returned if the executable is not found. +var ErrExecutableNotFound = osexec.ErrNotFound + +// Interface is an interface that presents a subset of the os/exec API. Use this +// when you want to inject fakeable/mockable exec behavior. +type Interface interface { + // Command returns a Cmd instance which can be used to run a single command. + // This follows the pattern of package os/exec. + Command(cmd string, args ...string) Cmd + + // CommandContext returns a Cmd instance which can be used to run a single command. + // + // The provided context is used to kill the process if the context becomes done + // before the command completes on its own. For example, a timeout can be set in + // the context. + CommandContext(ctx context.Context, cmd string, args ...string) Cmd + + // LookPath wraps os/exec.LookPath + LookPath(file string) (string, error) +} + +// Cmd is an interface that presents an API that is very similar to Cmd from os/exec. +// As more functionality is needed, this can grow. Since Cmd is a struct, we will have +// to replace fields with get/set method pairs. +type Cmd interface { + // Run runs the command to the completion. + Run() error + // CombinedOutput runs the command and returns its combined standard output + // and standard error. This follows the pattern of package os/exec. + CombinedOutput() ([]byte, error) + // Output runs the command and returns standard output, but not standard err + Output() ([]byte, error) + SetDir(dir string) + SetStdin(in io.Reader) + SetStdout(out io.Writer) + SetStderr(out io.Writer) + SetEnv(env []string) + + // StdoutPipe and StderrPipe for getting the process' Stdout and Stderr as + // Readers + StdoutPipe() (io.ReadCloser, error) + StderrPipe() (io.ReadCloser, error) + + // Start and Wait are for running a process non-blocking + Start() error + Wait() error + + // Stops the command by sending SIGTERM. It is not guaranteed the + // process will stop before this function returns. If the process is not + // responding, an internal timer function will send a SIGKILL to force + // terminate after 10 seconds. + Stop() +} + +// ExitError is an interface that presents an API similar to os.ProcessState, which is +// what ExitError from os/exec is. This is designed to make testing a bit easier and +// probably loses some of the cross-platform properties of the underlying library. +type ExitError interface { + String() string + Error() string + Exited() bool + ExitStatus() int +} + +// Implements Interface in terms of really exec()ing. +type executor struct{} + +// New returns a new Interface which will os/exec to run commands. +func New() Interface { + return &executor{} +} + +// Command is part of the Interface interface. 
+func (executor *executor) Command(cmd string, args ...string) Cmd { + return (*cmdWrapper)(maskErrDotCmd(osexec.Command(cmd, args...))) +} + +// CommandContext is part of the Interface interface. +func (executor *executor) CommandContext(ctx context.Context, cmd string, args ...string) Cmd { + return (*cmdWrapper)(maskErrDotCmd(osexec.CommandContext(ctx, cmd, args...))) +} + +// LookPath is part of the Interface interface +func (executor *executor) LookPath(file string) (string, error) { + path, err := osexec.LookPath(file) + return path, handleError(maskErrDot(err)) +} + +// Wraps exec.Cmd so we can capture errors. +type cmdWrapper osexec.Cmd + +var _ Cmd = &cmdWrapper{} + +func (cmd *cmdWrapper) SetDir(dir string) { + cmd.Dir = dir +} + +func (cmd *cmdWrapper) SetStdin(in io.Reader) { + cmd.Stdin = in +} + +func (cmd *cmdWrapper) SetStdout(out io.Writer) { + cmd.Stdout = out +} + +func (cmd *cmdWrapper) SetStderr(out io.Writer) { + cmd.Stderr = out +} + +func (cmd *cmdWrapper) SetEnv(env []string) { + cmd.Env = env +} + +func (cmd *cmdWrapper) StdoutPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StdoutPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) StderrPipe() (io.ReadCloser, error) { + r, err := (*osexec.Cmd)(cmd).StderrPipe() + return r, handleError(err) +} + +func (cmd *cmdWrapper) Start() error { + err := (*osexec.Cmd)(cmd).Start() + return handleError(err) +} + +func (cmd *cmdWrapper) Wait() error { + err := (*osexec.Cmd)(cmd).Wait() + return handleError(err) +} + +// Run is part of the Cmd interface. +func (cmd *cmdWrapper) Run() error { + err := (*osexec.Cmd)(cmd).Run() + return handleError(err) +} + +// CombinedOutput is part of the Cmd interface. +func (cmd *cmdWrapper) CombinedOutput() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).CombinedOutput() + return out, handleError(err) +} + +func (cmd *cmdWrapper) Output() ([]byte, error) { + out, err := (*osexec.Cmd)(cmd).Output() + return out, handleError(err) +} + +// Stop is part of the Cmd interface. +func (cmd *cmdWrapper) Stop() { + c := (*osexec.Cmd)(cmd) + + if c.Process == nil { + return + } + + c.Process.Signal(syscall.SIGTERM) + + time.AfterFunc(10*time.Second, func() { + if !c.ProcessState.Exited() { + c.Process.Signal(syscall.SIGKILL) + } + }) +} + +func handleError(err error) error { + if err == nil { + return nil + } + + switch e := err.(type) { + case *osexec.ExitError: + return &ExitErrorWrapper{e} + case *fs.PathError: + return ErrExecutableNotFound + case *osexec.Error: + if e.Err == osexec.ErrNotFound { + return ErrExecutableNotFound + } + } + + return err +} + +// ExitErrorWrapper is an implementation of ExitError in terms of os/exec ExitError. +// Note: standard exec.ExitError is type *os.ProcessState, which already implements Exited(). +type ExitErrorWrapper struct { + *osexec.ExitError +} + +var _ ExitError = &ExitErrorWrapper{} + +// ExitStatus is part of the ExitError interface. +func (eew ExitErrorWrapper) ExitStatus() int { + ws, ok := eew.Sys().(syscall.WaitStatus) + if !ok { + panic("can't call ExitStatus() on a non-WaitStatus exitErrorWrapper") + } + return ws.ExitStatus() +} + +// CodeExitError is an implementation of ExitError consisting of an error object +// and an exit code (the upper bits of os.exec.ExitStatus). 
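Putting the wrapper above together: New returns an Interface whose commands surface failures as the package's own ExitError, so callers can inspect exit codes without depending on os/exec types directly. A minimal usage sketch, where the echo invocation and the timeout are only examples:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/utils/exec"
)

func main() {
	execer := exec.New()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	out, err := execer.CommandContext(ctx, "echo", "hello").CombinedOutput()
	if err != nil {
		// Failed commands surface as the package's ExitError, so the exit
		// status is available without touching os/exec types.
		if exitErr, ok := err.(exec.ExitError); ok {
			fmt.Println("exit status:", exitErr.ExitStatus())
		}
		return
	}
	fmt.Print(string(out))
}
```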
+type CodeExitError struct { + Err error + Code int +} + +var _ ExitError = CodeExitError{} + +func (e CodeExitError) Error() string { + return e.Err.Error() +} + +func (e CodeExitError) String() string { + return e.Err.Error() +} + +// Exited is to check if the process has finished +func (e CodeExitError) Exited() bool { + return true +} + +// ExitStatus is for checking the error code +func (e CodeExitError) ExitStatus() int { + return e.Code +} diff --git a/vendor/k8s.io/utils/exec/fixup_go118.go b/vendor/k8s.io/utils/exec/fixup_go118.go new file mode 100644 index 0000000000..acf45f1cd5 --- /dev/null +++ b/vendor/k8s.io/utils/exec/fixup_go118.go @@ -0,0 +1,32 @@ +//go:build !go1.19 +// +build !go1.19 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + osexec "os/exec" +) + +func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { + return cmd +} + +func maskErrDot(err error) error { + return err +} diff --git a/vendor/k8s.io/utils/exec/fixup_go119.go b/vendor/k8s.io/utils/exec/fixup_go119.go new file mode 100644 index 0000000000..55874c9297 --- /dev/null +++ b/vendor/k8s.io/utils/exec/fixup_go119.go @@ -0,0 +1,40 @@ +//go:build go1.19 +// +build go1.19 + +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package exec + +import ( + "errors" + osexec "os/exec" +) + +// maskErrDotCmd reverts the behavior of osexec.Cmd to what it was before go1.19; +// specifically, it sets the Err field to nil (LookPath returns a new error when the file +// is resolved to the current directory).
+func maskErrDotCmd(cmd *osexec.Cmd) *osexec.Cmd { + cmd.Err = maskErrDot(cmd.Err) + return cmd +} + +func maskErrDot(err error) error { + if err != nil && errors.Is(err, osexec.ErrDot) { + return nil + } + return err +} diff --git a/vendor/modules.txt b/vendor/modules.txt index fc2ea536dc..3852d72b07 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2,6 +2,9 @@ ## explicit; go 1.16 github.com/Azure/go-ansiterm github.com/Azure/go-ansiterm/winterm +# github.com/MakeNowJust/heredoc v1.0.0 +## explicit; go 1.12 +github.com/MakeNowJust/heredoc # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler @@ -23,6 +26,12 @@ github.com/cenkalti/backoff/v4 # github.com/cespare/xxhash/v2 v2.3.0 ## explicit; go 1.11 github.com/cespare/xxhash/v2 +# github.com/chai2010/gettext-go v1.0.2 +## explicit; go 1.14 +github.com/chai2010/gettext-go +github.com/chai2010/gettext-go/mo +github.com/chai2010/gettext-go/plural +github.com/chai2010/gettext-go/po # github.com/coreos/go-semver v0.3.1 ## explicit; go 1.8 github.com/coreos/go-semver/semver @@ -44,12 +53,18 @@ github.com/evanphx/json-patch ## explicit; go 1.18 github.com/evanphx/json-patch/v5 github.com/evanphx/json-patch/v5/internal/json +# github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d +## explicit +github.com/exponent-io/jsonpath # github.com/felixge/httpsnoop v1.0.4 ## explicit; go 1.13 github.com/felixge/httpsnoop # github.com/fsnotify/fsnotify v1.7.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify +# github.com/fvbommel/sortorder v1.1.0 +## explicit; go 1.13 +github.com/fvbommel/sortorder # github.com/go-errors/errors v1.4.2 ## explicit; go 1.14 github.com/go-errors/errors @@ -151,6 +166,7 @@ github.com/google/shlex github.com/google/uuid # github.com/gorilla/websocket v1.5.1 ## explicit; go 1.20 +github.com/gorilla/websocket # github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 ## explicit github.com/gregjones/httpcache @@ -191,6 +207,13 @@ github.com/mailru/easyjson/jwriter # github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 ## explicit; go 1.19 github.com/matttproud/golang_protobuf_extensions/v2/pbutil +# github.com/mitchellh/go-wordwrap v1.0.1 +## explicit; go 1.14 +github.com/mitchellh/go-wordwrap +# github.com/moby/spdystream v0.2.0 +## explicit; go 1.13 +github.com/moby/spdystream +github.com/moby/spdystream/spdy # github.com/moby/term v0.0.0-20221205130635-1aeaba878587 ## explicit; go 1.18 github.com/moby/term @@ -207,6 +230,9 @@ github.com/monochromegane/go-gitignore # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 ## explicit github.com/munnerz/goautoneg +# github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f +## explicit +github.com/mxk/go-flowrate/flowrate # github.com/onsi/ginkgo/v2 v2.20.0 ## explicit; go 1.20 github.com/onsi/ginkgo/v2 @@ -275,8 +301,12 @@ github.com/prometheus/procfs/internal/util # github.com/ray-project/kuberay/ray-operator v1.1.1 ## explicit; go 1.20 github.com/ray-project/kuberay/ray-operator/apis/ray/v1 +github.com/ray-project/kuberay/ray-operator/controllers/ray/utils # github.com/rogpeppe/go-internal v1.12.0 ## explicit; go 1.20 +# github.com/russross/blackfriday/v2 v2.1.0 +## explicit +github.com/russross/blackfriday/v2 # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -430,7 +460,9 @@ golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna +golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries 
+golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket # golang.org/x/oauth2 v0.20.0 @@ -673,6 +705,7 @@ k8s.io/api/flowcontrol/v1 k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/flowcontrol/v1beta3 +k8s.io/api/imagepolicy/v1alpha1 k8s.io/api/networking/v1 k8s.io/api/networking/v1alpha1 k8s.io/api/networking/v1beta1 @@ -740,6 +773,7 @@ k8s.io/apimachinery/pkg/util/duration k8s.io/apimachinery/pkg/util/errors k8s.io/apimachinery/pkg/util/framer k8s.io/apimachinery/pkg/util/httpstream +k8s.io/apimachinery/pkg/util/httpstream/spdy k8s.io/apimachinery/pkg/util/httpstream/wsstream k8s.io/apimachinery/pkg/util/intstr k8s.io/apimachinery/pkg/util/json @@ -749,6 +783,7 @@ k8s.io/apimachinery/pkg/util/mergepatch k8s.io/apimachinery/pkg/util/naming k8s.io/apimachinery/pkg/util/net k8s.io/apimachinery/pkg/util/portforward +k8s.io/apimachinery/pkg/util/proxy k8s.io/apimachinery/pkg/util/rand k8s.io/apimachinery/pkg/util/remotecommand k8s.io/apimachinery/pkg/util/runtime @@ -764,6 +799,7 @@ k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json +k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/apiserver v0.29.7 ## explicit; go 1.21 @@ -1215,6 +1251,14 @@ k8s.io/client-go/rest k8s.io/client-go/rest/fake k8s.io/client-go/rest/watch k8s.io/client-go/restmapper +k8s.io/client-go/scale +k8s.io/client-go/scale/scheme +k8s.io/client-go/scale/scheme/appsint +k8s.io/client-go/scale/scheme/appsv1beta1 +k8s.io/client-go/scale/scheme/appsv1beta2 +k8s.io/client-go/scale/scheme/autoscalingv1 +k8s.io/client-go/scale/scheme/extensionsint +k8s.io/client-go/scale/scheme/extensionsv1beta1 k8s.io/client-go/testing k8s.io/client-go/third_party/forked/golang/template k8s.io/client-go/tools/auth @@ -1233,9 +1277,14 @@ k8s.io/client-go/tools/pager k8s.io/client-go/tools/record k8s.io/client-go/tools/record/util k8s.io/client-go/tools/reference +k8s.io/client-go/tools/remotecommand +k8s.io/client-go/tools/watch k8s.io/client-go/transport +k8s.io/client-go/transport/spdy +k8s.io/client-go/transport/websocket k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation +k8s.io/client-go/util/exec k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/homedir k8s.io/client-go/util/jsonpath @@ -1360,11 +1409,25 @@ k8s.io/kube-openapi/pkg/schemamutation k8s.io/kube-openapi/pkg/spec3 k8s.io/kube-openapi/pkg/util k8s.io/kube-openapi/pkg/util/proto +k8s.io/kube-openapi/pkg/util/proto/validation k8s.io/kube-openapi/pkg/util/sets k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson +# k8s.io/kubectl v0.29.7 +## explicit; go 1.21 +k8s.io/kubectl/pkg/cmd/get +k8s.io/kubectl/pkg/cmd/util +k8s.io/kubectl/pkg/rawhttp +k8s.io/kubectl/pkg/scheme +k8s.io/kubectl/pkg/util/i18n +k8s.io/kubectl/pkg/util/interrupt +k8s.io/kubectl/pkg/util/openapi +k8s.io/kubectl/pkg/util/slice +k8s.io/kubectl/pkg/util/templates +k8s.io/kubectl/pkg/util/term +k8s.io/kubectl/pkg/validation # k8s.io/metrics v0.29.7 ## explicit; go 1.21 k8s.io/metrics/pkg/apis/metrics @@ -1375,6 +1438,7 @@ k8s.io/metrics/pkg/apis/metrics/v1beta1 k8s.io/utils/buffer k8s.io/utils/clock k8s.io/utils/clock/testing +k8s.io/utils/exec k8s.io/utils/field k8s.io/utils/integer k8s.io/utils/internal/third_party/forked/golang/golang-lru
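A closing note on the fixup files vendored earlier: since Go 1.19, os/exec reports ErrDot when a command resolves to the current directory, and maskErrDot deliberately suppresses that error to preserve the pre-1.19 behavior. A standalone sketch of the same check (./some-local-tool is a placeholder path):

```go
package main

import (
	"errors"
	"fmt"
	osexec "os/exec"
)

// maskErrDot-style filtering: treat the Go 1.19+ ErrDot as success so that
// lookups resolving to the current directory behave as they did before 1.19.
func maskErrDot(err error) error {
	if err != nil && errors.Is(err, osexec.ErrDot) {
		return nil
	}
	return err
}

func main() {
	path, err := osexec.LookPath("./some-local-tool")
	if err = maskErrDot(err); err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("resolved to:", path)
}
```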