diff --git a/api/elbv2/v1beta1/groupversion_info.go b/api/elbv2/v1beta1/groupversion_info.go new file mode 100644 index 0000000..ef47ce7 --- /dev/null +++ b/api/elbv2/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1beta1 contains API Schema definitions for the elbv2 v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=elbv2.k8s.aws +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "elbv2.k8s.aws", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/elbv2/v1beta1/targetgroupbinding_types.go b/api/elbv2/v1beta1/targetgroupbinding_types.go new file mode 100644 index 0000000..7d50262 --- /dev/null +++ b/api/elbv2/v1beta1/targetgroupbinding_types.go @@ -0,0 +1,172 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +kubebuilder:validation:Enum=instance;ip +// TargetType is the targetType of your ELBV2 TargetGroup. +// +// * with `instance` TargetType, nodes with nodePort for your service will be registered as targets +// * with `ip` TargetType, Pods with containerPort for your service will be registered as targets +type TargetType string + +const ( + TargetTypeInstance TargetType = "instance" + TargetTypeIP TargetType = "ip" +) + +// ServiceReference defines reference to a Kubernetes Service and its ServicePort. +type ServiceReference struct { + // Name is the name of the Service. + Name string `json:"name"` + + // Port is the port of the ServicePort. + Port intstr.IntOrString `json:"port"` +} + +// IPBlock defines source/destination IPBlock in networking rules. +type IPBlock struct { + // CIDR is the network CIDR. + // Both IPV4 or IPV6 CIDR are accepted. + CIDR string `json:"cidr"` +} + +// SecurityGroup defines reference to an AWS EC2 SecurityGroup. +type SecurityGroup struct { + // GroupID is the EC2 SecurityGroupID. + GroupID string `json:"groupID"` +} + +// NetworkingPeer defines the source/destination peer for networking rules. 
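+// At most one of IPBlock or SecurityGroup may be specified for a given peer.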
+type NetworkingPeer struct { + // IPBlock defines an IPBlock peer. + // If specified, none of the other fields can be set. + // +optional + IPBlock *IPBlock `json:"ipBlock,omitempty"` + + // SecurityGroup defines a SecurityGroup peer. + // If specified, none of the other fields can be set. + // +optional + SecurityGroup *SecurityGroup `json:"securityGroup,omitempty"` +} + +// +kubebuilder:validation:Enum=TCP;UDP +// NetworkingProtocol defines the protocol for networking rules. +type NetworkingProtocol string + +const ( + // NetworkingProtocolTCP is the TCP protocol. + NetworkingProtocolTCP NetworkingProtocol = "TCP" + + // NetworkingProtocolUDP is the UDP protocol. + NetworkingProtocolUDP NetworkingProtocol = "UDP" +) + +// NetworkingPort defines the port and protocol for networking rules. +type NetworkingPort struct { + // The protocol which traffic must match. + // If protocol is unspecified, it defaults to TCP. + Protocol *NetworkingProtocol `json:"protocol,omitempty"` + + // The port which traffic must match. + // When NodePort endpoints(instance TargetType) is used, this must be a numerical port. + // When Port endpoints(ip TargetType) is used, this can be either numerical or named port on pods. + // if port is unspecified, it defaults to all ports. + // +optional + Port *intstr.IntOrString `json:"port,omitempty"` +} + +// NetworkingIngressRule defines a particular set of traffic that is allowed to access TargetGroup's targets. +type NetworkingIngressRule struct { + // List of peers which should be able to access the targets in TargetGroup. + // At least one NetworkingPeer should be specified. + From []NetworkingPeer `json:"from"` + + // List of ports which should be made accessible on the targets in TargetGroup. + // If ports is empty or unspecified, it defaults to all ports with TCP. + Ports []NetworkingPort `json:"ports"` +} + +// TargetGroupBindingNetworking defines the networking rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. +type TargetGroupBindingNetworking struct { + // List of ingress rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + // +optional + Ingress []NetworkingIngressRule `json:"ingress,omitempty"` +} + +// TargetGroupBindingSpec defines the desired state of TargetGroupBinding +type TargetGroupBindingSpec struct { + // targetGroupARN is the Amazon Resource Name (ARN) for the TargetGroup. + // +kubebuilder:validation:MinLength=1 + TargetGroupARN string `json:"targetGroupARN"` + + // targetType is the TargetType of TargetGroup. If unspecified, it will be automatically inferred. + // +optional + TargetType *TargetType `json:"targetType,omitempty"` + + // serviceRef is a reference to a Kubernetes Service and ServicePort. + ServiceRef ServiceReference `json:"serviceRef"` + + // networking defines the networking rules to allow ELBV2 LoadBalancer to access targets in TargetGroup. + // +optional + Networking *TargetGroupBindingNetworking `json:"networking,omitempty"` + + // node selector for instance type target groups to only register certain nodes + // +optional + NodeSelector *metav1.LabelSelector `json:"nodeSelector,omitempty"` +} + +// TargetGroupBindingStatus defines the observed state of TargetGroupBinding +type TargetGroupBindingStatus struct { + // The generation observed by the TargetGroupBinding controller. 
+ // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:storageversion +// +kubebuilder:printcolumn:name="SERVICE-NAME",type="string",JSONPath=".spec.serviceRef.name",description="The Kubernetes Service's name" +// +kubebuilder:printcolumn:name="SERVICE-PORT",type="string",JSONPath=".spec.serviceRef.port",description="The Kubernetes Service's port" +// +kubebuilder:printcolumn:name="TARGET-TYPE",type="string",JSONPath=".spec.targetType",description="The AWS TargetGroup's TargetType" +// +kubebuilder:printcolumn:name="ARN",type="string",JSONPath=".spec.targetGroupARN",description="The AWS TargetGroup's Amazon Resource Name",priority=1 +// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp" +// TargetGroupBinding is the Schema for the TargetGroupBinding API +type TargetGroupBinding struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec TargetGroupBindingSpec `json:"spec,omitempty"` + Status TargetGroupBindingStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// TargetGroupBindingList contains a list of TargetGroupBinding +type TargetGroupBindingList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []TargetGroupBinding `json:"items"` +} + +func init() { + SchemeBuilder.Register(&TargetGroupBinding{}, &TargetGroupBindingList{}) +} diff --git a/api/elbv2/v1beta1/zz_generated.deepcopy.go b/api/elbv2/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..25c99dc --- /dev/null +++ b/api/elbv2/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,285 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Okra authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPBlock) DeepCopyInto(out *IPBlock) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. +func (in *IPBlock) DeepCopy() *IPBlock { + if in == nil { + return nil + } + out := new(IPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkingIngressRule) DeepCopyInto(out *NetworkingIngressRule) { + *out = *in + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkingPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkingPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkingIngressRule. +func (in *NetworkingIngressRule) DeepCopy() *NetworkingIngressRule { + if in == nil { + return nil + } + out := new(NetworkingIngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkingPeer) DeepCopyInto(out *NetworkingPeer) { + *out = *in + if in.IPBlock != nil { + in, out := &in.IPBlock, &out.IPBlock + *out = new(IPBlock) + **out = **in + } + if in.SecurityGroup != nil { + in, out := &in.SecurityGroup, &out.SecurityGroup + *out = new(SecurityGroup) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkingPeer. +func (in *NetworkingPeer) DeepCopy() *NetworkingPeer { + if in == nil { + return nil + } + out := new(NetworkingPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkingPort) DeepCopyInto(out *NetworkingPort) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(NetworkingProtocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkingPort. +func (in *NetworkingPort) DeepCopy() *NetworkingPort { + if in == nil { + return nil + } + out := new(NetworkingPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityGroup) DeepCopyInto(out *SecurityGroup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityGroup. +func (in *SecurityGroup) DeepCopy() *SecurityGroup { + if in == nil { + return nil + } + out := new(SecurityGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + out.Port = in.Port +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupBinding) DeepCopyInto(out *TargetGroupBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupBinding. 
+func (in *TargetGroupBinding) DeepCopy() *TargetGroupBinding { + if in == nil { + return nil + } + out := new(TargetGroupBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TargetGroupBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupBindingList) DeepCopyInto(out *TargetGroupBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TargetGroupBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupBindingList. +func (in *TargetGroupBindingList) DeepCopy() *TargetGroupBindingList { + if in == nil { + return nil + } + out := new(TargetGroupBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TargetGroupBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupBindingNetworking) DeepCopyInto(out *TargetGroupBindingNetworking) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkingIngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupBindingNetworking. +func (in *TargetGroupBindingNetworking) DeepCopy() *TargetGroupBindingNetworking { + if in == nil { + return nil + } + out := new(TargetGroupBindingNetworking) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupBindingSpec) DeepCopyInto(out *TargetGroupBindingSpec) { + *out = *in + if in.TargetType != nil { + in, out := &in.TargetType, &out.TargetType + *out = new(TargetType) + **out = **in + } + out.ServiceRef = in.ServiceRef + if in.Networking != nil { + in, out := &in.Networking, &out.Networking + *out = new(TargetGroupBindingNetworking) + (*in).DeepCopyInto(*out) + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupBindingSpec. +func (in *TargetGroupBindingSpec) DeepCopy() *TargetGroupBindingSpec { + if in == nil { + return nil + } + out := new(TargetGroupBindingSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupBindingStatus) DeepCopyInto(out *TargetGroupBindingStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupBindingStatus. +func (in *TargetGroupBindingStatus) DeepCopy() *TargetGroupBindingStatus { + if in == nil { + return nil + } + out := new(TargetGroupBindingStatus) + in.DeepCopyInto(out) + return out +} diff --git a/api/rollouts/v1alpha1/analysis_types.go b/api/rollouts/v1alpha1/analysis_types.go new file mode 100644 index 0000000..a855b57 --- /dev/null +++ b/api/rollouts/v1alpha1/analysis_types.go @@ -0,0 +1,437 @@ +/* +Copyright The Argo Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package v1alpha1 + +import ( + "time" + + intstrutil "k8s.io/apimachinery/pkg/util/intstr" + + batchv1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// ClusterAnalysisTemplate holds the template for performing canary analysis +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=clusteranalysistemplates,shortName=cat +type ClusterAnalysisTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec AnalysisTemplateSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// AnalysisTemplateList is a list of AnalysisTemplate resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ClusterAnalysisTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []ClusterAnalysisTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// AnalysisTemplate holds the template for performing canary analysis +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=analysistemplates,shortName=at +type AnalysisTemplate struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec AnalysisTemplateSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// AnalysisTemplateList is a list of AnalysisTemplate resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AnalysisTemplateList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + Items []AnalysisTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// AnalysisTemplateSpec is the specification for a AnalysisTemplate resource +type AnalysisTemplateSpec struct { + // Metrics contains the list of metrics to query as part of an analysis run + // +patchMergeKey=name + // +patchStrategy=merge + Metrics []Metric `json:"metrics" patchStrategy:"merge" patchMergeKey:"name" 
protobuf:"bytes,1,rep,name=metrics"` + // Args are the list of arguments to the template + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Args []Argument `json:"args,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=args"` +} + +// DurationString is a string representing a duration (e.g. 30s, 5m, 1h) +type DurationString string + +// Duration converts DurationString into a time.Duration +func (d DurationString) Duration() (time.Duration, error) { + return time.ParseDuration(string(d)) +} + +// Metric defines a metric in which to perform analysis +type Metric struct { + // Name is the name of the metric + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Interval defines an interval string (e.g. 30s, 5m, 1h) between each measurement. + // If omitted, will perform a single measurement + Interval DurationString `json:"interval,omitempty" protobuf:"bytes,2,opt,name=interval,casttype=DurationString"` + // InitialDelay how long the AnalysisRun should wait before starting this metric + InitialDelay DurationString `json:"initialDelay,omitempty" protobuf:"bytes,3,opt,name=initialDelay,casttype=DurationString"` + // Count is the number of times to run the measurement. If both interval and count are omitted, + // the effective count is 1. If only interval is specified, metric runs indefinitely. + // If count > 1, interval must be specified. + Count *intstrutil.IntOrString `json:"count,omitempty" protobuf:"bytes,4,opt,name=count"` + // SuccessCondition is an expression which determines if a measurement is considered successful + // Expression is a goevaluate expression. The keyword `result` is a variable reference to the + // value of measurement. Results can be both structured data or primitive. + // Examples: + // result > 10 + // (result.requests_made * result.requests_succeeded / 100) >= 90 + SuccessCondition string `json:"successCondition,omitempty" protobuf:"bytes,5,opt,name=successCondition"` + // FailureCondition is an expression which determines if a measurement is considered failed + // If both success and failure conditions are specified, and the measurement does not fall into + // either condition, the measurement is considered Inconclusive + FailureCondition string `json:"failureCondition,omitempty" protobuf:"bytes,6,opt,name=failureCondition"` + // FailureLimit is the maximum number of times the measurement is allowed to fail, before the + // entire metric is considered Failed (default: 0) + FailureLimit *intstrutil.IntOrString `json:"failureLimit,omitempty" protobuf:"bytes,7,opt,name=failureLimit"` + // InconclusiveLimit is the maximum number of times the measurement is allowed to measure + // Inconclusive, before the entire metric is considered Inconclusive (default: 0) + InconclusiveLimit *intstrutil.IntOrString `json:"inconclusiveLimit,omitempty" protobuf:"bytes,8,opt,name=inconclusiveLimit"` + // ConsecutiveErrorLimit is the maximum number of times the measurement is allowed to error in + // succession, before the metric is considered error (default: 4) + ConsecutiveErrorLimit *intstrutil.IntOrString `json:"consecutiveErrorLimit,omitempty" protobuf:"bytes,9,opt,name=consecutiveErrorLimit"` + // Provider configuration to the external system to use to verify the analysis + Provider MetricProvider `json:"provider" protobuf:"bytes,10,opt,name=provider"` +} + +// EffectiveCount is the effective count based on whether or not count/interval is specified +// If neither count or interval is specified, the effective count is 1 
+// If only interval is specified, metric runs indefinitely and there is no effective count (nil) +// Otherwise, it is the user specified value +func (m *Metric) EffectiveCount() *intstrutil.IntOrString { + // Need to check if type is String + if m.Count == nil || m.Count.IntValue() == 0 { + if m.Interval == "" { + one := intstrutil.FromInt(1) + return &one + } + return nil + } + return m.Count +} + +// MetricProvider which external system to use to verify the analysis +// Only one of the fields in this struct should be non-nil +type MetricProvider struct { + // Prometheus specifies the prometheus metric to query + Prometheus *PrometheusMetric `json:"prometheus,omitempty" protobuf:"bytes,1,opt,name=prometheus"` + // Kayenta specifies a Kayenta metric + Kayenta *KayentaMetric `json:"kayenta,omitempty" protobuf:"bytes,2,opt,name=kayenta"` + // Web specifies a generic HTTP web metric + Web *WebMetric `json:"web,omitempty" protobuf:"bytes,3,opt,name=web"` + // Datadog specifies a datadog metric to query + Datadog *DatadogMetric `json:"datadog,omitempty" protobuf:"bytes,4,opt,name=datadog"` + // Wavefront specifies the wavefront metric to query + Wavefront *WavefrontMetric `json:"wavefront,omitempty" protobuf:"bytes,5,opt,name=wavefront"` + // NewRelic specifies the newrelic metric to query + NewRelic *NewRelicMetric `json:"newRelic,omitempty" protobuf:"bytes,6,opt,name=newRelic"` + // Job specifies the job metric run + Job *JobMetric `json:"job,omitempty" protobuf:"bytes,7,opt,name=job"` + // CloudWatch specifies the cloudWatch metric to query + CloudWatch *CloudWatchMetric `json:"cloudWatch,omitempty" protobuf:"bytes,8,opt,name=cloudWatch"` + // Graphite specifies the Graphite metric to query + Graphite *GraphiteMetric `json:"graphite,omitempty" protobuf:"bytes,9,opt,name=graphite"` +} + +// AnalysisPhase is the overall phase of an AnalysisRun, MetricResult, or Measurement +type AnalysisPhase string + +// Possible AnalysisPhase values +const ( + AnalysisPhasePending AnalysisPhase = "Pending" + AnalysisPhaseRunning AnalysisPhase = "Running" + AnalysisPhaseSuccessful AnalysisPhase = "Successful" + AnalysisPhaseFailed AnalysisPhase = "Failed" + AnalysisPhaseError AnalysisPhase = "Error" + AnalysisPhaseInconclusive AnalysisPhase = "Inconclusive" +) + +// Completed returns whether or not the analysis status is considered completed +func (as AnalysisPhase) Completed() bool { + switch as { + case AnalysisPhaseSuccessful, AnalysisPhaseFailed, AnalysisPhaseError, AnalysisPhaseInconclusive: + return true + } + return false +} + +// PrometheusMetric defines the prometheus query to perform canary analysis +type PrometheusMetric struct { + // Address is the HTTP address and port of the prometheus server + Address string `json:"address,omitempty" protobuf:"bytes,1,opt,name=address"` + // Query is a raw prometheus query to perform + Query string `json:"query,omitempty" protobuf:"bytes,2,opt,name=query"` +} + +// WavefrontMetric defines the wavefront query to perform canary analysis +type WavefrontMetric struct { + // Address is the HTTP address and port of the wavefront server + Address string `json:"address,omitempty" protobuf:"bytes,1,opt,name=address"` + // Query is a raw wavefront query to perform + Query string `json:"query,omitempty" protobuf:"bytes,2,opt,name=query"` +} + +// NewRelicMetric defines the newrelic query to perform canary analysis +type NewRelicMetric struct { + // Profile is the name of the secret holding NR account configuration + Profile string `json:"profile,omitempty" 
protobuf:"bytes,1,opt,name=profile"` + // Query is a raw newrelic NRQL query to perform + Query string `json:"query" protobuf:"bytes,2,opt,name=query"` +} + +// JobMetric defines a job to run which acts as a metric +type JobMetric struct { + Metadata metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Spec batchv1.JobSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// GraphiteMetric defines the Graphite query to perform canary analysis +type GraphiteMetric struct { + // Address is the HTTP address and port of the Graphite server + Address string `json:"address,omitempty" protobuf:"bytes,1,opt,name=address"` + // Query is a raw Graphite query to perform + Query string `json:"query,omitempty" protobuf:"bytes,2,opt,name=query"` +} + +// CloudWatchMetric defines the cloudwatch query to perform canary analysis +type CloudWatchMetric struct { + Interval DurationString `json:"interval,omitempty" protobuf:"bytes,1,opt,name=interval,casttype=DurationString"` + MetricDataQueries []CloudWatchMetricDataQuery `json:"metricDataQueries" protobuf:"bytes,2,rep,name=metricDataQueries"` +} + +// CloudWatchMetricDataQuery defines the cloudwatch query +type CloudWatchMetricDataQuery struct { + Id string `json:"id,omitempty" protobuf:"bytes,1,opt,name=id"` + Expression *string `json:"expression,omitempty" protobuf:"bytes,2,opt,name=expression"` + Label *string `json:"label,omitempty" protobuf:"bytes,3,opt,name=label"` + MetricStat *CloudWatchMetricStat `json:"metricStat,omitempty" protobuf:"bytes,4,opt,name=metricStat"` + Period *intstrutil.IntOrString `json:"period,omitempty" protobuf:"varint,5,opt,name=period"` + ReturnData *bool `json:"returnData,omitempty" protobuf:"bytes,6,opt,name=returnData"` +} + +type CloudWatchMetricStat struct { + Metric CloudWatchMetricStatMetric `json:"metric,omitempty" protobuf:"bytes,1,opt,name=metric"` + Period intstrutil.IntOrString `json:"period,omitempty" protobuf:"varint,2,opt,name=period"` + Stat string `json:"stat,omitempty" protobuf:"bytes,3,opt,name=stat"` + Unit string `json:"unit,omitempty" protobuf:"bytes,4,opt,name=unit"` +} + +type CloudWatchMetricStatMetric struct { + Dimensions []CloudWatchMetricStatMetricDimension `json:"dimensions,omitempty" protobuf:"bytes,1,rep,name=dimensions"` + MetricName string `json:"metricName,omitempty" protobuf:"bytes,2,opt,name=metricName"` + Namespace *string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` +} + +type CloudWatchMetricStatMetricDimension struct { + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + +// AnalysisRun is an instantiation of an AnalysisTemplate +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=analysisruns, shortName=ar +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.phase",description="AnalysisRun status" +type AnalysisRun struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Spec AnalysisRunSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + Status AnalysisRunStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// AnalysisRunList is a list of AnalysisTemplate resources +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type AnalysisRunList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" 
protobuf:"bytes,1,opt,name=metadata"` + Items []AnalysisRun `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// AnalysisRunSpec is the spec for a AnalysisRun resource +type AnalysisRunSpec struct { + // Metrics contains the list of metrics to query as part of an analysis run + // +patchMergeKey=name + // +patchStrategy=merge + Metrics []Metric `json:"metrics" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=metrics"` + // Args are the list of arguments used in this run + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Args []Argument `json:"args,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=args"` + // Terminate is used to prematurely stop the run (e.g. rollout completed and analysis is no longer desired) + Terminate bool `json:"terminate,omitempty" protobuf:"varint,3,opt,name=terminate"` +} + +// Argument is an argument to an AnalysisRun +type Argument struct { + // Name is the name of the argument + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Value is the value of the argument + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // ValueFrom is a reference to where a secret is stored. This field is one of the fields with valueFrom + // +optional + ValueFrom *ValueFrom `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` +} + +type ValueFrom struct { + // Secret is a reference to where a secret is stored. This field is one of the fields with valueFrom + // +optional + SecretKeyRef *SecretKeyRef `json:"secretKeyRef,omitempty" protobuf:"bytes,1,opt,name=secretKeyRef"` + //FieldRef is a reference to the fields in metadata which we are referencing. This field is one of the fields with + //valueFrom + // +optional + FieldRef *FieldRef `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` +} + +type SecretKeyRef struct { + // Name is the name of the secret + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Key is the key of the secret to select from. + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` +} + +// AnalysisRunStatus is the status for a AnalysisRun resource +type AnalysisRunStatus struct { + // Phase is the status of the analysis run + Phase AnalysisPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=AnalysisPhase"` + // Message is a message explaining current status + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // MetricResults contains the metrics collected during the run + MetricResults []MetricResult `json:"metricResults,omitempty" protobuf:"bytes,3,rep,name=metricResults"` + // StartedAt indicates when the analysisRun first started + StartedAt *metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,4,opt,name=startedAt"` +} + +// MetricResult contain a list of the most recent measurements for a single metric along with +// counters on how often the measurement +type MetricResult struct { + // Name is the name of the metric + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Phase is the overall aggregate status of the metric + Phase AnalysisPhase `json:"phase" protobuf:"bytes,2,opt,name=phase,casttype=AnalysisPhase"` + // Measurements holds the most recent measurements collected for the metric + Measurements []Measurement `json:"measurements,omitempty" protobuf:"bytes,3,rep,name=measurements"` + // Message contains a message describing current condition (e.g. 
error messages) + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + // Count is the number of times the metric was measured without Error + // This is equal to the sum of Successful, Failed, Inconclusive + Count int32 `json:"count,omitempty" protobuf:"varint,5,opt,name=count"` + // Successful is the number of times the metric was measured Successful + Successful int32 `json:"successful,omitempty" protobuf:"varint,6,opt,name=successful"` + // Failed is the number of times the metric was measured Failed + Failed int32 `json:"failed,omitempty" protobuf:"varint,7,opt,name=failed"` + // Inconclusive is the number of times the metric was measured Inconclusive + Inconclusive int32 `json:"inconclusive,omitempty" protobuf:"varint,8,opt,name=inconclusive"` + // Error is the number of times an error was encountered during measurement + Error int32 `json:"error,omitempty" protobuf:"varint,9,opt,name=error"` + // ConsecutiveError is the number of times an error was encountered during measurement in succession + // Resets to zero when non-errors are encountered + ConsecutiveError int32 `json:"consecutiveError,omitempty" protobuf:"varint,10,opt,name=consecutiveError"` +} + +// Measurement is a point in time result value of a single metric, and the time it was measured +type Measurement struct { + // Phase is the status of this single measurement + Phase AnalysisPhase `json:"phase" protobuf:"bytes,1,opt,name=phase,casttype=AnalysisPhase"` + // Message contains a message describing current condition (e.g. error messages) + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // StartedAt is the timestamp in which this measurement started to be measured + StartedAt *metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,3,opt,name=startedAt"` + // FinishedAt is the timestamp in which this measurement completed and value was collected + FinishedAt *metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,4,opt,name=finishedAt"` + // Value is the measured value of the metric + Value string `json:"value,omitempty" protobuf:"bytes,5,opt,name=value"` + // Metadata stores additional metadata about this metric result, used by the different providers + // (e.g. 
kayenta run ID, job name) + Metadata map[string]string `json:"metadata,omitempty" protobuf:"bytes,6,rep,name=metadata"` + // ResumeAt is the timestamp when the analysisRun should try to resume the measurement + ResumeAt *metav1.Time `json:"resumeAt,omitempty" protobuf:"bytes,7,opt,name=resumeAt"` +} + +type KayentaMetric struct { + Address string `json:"address" protobuf:"bytes,1,opt,name=address"` + + Application string `json:"application" protobuf:"bytes,2,opt,name=application"` + + CanaryConfigName string `json:"canaryConfigName" protobuf:"bytes,3,opt,name=canaryConfigName"` + + MetricsAccountName string `json:"metricsAccountName" protobuf:"bytes,4,opt,name=metricsAccountName"` + ConfigurationAccountName string `json:"configurationAccountName" protobuf:"bytes,5,opt,name=configurationAccountName"` + StorageAccountName string `json:"storageAccountName" protobuf:"bytes,6,opt,name=storageAccountName"` + + Threshold KayentaThreshold `json:"threshold" protobuf:"bytes,7,opt,name=threshold"` + + Scopes []KayentaScope `json:"scopes" protobuf:"bytes,8,rep,name=scopes"` +} + +type KayentaThreshold struct { + Pass int64 `json:"pass" protobuf:"varint,1,opt,name=pass"` + Marginal int64 `json:"marginal" protobuf:"varint,2,opt,name=marginal"` +} + +type KayentaScope struct { + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + ControlScope ScopeDetail `json:"controlScope" protobuf:"bytes,2,opt,name=controlScope"` + ExperimentScope ScopeDetail `json:"experimentScope" protobuf:"bytes,3,opt,name=experimentScope"` +} + +type ScopeDetail struct { + Scope string `json:"scope" protobuf:"bytes,1,opt,name=scope"` + Region string `json:"region" protobuf:"bytes,2,opt,name=region"` + Step int64 `json:"step" protobuf:"varint,3,opt,name=step"` + Start string `json:"start" protobuf:"bytes,4,opt,name=start"` + End string `json:"end" protobuf:"bytes,5,opt,name=end"` +} + +type WebMetric struct { + // URL is the address of the web metric + URL string `json:"url" protobuf:"bytes,1,opt,name=url"` + // +patchMergeKey=key + // +patchStrategy=merge + // Headers are optional HTTP headers to use in the request + Headers []WebMetricHeader `json:"headers,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,2,rep,name=headers"` + // TimeoutSeconds is the timeout for the request in seconds (default: 10) + TimeoutSeconds int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // JSONPath is a JSON Path to use as the result variable (default: "{$}") + JSONPath string `json:"jsonPath,omitempty" protobuf:"bytes,4,opt,name=jsonPath"` + // Insecure skips host TLS verification + Insecure bool `json:"insecure,omitempty" protobuf:"varint,5,opt,name=insecure"` +} + +type WebMetricHeader struct { + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type DatadogMetric struct { + Interval DurationString `json:"interval,omitempty" protobuf:"bytes,1,opt,name=interval,casttype=DurationString"` + Query string `json:"query" protobuf:"bytes,2,opt,name=query"` +} diff --git a/api/rollouts/v1alpha1/groupversion_info.go b/api/rollouts/v1alpha1/groupversion_info.go new file mode 100644 index 0000000..8ca480e --- /dev/null +++ b/api/rollouts/v1alpha1/groupversion_info.go @@ -0,0 +1,33 @@ +/* +Copyright 2020 The Okra authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1alpha1 contains API Schema definitions for the actions v1alpha1 API group +// +kubebuilder:object:generate=true +// +groupName=argoproj.io +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "argoproj.io", Version: "v1alpha1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/api/rollouts/v1alpha1/register.go b/api/rollouts/v1alpha1/register.go new file mode 100644 index 0000000..2fbd583 --- /dev/null +++ b/api/rollouts/v1alpha1/register.go @@ -0,0 +1,12 @@ +package v1alpha1 + +func init() { + SchemeBuilder.Register( + &AnalysisTemplate{}, + &AnalysisTemplateList{}, + &ClusterAnalysisTemplate{}, + &ClusterAnalysisTemplateList{}, + &AnalysisRun{}, + &AnalysisRunList{}, + ) +} diff --git a/api/rollouts/v1alpha1/types.go b/api/rollouts/v1alpha1/types.go new file mode 100644 index 0000000..49a097a --- /dev/null +++ b/api/rollouts/v1alpha1/types.go @@ -0,0 +1,864 @@ +/* +Copyright The Argo Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package v1alpha1 + +import ( + "encoding/json" + "strconv" + "time" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:resource:path=rollouts,shortName=ro +// +kubebuilder:subresource:scale:specpath=.spec.replicas,statuspath=.status.HPAReplicas,selectorpath=.status.selector +// +kubebuilder:printcolumn:name="Desired",type="integer",JSONPath=".spec.replicas",description="Number of desired pods" +// +kubebuilder:printcolumn:name="Current",type="integer",JSONPath=".status.replicas",description="Total number of non-terminated pods targeted by this rollout" +// +kubebuilder:printcolumn:name="Up-to-date",type="integer",JSONPath=".status.updatedReplicas",description="Total number of non-terminated pods targeted by this rollout that have the desired template spec" +// +kubebuilder:printcolumn:name="Available",type="integer",JSONPath=".status.availableReplicas",description="Total number of available pods (ready for at least minReadySeconds) targeted by this rollout" +// +kubebuilder:subresource:status + +// Rollout is a specification for a Rollout resource +type Rollout struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec RolloutSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + Status RolloutStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// RolloutSpec is the spec for a Rollout resource +type RolloutSpec struct { + TemplateResolvedFromRef bool `json:"-"` + SelectorResolvedFromRef bool `json:"-"` + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this rollout. + // It must match the pod template's labels. + // +optional + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + // Template describes the pods that will be created. + // +optional + Template corev1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + // WorkloadRef holds a references to a workload that provides Pod template + // +optional + WorkloadRef *ObjectRef `json:"workloadRef,omitempty" protobuf:"bytes,10,opt,name=workloadRef"` + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy RolloutStrategy `json:"strategy" protobuf:"bytes,5,opt,name=strategy"` + // The number of old ReplicaSets to retain. If unspecified, will retain 10 old ReplicaSets + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + // Paused pauses the rollout at its current step. 
+ Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + // ProgressDeadlineSeconds The maximum time in seconds for a rollout to + // make progress before it is considered to be failed. Argo Rollouts will + // continue to process failed rollouts and a condition with a + // ProgressDeadlineExceeded reason will be surfaced in the rollout status. + // Note that progress will not be estimated during the time a rollout is paused. + // Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,8,opt,name=progressDeadlineSeconds"` + // ProgressDeadlineAbort is whether to abort the update when ProgressDeadlineSeconds + // is exceeded if analysis is not used. Default is false. + // +optional + ProgressDeadlineAbort bool `json:"progressDeadlineAbort,omitempty" protobuf:"varint,12,opt,name=progressDeadlineAbort"` + // RestartAt indicates when all the pods of a Rollout should be restarted + RestartAt *metav1.Time `json:"restartAt,omitempty" protobuf:"bytes,9,opt,name=restartAt"` + // Analysis configuration for the analysis runs to retain + Analysis *AnalysisRunStrategy `json:"analysis,omitempty" protobuf:"bytes,11,opt,name=analysis"` +} + +func (s *RolloutSpec) SetResolvedSelector(selector *metav1.LabelSelector) { + s.SelectorResolvedFromRef = true + s.Selector = selector +} + +func (s *RolloutSpec) SetResolvedTemplate(template corev1.PodTemplateSpec) { + s.TemplateResolvedFromRef = true + s.Template = template +} + +func (s *RolloutSpec) EmptyTemplate() bool { + if len(s.Template.Labels) > 0 { + return false + } + if len(s.Template.Annotations) > 0 { + return false + } + return true +} + +func (s *RolloutSpec) MarshalJSON() ([]byte, error) { + type Alias RolloutSpec + + if s.TemplateResolvedFromRef || s.SelectorResolvedFromRef { + obj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&struct { + Alias `json:",inline"` + }{ + Alias: (Alias)(*s), + }) + if err != nil { + return nil, err + } + if s.TemplateResolvedFromRef { + unstructured.RemoveNestedField(obj, "template") + } + if s.SelectorResolvedFromRef { + unstructured.RemoveNestedField(obj, "selector") + } + + return json.Marshal(obj) + } + return json.Marshal(&struct{ *Alias }{ + Alias: (*Alias)(s), + }) +} + +// ObjectRef holds a references to the Kubernetes object +type ObjectRef struct { + // API Version of the referent + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // Kind of the referent + Kind string `json:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` + // Name of the referent + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` +} + +const ( + // DefaultRolloutUniqueLabelKey is the default key of the selector that is added + // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets + // to select new pods (and old pods being select by new ReplicaSet). + DefaultRolloutUniqueLabelKey string = "rollouts-pod-template-hash" + // DefaultReplicaSetScaleDownDeadlineAnnotationKey is the default key attached to an old stable ReplicaSet after + // the rollout transitioned to a new version. It contains the time when the controller can scale down the RS. + DefaultReplicaSetScaleDownDeadlineAnnotationKey = "scale-down-deadline" + // ManagedByRolloutKey is the key used to indicate which rollout(s) manage a resource but doesn't own it. 
+ ManagedByRolloutsKey = "argo-rollouts.argoproj.io/managed-by-rollouts" + // DefaultReplicaSetRestartAnnotationKey indicates that the ReplicaSet with this annotation was restarted at the + // time listed in the value + DefaultReplicaSetRestartAnnotationKey = "argo-rollouts.argoproj.io/restarted-after" + // LabelKeyControllerInstanceID is the label the controller uses for the rollout, experiment, analysis segregation + // between controllers. Controllers will only operate on objects with the same instanceID as the controller. + LabelKeyControllerInstanceID = "argo-rollouts.argoproj.io/controller-instance-id" +) + +// RolloutStrategy defines strategy to apply during next rollout +type RolloutStrategy struct { + // +optional + BlueGreen *BlueGreenStrategy `json:"blueGreen,omitempty" protobuf:"bytes,1,opt,name=blueGreen"` + // +optional + Canary *CanaryStrategy `json:"canary,omitempty" protobuf:"bytes,2,opt,name=canary"` +} + +// BlueGreenStrategy defines parameters for Blue Green deployment +type BlueGreenStrategy struct { + // Name of the service that the rollout modifies as the active service. + ActiveService string `json:"activeService" protobuf:"bytes,1,opt,name=activeService"` + // Name of the service that the rollout modifies as the preview service. + // +optional + PreviewService string `json:"previewService,omitempty" protobuf:"bytes,2,opt,name=previewService"` + // PreviewReplicaCount is the number of replicas to run for the preview stack before the + // switchover. Once the rollout is resumed the desired replicaset will be full scaled up before the switch occurs + // +optional + PreviewReplicaCount *int32 `json:"previewReplicaCount,omitempty" protobuf:"varint,3,opt,name=previewReplicaCount"` + // AutoPromotionEnabled indicates if the rollout should automatically promote the new ReplicaSet + // to the active service or enter a paused state. If not specified, the default value is true. + // +optional + AutoPromotionEnabled *bool `json:"autoPromotionEnabled,omitempty" protobuf:"varint,4,opt,name=autoPromotionEnabled"` + // AutoPromotionSeconds is a duration in seconds in which to delay auto-promotion (default: 0). + // The countdown begins after the preview ReplicaSet have reached full availability. + // This option is ignored if autoPromotionEnabled is set to false. + // +optional + AutoPromotionSeconds int32 `json:"autoPromotionSeconds,omitempty" protobuf:"varint,5,opt,name=autoPromotionSeconds"` + // MaxUnavailable The maximum number of pods that can be unavailable during a restart operation. + // Defaults to 25% of total replicas. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,6,opt,name=maxUnavailable"` + // ScaleDownDelaySeconds adds a delay before scaling down the previous replicaset. + // If omitted, the Rollout waits 30 seconds before scaling down the previous ReplicaSet. + // A minimum of 30 seconds is recommended to ensure IP table propagation across the nodes in + // a cluster. 
See https://github.com/argoproj/argo-rollouts/issues/19#issuecomment-476329960 for + // more information + // +optional + ScaleDownDelaySeconds *int32 `json:"scaleDownDelaySeconds,omitempty" protobuf:"varint,7,opt,name=scaleDownDelaySeconds"` + // ScaleDownDelayRevisionLimit limits the number of old RS that can run at one time before getting scaled down + // +optional + ScaleDownDelayRevisionLimit *int32 `json:"scaleDownDelayRevisionLimit,omitempty" protobuf:"varint,8,opt,name=scaleDownDelayRevisionLimit"` + // PrePromotionAnalysis configuration to run analysis before a selector switch + PrePromotionAnalysis *RolloutAnalysis `json:"prePromotionAnalysis,omitempty" protobuf:"bytes,9,opt,name=prePromotionAnalysis"` + // AntiAffinity enables anti-affinity rules for Blue Green deployment + // +optional + AntiAffinity *AntiAffinity `json:"antiAffinity,omitempty" protobuf:"bytes,10,opt,name=antiAffinity"` + // PostPromotionAnalysis configuration to run analysis after a selector switch + PostPromotionAnalysis *RolloutAnalysis `json:"postPromotionAnalysis,omitempty" protobuf:"bytes,11,opt,name=postPromotionAnalysis"` + // PreviewMetadata specify labels and annotations which will be attached to the preview pods for + // the duration which they act as a preview pod, and will be removed after + PreviewMetadata *PodTemplateMetadata `json:"previewMetadata,omitempty" protobuf:"bytes,12,opt,name=previewMetadata"` + // ActiveMetadata specify labels and annotations which will be attached to the active pods for + // the duration which they act as a active pod, and will be removed after + ActiveMetadata *PodTemplateMetadata `json:"activeMetadata,omitempty" protobuf:"bytes,13,opt,name=activeMetadata"` + // AbortScaleDownDelaySeconds adds a delay in second before scaling down the preview replicaset + // if update is aborted. 0 means not to scale down. + // Default is 30 second + // +optional + AbortScaleDownDelaySeconds *int32 `json:"abortScaleDownDelaySeconds,omitempty" protobuf:"varint,14,opt,name=abortScaleDownDelaySeconds"` +} + +// AntiAffinity defines which inter-pod scheduling rule to use for anti-affinity injection +type AntiAffinity struct { + // +optional + PreferredDuringSchedulingIgnoredDuringExecution *PreferredDuringSchedulingIgnoredDuringExecution `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=preferredDuringSchedulingIgnoredDuringExecution"` + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *RequiredDuringSchedulingIgnoredDuringExecution `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` +} + +// PreferredDuringSchedulingIgnoredDuringExecution defines the weight of the anti-affinity injection +type PreferredDuringSchedulingIgnoredDuringExecution struct { + // Weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` +} + +// RequiredDuringSchedulingIgnoredDuringExecution defines inter-pod scheduling rule to be RequiredDuringSchedulingIgnoredDuringExecution +type RequiredDuringSchedulingIgnoredDuringExecution struct{} + +// CanaryStrategy defines parameters for a Replica Based Canary +type CanaryStrategy struct { + // CanaryService holds the name of a service which selects pods with canary version and don't select any pods with stable version. 
+ // +optional + CanaryService string `json:"canaryService,omitempty" protobuf:"bytes,1,opt,name=canaryService"` + // StableService holds the name of a service which selects pods with stable version and don't select any pods with canary version. + // +optional + StableService string `json:"stableService,omitempty" protobuf:"bytes,2,opt,name=stableService"` + // Steps define the order of phases to execute the canary deployment + // +optional + Steps []CanaryStep `json:"steps,omitempty" protobuf:"bytes,3,rep,name=steps"` + // TrafficRouting hosts all the supported service meshes supported to enable more fine-grained traffic routing + TrafficRouting *RolloutTrafficRouting `json:"trafficRouting,omitempty" protobuf:"bytes,4,opt,name=trafficRouting"` + + // MaxUnavailable The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of total pods at the start of update (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 25% is used. + // Example: when this is set to 30%, the old RC can be scaled down by 30% + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that at least 70% of original number of pods are available at all times + // during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,5,opt,name=maxUnavailable"` + + // MaxSurge The maximum number of pods that can be scheduled above the original number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of total pods at + // the start of the update (ex: 10%). This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 25% is used. + // Example: when this is set to 30%, the new RC can be scaled up by 30% + // immediately when the rolling update starts. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of original pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,6,opt,name=maxSurge"` + // Analysis runs a separate analysisRun while all the steps execute. This is intended to be a continuous validation of the new ReplicaSet + Analysis *RolloutAnalysisBackground `json:"analysis,omitempty" protobuf:"bytes,7,opt,name=analysis"` + // AntiAffinity enables anti-affinity rules for Canary deployment + // +optional + AntiAffinity *AntiAffinity `json:"antiAffinity,omitempty" protobuf:"bytes,8,opt,name=antiAffinity"` + // CanaryMetadata specify labels and annotations which will be attached to the canary pods for + // the duration which they act as a canary, and will be removed after + CanaryMetadata *PodTemplateMetadata `json:"canaryMetadata,omitempty" protobuf:"bytes,9,opt,name=canaryMetadata"` + // StableMetadata specify labels and annotations which will be attached to the stable pods for + // the duration which they act as a canary, and will be removed after + StableMetadata *PodTemplateMetadata `json:"stableMetadata,omitempty" protobuf:"bytes,10,opt,name=stableMetadata"` + + // ScaleDownDelaySeconds adds a delay before scaling down the previous ReplicaSet when the + // canary strategy is used with traffic routing (default 30 seconds). 
A delay in scaling down
+ // the previous ReplicaSet is needed after switching the stable service selector to point to
+ // the new ReplicaSet, in order to give time for traffic providers to re-target the new pods.
+ // This value is ignored with basic, replica-weighted canary without traffic routing.
+ // +optional
+ ScaleDownDelaySeconds *int32 `json:"scaleDownDelaySeconds,omitempty" protobuf:"varint,11,opt,name=scaleDownDelaySeconds"`
+ // ScaleDownDelayRevisionLimit limits the number of old RS that can run at one time before getting scaled down
+ // +optional
+ ScaleDownDelayRevisionLimit *int32 `json:"scaleDownDelayRevisionLimit,omitempty" protobuf:"varint,12,opt,name=scaleDownDelayRevisionLimit"`
+ // AbortScaleDownDelaySeconds adds a delay in seconds before scaling down the canary pods when update
+ // is aborted for canary strategy with traffic routing (not applicable for basic canary).
+ // 0 means canary pods are not scaled down.
+ // Default is 30 seconds.
+ // +optional
+ AbortScaleDownDelaySeconds *int32 `json:"abortScaleDownDelaySeconds,omitempty" protobuf:"varint,13,opt,name=abortScaleDownDelaySeconds"`
+ // DynamicStableScale is a traffic routing feature which dynamically scales the stable
+ // ReplicaSet to minimize total pods which are running during an update. This is calculated by
+ // scaling down the stable as traffic is increased to canary. When disabled (the default behavior)
+ // the stable ReplicaSet remains fully scaled to support instantaneous aborts.
+ DynamicStableScale bool `json:"dynamicStableScale,omitempty" protobuf:"varint,14,opt,name=dynamicStableScale"`
+}
+
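As a rough illustration of the MaxSurge/MaxUnavailable semantics documented above, the sketch below resolves both fields against a replica count with the apimachinery intstr helpers; it is an approximation of the rounding rules described in the comments, not the controller's actual scaling logic, and it assumes the GetScaledValueFromIntOrPercent helper available in recent apimachinery releases.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	replicas := 10
	maxSurge := intstr.FromString("30%")
	maxUnavailable := intstr.FromInt(2)

	// Percentages for surge round up, percentages for unavailable round down,
	// mirroring the field comments above.
	surge, err := intstr.GetScaledValueFromIntOrPercent(&maxSurge, replicas, true)
	if err != nil {
		panic(err)
	}
	unavailable, err := intstr.GetScaledValueFromIntOrPercent(&maxUnavailable, replicas, false)
	if err != nil {
		panic(err)
	}

	// With 10 replicas this prints: up to 3 extra pods, at most 2 unavailable
	fmt.Printf("up to %d extra pods, at most %d unavailable\n", surge, unavailable)
}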
+// AnalysisRunStrategy configuration for the analysis runs and experiments to retain
+type AnalysisRunStrategy struct {
+ // SuccessfulRunHistoryLimit limits the number of old successful analysis runs and experiments to be retained in a history
+ SuccessfulRunHistoryLimit *int32 `json:"successfulRunHistoryLimit,omitempty" protobuf:"varint,1,opt,name=successfulRunHistoryLimit"`
+ // UnsuccessfulRunHistoryLimit limits the number of old unsuccessful analysis runs and experiments to be retained in a history.
+ // Stages for unsuccessful: "Error", "Failed", "Inconclusive"
+ UnsuccessfulRunHistoryLimit *int32 `json:"unsuccessfulRunHistoryLimit,omitempty" protobuf:"varint,2,opt,name=unsuccessfulRunHistoryLimit"`
+}
+
+// ALBTrafficRouting configuration for ALB ingress controller to control traffic routing
+type ALBTrafficRouting struct {
+ // Ingress refers to the name of an `Ingress` resource in the same namespace as the `Rollout`
+ Ingress string `json:"ingress" protobuf:"bytes,1,opt,name=ingress"`
+ // ServicePort refers to the port that the Ingress action should route traffic to
+ ServicePort int32 `json:"servicePort" protobuf:"varint,2,opt,name=servicePort"`
+ // RootService references the service in the ingress that the controller should add the action to
+ RootService string `json:"rootService,omitempty" protobuf:"bytes,3,opt,name=rootService"`
+ // AnnotationPrefix has to match the configured annotation prefix on the alb ingress controller
+ // +optional
+ AnnotationPrefix string `json:"annotationPrefix,omitempty" protobuf:"bytes,4,opt,name=annotationPrefix"`
+}
+
+// RolloutTrafficRouting hosts all the different configuration for supported service meshes to enable more fine-grained traffic routing
+type RolloutTrafficRouting struct {
+ // Istio holds Istio specific configuration to route traffic
+ Istio *IstioTrafficRouting `json:"istio,omitempty" protobuf:"bytes,1,opt,name=istio"`
+ // Nginx holds Nginx Ingress specific configuration to route traffic
+ Nginx *NginxTrafficRouting `json:"nginx,omitempty" protobuf:"bytes,2,opt,name=nginx"`
+ // ALB holds ALB Ingress specific configuration to route traffic
+ ALB *ALBTrafficRouting `json:"alb,omitempty" protobuf:"bytes,3,opt,name=alb"`
+ // SMI holds TrafficSplit specific configuration to route traffic
+ SMI *SMITrafficRouting `json:"smi,omitempty" protobuf:"bytes,4,opt,name=smi"`
+ // Ambassador holds specific configuration to use Ambassador to route traffic
+ Ambassador *AmbassadorTrafficRouting `json:"ambassador,omitempty" protobuf:"bytes,5,opt,name=ambassador"`
+}
+
+// AmbassadorTrafficRouting defines the configuration required to use Ambassador as traffic
+// router
+type AmbassadorTrafficRouting struct {
+ // Mappings refer to the names of the Ambassador Mappings used to route traffic to the
+ // service
+ Mappings []string `json:"mappings" protobuf:"bytes,1,rep,name=mappings"`
+}
+
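To make the shape of these types concrete, here is a hedged example of a RolloutTrafficRouting value that routes through an ALB ingress; the ingress and service names are invented, and the import path for this package is an assumption to be adjusted to the real module path.

package main

import (
	"fmt"

	// Assumed import path for this package; adjust to wherever this v1alpha1 package actually lives.
	rolloutsv1alpha1 "github.com/example/okra/api/rollouts/v1alpha1"
)

func main() {
	tr := rolloutsv1alpha1.RolloutTrafficRouting{
		ALB: &rolloutsv1alpha1.ALBTrafficRouting{
			Ingress:     "my-ingress",      // Ingress in the Rollout's namespace (made-up name)
			ServicePort: 443,               // port the ALB action should route traffic to
			RootService: "my-root-service", // service whose ALB action the controller rewrites
		},
	}
	fmt.Printf("%+v\n", *tr.ALB)
}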
+// SMITrafficRouting configuration for TrafficSplit Custom Resource to control traffic routing
+type SMITrafficRouting struct {
+ // RootService holds the name of the root service that clients use to communicate.
+ // +optional
+ RootService string `json:"rootService,omitempty" protobuf:"bytes,1,opt,name=rootService"`
+ // TrafficSplitName holds the name of the TrafficSplit.
+ // +optional
+ TrafficSplitName string `json:"trafficSplitName,omitempty" protobuf:"bytes,2,opt,name=trafficSplitName"`
+}
+
+// NginxTrafficRouting configuration for Nginx ingress controller to control traffic routing
+type NginxTrafficRouting struct {
+ // AnnotationPrefix has to match the configured annotation prefix on the nginx ingress controller
+ // +optional
+ AnnotationPrefix string `json:"annotationPrefix,omitempty" protobuf:"bytes,1,opt,name=annotationPrefix"`
+ // StableIngress refers to the name of an `Ingress` resource in the same namespace as the `Rollout`
+ StableIngress string `json:"stableIngress" protobuf:"bytes,2,opt,name=stableIngress"`
+ // +optional
+ AdditionalIngressAnnotations map[string]string `json:"additionalIngressAnnotations,omitempty" protobuf:"bytes,3,rep,name=additionalIngressAnnotations"`
+}
+
+// IstioTrafficRouting configuration for Istio service mesh to enable fine-grained configuration
+type IstioTrafficRouting struct {
+ // VirtualService references an Istio VirtualService to modify to shape traffic
+ VirtualService *IstioVirtualService `json:"virtualService,omitempty" protobuf:"bytes,1,opt,name=virtualService"`
+ // DestinationRule references an Istio DestinationRule to modify to shape traffic
+ DestinationRule *IstioDestinationRule `json:"destinationRule,omitempty" protobuf:"bytes,2,opt,name=destinationRule"`
+ // VirtualServices references a list of Istio VirtualService to modify to shape traffic
+ VirtualServices []IstioVirtualService `json:"virtualServices,omitempty" protobuf:"bytes,3,opt,name=virtualServices"`
+}
+
+// IstioVirtualService holds information on the virtual service the rollout needs to modify
+type IstioVirtualService struct {
+ // Name holds the name of the VirtualService
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // A list of HTTP routes within VirtualService to edit. If omitted, VirtualService must have a single route of this type.
+ Routes []string `json:"routes,omitempty" protobuf:"bytes,2,rep,name=routes"`
+ // A list of TLS/HTTPS routes within VirtualService to edit. If omitted, VirtualService must have a single route of this type.
+ TLSRoutes []TLSRoute `json:"tlsRoutes,omitempty" protobuf:"bytes,3,rep,name=tlsRoutes"`
+}
+
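For a sense of how the Istio fields compose, a hedged example selecting a single VirtualService route plus a DestinationRule subset pair (the IstioDestinationRule type appears just below); all names and the import path are invented for illustration.

package main

import (
	"fmt"

	// Assumed import path for this package; adjust to the real module path.
	rolloutsv1alpha1 "github.com/example/okra/api/rollouts/v1alpha1"
)

func main() {
	istio := rolloutsv1alpha1.IstioTrafficRouting{
		VirtualService: &rolloutsv1alpha1.IstioVirtualService{
			Name:   "demo-vsvc",         // VirtualService to edit (made-up name)
			Routes: []string{"primary"}, // HTTP route within it whose weights are shifted
		},
		DestinationRule: &rolloutsv1alpha1.IstioDestinationRule{
			Name:             "demo-destrule",
			CanarySubsetName: "canary",
			StableSubsetName: "stable",
		},
	}
	fmt.Printf("routing via VirtualService %q\n", istio.VirtualService.Name)
}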
+// TLSRoute holds the information on the virtual service's TLS/HTTPS routes that are desired to be matched for changing weights.
+type TLSRoute struct {
+ // Port number of the TLS Route desired to be matched in the given Istio VirtualService.
+ Port int64 `json:"port,omitempty" protobuf:"bytes,1,opt,name=port"`
+ // A list of all the SNI Hosts of the TLS Route desired to be matched in the given Istio VirtualService.
+ SNIHosts []string `json:"sniHosts,omitempty" protobuf:"bytes,2,rep,name=sniHosts"`
+}
+
+// IstioDestinationRule is a reference to an Istio DestinationRule to modify and shape traffic
+type IstioDestinationRule struct {
+ // Name holds the name of the DestinationRule
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // CanarySubsetName is the subset name to modify labels with canary ReplicaSet pod template hash value
+ CanarySubsetName string `json:"canarySubsetName" protobuf:"bytes,2,opt,name=canarySubsetName"`
+ // StableSubsetName is the subset name to modify labels with stable ReplicaSet pod template hash value
+ StableSubsetName string `json:"stableSubsetName" protobuf:"bytes,3,opt,name=stableSubsetName"`
+}
+
+// RolloutExperimentStep defines a template that is used to create an experiment for a step
+type RolloutExperimentStep struct {
+ // Templates defines which templates should be added to the experiment. Should be non-nil
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Templates []RolloutExperimentTemplate `json:"templates" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=templates"`
+ // Duration is a duration string (e.g. 30s, 5m, 1h) that the experiment should run for
+ // +optional
+ Duration DurationString `json:"duration,omitempty" protobuf:"bytes,2,opt,name=duration,casttype=DurationString"`
+ // Analyses reference which analysis templates to run with the experiment
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Analyses []RolloutExperimentStepAnalysisTemplateRef `json:"analyses,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,3,rep,name=analyses"`
+}
+
+type RolloutExperimentStepAnalysisTemplateRef struct {
+ // Name is a name for this analysis template invocation
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // TemplateName reference of the AnalysisTemplate name used by the Experiment to create the run
+ TemplateName string `json:"templateName" protobuf:"bytes,2,opt,name=templateName"`
+ // Whether to look for the templateName at cluster scope or namespace scope
+ // +optional
+ ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,3,opt,name=clusterScope"`
+ // Args the arguments that will be added to the AnalysisRuns
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Args []AnalysisRunArgument `json:"args,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,4,rep,name=args"`
+ // RequiredForCompletion blocks the Experiment from completing until the analysis has completed
+ RequiredForCompletion bool `json:"requiredForCompletion,omitempty" protobuf:"varint,5,opt,name=requiredForCompletion"`
+}
+
+// RolloutExperimentTemplate defines the template used to create experiments for the Rollout's experiment canary step
+type RolloutExperimentTemplate struct {
+ // Name is the name of the template passed to the experiment
+ Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
+ // SpecRef indicates where the rollout should get the RS template from
+ SpecRef ReplicaSetSpecRef `json:"specRef" protobuf:"bytes,2,opt,name=specRef,casttype=ReplicaSetSpecRef"`
+ // Replicas replica count for the template
+ // +optional
+ Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,3,opt,name=replicas"`
+ // Metadata sets labels and annotations to use for the RS created from the template
+ // +optional
+ Metadata PodTemplateMetadata `json:"metadata,omitempty" protobuf:"bytes,4,opt,name=metadata"`
+ // Selector overrides the selector to be used for the template's
ReplicaSet. If omitted, will + // use the same selector as the Rollout + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,5,opt,name=selector"` + // Weight sets the percentage of traffic the template's replicas should receive + Weight *int32 `json:"weight,omitempty" protobuf:"varint,6,opt,name=weight"` +} + +// PodTemplateMetadata extra labels to add to the template +type PodTemplateMetadata struct { + // Labels Additional labels to add to the experiment + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,1,rep,name=labels"` + // Annotations additional annotations to add to the experiment + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,2,rep,name=annotations"` +} + +// ReplicaSetSpecRef defines which RS that the experiment's template will use. +type ReplicaSetSpecRef string + +const ( + // CanarySpecRef indicates the RS template should be pulled from the newRS's template + CanarySpecRef ReplicaSetSpecRef = "canary" + // StableSpecRef indicates the RS template should be pulled from the stableRS's template + StableSpecRef ReplicaSetSpecRef = "stable" +) + +// CanaryStep defines a step of a canary deployment. +type CanaryStep struct { + // SetWeight sets what percentage of the newRS should receive + SetWeight *int32 `json:"setWeight,omitempty" protobuf:"varint,1,opt,name=setWeight"` + // Pause freezes the rollout by setting spec.Paused to true. + // A Rollout will resume when spec.Paused is reset to false. + // +optional + Pause *RolloutPause `json:"pause,omitempty" protobuf:"bytes,2,opt,name=pause"` + // Experiment defines the experiment object that should be created + Experiment *RolloutExperimentStep `json:"experiment,omitempty" protobuf:"bytes,3,opt,name=experiment"` + // Analysis defines the AnalysisRun that will run for a step + Analysis *RolloutAnalysis `json:"analysis,omitempty" protobuf:"bytes,4,opt,name=analysis"` + // SetCanaryScale defines how to scale the newRS without changing traffic weight + // +optional + SetCanaryScale *SetCanaryScale `json:"setCanaryScale,omitempty" protobuf:"bytes,5,opt,name=setCanaryScale"` +} + +// SetCanaryScale defines how to scale the newRS without changing traffic weight +type SetCanaryScale struct { + // Weight sets the percentage of replicas the newRS should have + // +optional + Weight *int32 `json:"weight,omitempty" protobuf:"varint,1,opt,name=weight"` + // Replicas sets the number of replicas the newRS should have + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + // MatchTrafficWeight cancels out previously set Replicas or Weight, effectively activating SetWeight + // +optional + MatchTrafficWeight bool `json:"matchTrafficWeight,omitempty" protobuf:"varint,3,opt,name=matchTrafficWeight"` +} + +// RolloutAnalysisBackground defines a template that is used to create a background analysisRun +type RolloutAnalysisBackground struct { + RolloutAnalysis `json:",inline" protobuf:"bytes,1,opt,name=rolloutAnalysis"` + // StartingStep indicates which step the background analysis should start on + // If not listed, controller defaults to 0 + StartingStep *int32 `json:"startingStep,omitempty" protobuf:"varint,2,opt,name=startingStep"` +} + +// RolloutAnalysis defines a template that is used to create a analysisRun +type RolloutAnalysis struct { + //Templates reference to a list of analysis templates to combine for an AnalysisRun + Templates []RolloutAnalysisTemplate `json:"templates,omitempty" 
protobuf:"bytes,1,rep,name=templates"` + // Args the arguments that will be added to the AnalysisRuns + // +patchMergeKey=name + // +patchStrategy=merge + Args []AnalysisRunArgument `json:"args,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=args"` +} + +type RolloutAnalysisTemplate struct { + //TemplateName name of template to use in AnalysisRun + // +optional + TemplateName string `json:"templateName" protobuf:"bytes,1,opt,name=templateName"` + // Whether to look for the templateName at cluster scope or namespace scope + // +optional + ClusterScope bool `json:"clusterScope,omitempty" protobuf:"varint,2,opt,name=clusterScope"` +} + +// AnalysisRunArgument argument to add to analysisRun +type AnalysisRunArgument struct { + // Name argument name + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Value a hardcoded value for the argument. This field is a one of field with valueFrom + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // ValueFrom A reference to where the value is stored. This field is a one of field with valueFrom + ValueFrom *ArgumentValueFrom `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` +} + +// ArgumentValueFrom defines references to fields within resources to grab for the value (i.e. Pod Template Hash) +type ArgumentValueFrom struct { + // PodTemplateHashValue gets the value from one of the children ReplicaSet's Pod Template Hash + PodTemplateHashValue *ValueFromPodTemplateHash `json:"podTemplateHashValue,omitempty" protobuf:"bytes,1,opt,name=podTemplateHashValue,casttype=ValueFromPodTemplateHash"` + //FieldRef + FieldRef *FieldRef `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` +} + +type FieldRef struct { + // Required: Path of the field to select in the specified API version + FieldPath string `json:"fieldPath" protobuf:"bytes,1,opt,name=fieldPath"` +} + +// ValueFromPodTemplateHash indicates which ReplicaSet pod template pod hash to use +type ValueFromPodTemplateHash string + +const ( + // Stable tells the Rollout to get the pod template hash from the stable ReplicaSet + Stable ValueFromPodTemplateHash = "Stable" + // Latest tells the Rollout to get the pod template hash from the latest ReplicaSet + Latest ValueFromPodTemplateHash = "Latest" +) + +const ( + // RolloutTypeLabel indicates how the rollout created the analysisRun + RolloutTypeLabel = "rollout-type" + // RolloutTypeStepLabel indicates that the analysisRun was created as a canary step + RolloutTypeStepLabel = "Step" + // RolloutTypeBackgroundRunLabel indicates that the analysisRun was created in Background to an execution + RolloutTypeBackgroundRunLabel = "Background" + // RolloutTypePrePromotionLabel indicates that the analysisRun was created before the active service promotion + RolloutTypePrePromotionLabel = "PrePromotion" + // RolloutTypePostPromotionLabel indicates that the analysisRun was created after the active service promotion + RolloutTypePostPromotionLabel = "PostPromotion" + // RolloutCanaryStepIndexLabel indicates which step created this analysisRun + RolloutCanaryStepIndexLabel = "step-index" +) + +// RolloutPause defines a pause stage for a rollout +type RolloutPause struct { + // Duration the amount of time to wait before moving to the next step. 
+ // +optional
+ Duration *intstr.IntOrString `json:"duration,omitempty" protobuf:"bytes,1,opt,name=duration"`
+}
+
+// DurationSeconds converts the pause duration to seconds
+// If Duration is nil, 0 is returned
+// If the Duration value is a string and does not contain a valid unit, -1 is returned
+func (p RolloutPause) DurationSeconds() int32 {
+ if p.Duration != nil {
+ if p.Duration.Type == intstr.String {
+ s, err := strconv.ParseInt(p.Duration.StrVal, 10, 32)
+ if err != nil {
+ d, err := time.ParseDuration(p.Duration.StrVal)
+ if err != nil {
+ return -1
+ }
+ return int32(d.Seconds())
+ }
+ // special case where no unit was specified
+ return int32(s)
+ }
+ return p.Duration.IntVal
+ }
+ return 0
+}
+
+// DurationFromInt creates duration in seconds from int value
+func DurationFromInt(i int) *intstr.IntOrString {
+ d := intstr.FromInt(i)
+ return &d
+}
+
+// DurationFromString creates duration from string
+// value must be a string representation of an int with optional time unit (see time.ParseDuration)
+func DurationFromString(s string) *intstr.IntOrString {
+ d := intstr.FromString(s)
+ return &d
+}
+
+// PauseReason enumerates the reasons for which a rollout can pause
+type PauseReason string
+
+const (
+ // PauseReasonInconclusiveAnalysis pauses rollout when rollout has an inconclusive analysis run
+ PauseReasonInconclusiveAnalysis PauseReason = "InconclusiveAnalysisRun"
+ // PauseReasonInconclusiveExperiment pauses rollout when rollout has an inconclusive experiment
+ PauseReasonInconclusiveExperiment PauseReason = "InconclusiveExperiment"
+ // PauseReasonCanaryPauseStep pauses the rollout for a canary pause step
+ PauseReasonCanaryPauseStep PauseReason = "CanaryPauseStep"
+ // PauseReasonBlueGreenPause pauses the rollout before promoting it
+ PauseReasonBlueGreenPause PauseReason = "BlueGreenPause"
+)
+
+// PauseCondition the reason for a pause and when it started
+type PauseCondition struct {
+ Reason PauseReason `json:"reason" protobuf:"bytes,1,opt,name=reason,casttype=PauseReason"`
+ StartTime metav1.Time `json:"startTime" protobuf:"bytes,2,opt,name=startTime"`
+}
+
+// RolloutPhase is the overall phase of the rollout
+type RolloutPhase string
+
+const (
+ // RolloutPhaseHealthy indicates a rollout is healthy
+ RolloutPhaseHealthy RolloutPhase = "Healthy"
+ // RolloutPhaseDegraded indicates a rollout is degraded (e.g. pod unavailability, misconfiguration)
+ RolloutPhaseDegraded RolloutPhase = "Degraded"
+ // RolloutPhaseProgressing indicates a rollout is not yet healthy but still making progress towards a healthy state
+ RolloutPhaseProgressing RolloutPhase = "Progressing"
+ // RolloutPhasePaused indicates a rollout is not yet healthy and will not make progress until unpaused
+ RolloutPhasePaused RolloutPhase = "Paused"
+)
+
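Putting the canary step, analysis argument, and pause-duration pieces above together, a hedged sketch follows; the template name, argument name, and import path are invented, and the final line exercises the parsing rules documented on DurationSeconds (plain integers, unit-suffixed strings via time.ParseDuration, and -1 for invalid strings).

package main

import (
	"fmt"

	// Assumed import path for this package; adjust to the real module path.
	rolloutsv1alpha1 "github.com/example/okra/api/rollouts/v1alpha1"
)

func int32Ptr(i int32) *int32 { return &i }

func main() {
	latest := rolloutsv1alpha1.Latest
	steps := []rolloutsv1alpha1.CanaryStep{
		// Shift 20% of traffic to the new ReplicaSet.
		{SetWeight: int32Ptr(20)},
		// Pause for five minutes before the next step.
		{Pause: &rolloutsv1alpha1.RolloutPause{Duration: rolloutsv1alpha1.DurationFromString("5m")}},
		// Run an analysis, passing the newest pod template hash as an argument.
		{Analysis: &rolloutsv1alpha1.RolloutAnalysis{
			Templates: []rolloutsv1alpha1.RolloutAnalysisTemplate{{TemplateName: "success-rate"}},
			Args: []rolloutsv1alpha1.AnalysisRunArgument{{
				Name:      "canary-hash",
				ValueFrom: &rolloutsv1alpha1.ArgumentValueFrom{PodTemplateHashValue: &latest},
			}},
		}},
	}

	// "5m" is parsed with time.ParseDuration, so this prints 300.
	fmt.Println(steps[1].Pause.DurationSeconds())
}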
+// RolloutStatus is the status for a Rollout resource
+type RolloutStatus struct {
+ // Abort cancels the current rollout progression
+ Abort bool `json:"abort,omitempty" protobuf:"varint,1,opt,name=abort"`
+ // PauseConditions indicates why the rollout is currently paused
+ PauseConditions []PauseCondition `json:"pauseConditions,omitempty" protobuf:"bytes,2,rep,name=pauseConditions"`
+ // ControllerPause indicates the controller has paused the rollout. It is set to true when
+ // the controller adds a pause condition. This field helps to discern the scenario where a
+ // rollout was resumed after being paused by the controller (e.g. via the plugin). In that
+ // situation, the pauseConditions would have been cleared, but controllerPause would still be
+ // set to true.
+ ControllerPause bool `json:"controllerPause,omitempty" protobuf:"varint,3,opt,name=controllerPause"`
+ // AbortedAt indicates the controller reconciled an aborted rollout. The controller uses this to understand if
+ // the controller needs to do some specific work when a Rollout is aborted. For example, the reconcileAbort is used
+ // to indicate if the Rollout should enter an aborted state when the latest AnalysisRun is a failure, or the controller
+ // has already put the Rollout into an aborted state and should create a new AnalysisRun.
+ AbortedAt *metav1.Time `json:"abortedAt,omitempty" protobuf:"bytes,4,opt,name=abortedAt"`
+ // CurrentPodHash the hash of the current pod template
+ // +optional
+ CurrentPodHash string `json:"currentPodHash,omitempty" protobuf:"bytes,5,opt,name=currentPodHash"`
+ // CurrentStepHash the hash of the current list of steps for the current strategy. This is used to detect when the
+ // list of current steps changes
+ // +optional
+ CurrentStepHash string `json:"currentStepHash,omitempty" protobuf:"bytes,6,opt,name=currentStepHash"`
+ // Total number of non-terminated pods targeted by this rollout (their labels match the selector).
+ // +optional
+ Replicas int32 `json:"replicas,omitempty" protobuf:"varint,7,opt,name=replicas"`
+ // Total number of non-terminated pods targeted by this rollout that have the desired template spec.
+ // +optional
+ UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,8,opt,name=updatedReplicas"`
+ // Total number of ready pods targeted by this rollout.
+ // +optional
+ ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,9,opt,name=readyReplicas"`
+ // Total number of available pods (ready for at least minReadySeconds) targeted by this rollout.
+ // +optional
+ AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,10,opt,name=availableReplicas"`
+ // CurrentStepIndex defines which step of the rollout the controller is currently on. If the current step index is null, the
+ // controller will execute the rollout.
+ // +optional
+ CurrentStepIndex *int32 `json:"currentStepIndex,omitempty" protobuf:"varint,11,opt,name=currentStepIndex"`
+ // Count of hash collisions for the Rollout. The Rollout controller uses this
+ // field as a collision avoidance mechanism when it needs to create the name for the
+ // newest ReplicaSet.
+ // +optional
+ CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,12,opt,name=collisionCount"`
+ // The generation observed by the rollout controller from metadata.generation
+ // +optional
+ ObservedGeneration string `json:"observedGeneration,omitempty" protobuf:"bytes,13,opt,name=observedGeneration"`
+ // The generation of referenced workload observed by the rollout controller
+ // +optional
+ WorkloadObservedGeneration string `json:"workloadObservedGeneration,omitempty" protobuf:"bytes,24,opt,name=workloadObservedGeneration"`
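Since ObservedGeneration is stored as a string here while metadata.generation is an int64, callers typically normalize before comparing; a hedged sketch of such a freshness check (the helper name is invented, and real observedGeneration values may take other forms):

package main

import (
	"fmt"
	"strconv"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// statusIsCurrent is a hypothetical helper: status fields such as Phase are only
// trustworthy once the controller has observed the latest spec generation.
func statusIsCurrent(meta metav1.ObjectMeta, observedGeneration string) bool {
	return observedGeneration == strconv.FormatInt(meta.Generation, 10)
}

func main() {
	meta := metav1.ObjectMeta{Name: "demo", Generation: 7}
	fmt.Println(statusIsCurrent(meta, "7")) // true: status reflects the current spec
	fmt.Println(statusIsCurrent(meta, "6")) // false: status is stale
}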
+ // Conditions is a list of conditions a rollout can have.
+ // +optional
+ Conditions []RolloutCondition `json:"conditions,omitempty" protobuf:"bytes,14,rep,name=conditions"`
+ // Canary describes the state of the canary rollout
+ // +optional
+ Canary CanaryStatus `json:"canary,omitempty" protobuf:"bytes,15,opt,name=canary"`
+ // BlueGreen describes the state of the bluegreen rollout
+ // +optional
+ BlueGreen BlueGreenStatus `json:"blueGreen,omitempty" protobuf:"bytes,16,opt,name=blueGreen"`
+ // HPAReplicas the number of non-terminated replicas that are receiving active traffic
+ // +optional
+ HPAReplicas int32 `json:"HPAReplicas,omitempty" protobuf:"varint,17,opt,name=HPAReplicas"`
+ // Selector that identifies the pods that are receiving active traffic
+ // +optional
+ Selector string `json:"selector,omitempty" protobuf:"bytes,18,opt,name=selector"`
+ // StableRS indicates the replicaset that has successfully rolled out
+ // +optional
+ StableRS string `json:"stableRS,omitempty" protobuf:"bytes,19,opt,name=stableRS"`
+ // RestartedAt indicates last time a Rollout was restarted
+ RestartedAt *metav1.Time `json:"restartedAt,omitempty" protobuf:"bytes,20,opt,name=restartedAt"`
+ // PromoteFull indicates if the rollout should perform a full promotion, skipping analysis and pauses.
+ PromoteFull bool `json:"promoteFull,omitempty" protobuf:"varint,21,opt,name=promoteFull"`
+ // Phase is the rollout phase. Clients should only rely on the value if status.observedGeneration equals metadata.generation
+ Phase RolloutPhase `json:"phase,omitempty" protobuf:"bytes,22,opt,name=phase,casttype=RolloutPhase"`
+ // Message provides details on why the rollout is in its current phase
+ Message string `json:"message,omitempty" protobuf:"bytes,23,opt,name=message"`
+}
+
+// BlueGreenStatus status fields that only pertain to the blueGreen rollout
+type BlueGreenStatus struct {
+ // PreviewSelector indicates which replica set the preview service is serving traffic to
+ // +optional
+ PreviewSelector string `json:"previewSelector,omitempty" protobuf:"bytes,1,opt,name=previewSelector"`
+ // ActiveSelector indicates which replica set the active service is serving traffic to
+ // +optional
+ ActiveSelector string `json:"activeSelector,omitempty" protobuf:"bytes,2,opt,name=activeSelector"`
+ // ScaleUpPreviewCheckPoint indicates that the Replicaset receiving traffic from the preview service is ready to be scaled up after the rollout is unpaused
+ // +optional
+ ScaleUpPreviewCheckPoint bool `json:"scaleUpPreviewCheckPoint,omitempty" protobuf:"varint,3,opt,name=scaleUpPreviewCheckPoint"`
+ // PrePromotionAnalysisRunStatus indicates the status of the current prepromotion analysis run
+ PrePromotionAnalysisRunStatus *RolloutAnalysisRunStatus `json:"prePromotionAnalysisRunStatus,omitempty" protobuf:"bytes,4,opt,name=prePromotionAnalysisRunStatus"`
+ // PostPromotionAnalysisRunStatus indicates the status of the current post promotion analysis run
+ PostPromotionAnalysisRunStatus *RolloutAnalysisRunStatus `json:"postPromotionAnalysisRunStatus,omitempty" protobuf:"bytes,5,opt,name=postPromotionAnalysisRunStatus"`
+}
+
+// CanaryStatus status fields that only pertain to the canary rollout
+type CanaryStatus struct {
+ // CurrentStepAnalysisRunStatus indicates the status of the current step analysis run
+ CurrentStepAnalysisRunStatus *RolloutAnalysisRunStatus `json:"currentStepAnalysisRunStatus,omitempty" protobuf:"bytes,1,opt,name=currentStepAnalysisRunStatus"`
+ // CurrentBackgroundAnalysisRunStatus indicates the status of the current background analysis run
+
CurrentBackgroundAnalysisRunStatus *RolloutAnalysisRunStatus `json:"currentBackgroundAnalysisRunStatus,omitempty" protobuf:"bytes,2,opt,name=currentBackgroundAnalysisRunStatus"` + // CurrentExperiment indicates the running experiment + CurrentExperiment string `json:"currentExperiment,omitempty" protobuf:"bytes,3,opt,name=currentExperiment"` + // Weights records the weights which have been set on traffic provider. Only valid when using traffic routing + Weights *TrafficWeights `json:"weights,omitempty" protobuf:"bytes,4,opt,name=weights"` +} + +// TrafficWeights describes the current status of how traffic has been split +type TrafficWeights struct { + // Canary is the current traffic weight split to canary ReplicaSet + Canary WeightDestination `json:"canary" protobuf:"bytes,1,opt,name=canary"` + // Stable is the current traffic weight split to stable ReplicaSet + Stable WeightDestination `json:"stable" protobuf:"bytes,2,opt,name=stable"` + // Additional holds the weights split to additional ReplicaSets such as experiment ReplicaSets + Additional []WeightDestination `json:"additional,omitempty" protobuf:"bytes,3,rep,name=additional"` + // Verified is an optional indicator that the weight has been verified to have taken effect. + // This is currently only applicable to ALB traffic router + Verified *bool `json:"verified,omitempty" protobuf:"bytes,4,opt,name=verified"` +} + +type WeightDestination struct { + // Weight is an percentage of traffic being sent to this destination + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // ServiceName is the Kubernetes service name traffic is being sent to + ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,2,opt,name=serviceName"` + // PodTemplateHash is the pod template hash label for this destination + PodTemplateHash string `json:"podTemplateHash,omitempty" protobuf:"bytes,3,opt,name=podTemplateHash"` +} + +type RolloutAnalysisRunStatus struct { + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + Status AnalysisPhase `json:"status" protobuf:"bytes,2,opt,name=status,casttype=AnalysisPhase"` + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` +} + +// RolloutConditionType defines the conditions of Rollout +type RolloutConditionType string + +// These are valid conditions of a rollout. +const ( + // InvalidSpec means the rollout has an invalid spec and will not progress until + // the spec is fixed. + InvalidSpec RolloutConditionType = "InvalidSpec" + // RolloutAvailable means the rollout is available, ie. the active service is pointing at a + // replicaset with the required replicas up and running for at least minReadySeconds. + RolloutAvailable RolloutConditionType = "Available" + // RolloutProgressing means the rollout is progressing. Progress for a rollout is + // considered when a new replica set is created or adopted, when pods scale + // up or old pods scale down, or when the services are updated. Progress is not estimated + // for paused rollouts. + RolloutProgressing RolloutConditionType = "Progressing" + // RolloutReplicaFailure ReplicaFailure is added in a deployment when one of its pods + // fails to be created or deleted. + RolloutReplicaFailure RolloutConditionType = "ReplicaFailure" + // RolloutPaused means that rollout is in a paused state. It is still progressing at this point. + RolloutPaused RolloutConditionType = "Paused" + // RolloutCompleted means that rollout is in a completed state. It is still progressing at this point. 
+ RolloutCompleted RolloutConditionType = "Completed" +) + +// RolloutCondition describes the state of a rollout at a certain point. +type RolloutCondition struct { + // Type of deployment condition. + Type RolloutConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RolloutConditionType"` + // Phase of the condition, one of True, False, Unknown. + Status corev1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime" protobuf:"bytes,3,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason" protobuf:"bytes,5,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message" protobuf:"bytes,6,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RolloutList is a list of Rollout resources +type RolloutList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + Items []Rollout `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/api/rollouts/v1alpha1/zz_generated.deepcopy.go b/api/rollouts/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..088c6fd --- /dev/null +++ b/api/rollouts/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,1909 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2020 The Okra authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ALBTrafficRouting) DeepCopyInto(out *ALBTrafficRouting) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ALBTrafficRouting. +func (in *ALBTrafficRouting) DeepCopy() *ALBTrafficRouting { + if in == nil { + return nil + } + out := new(ALBTrafficRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AmbassadorTrafficRouting) DeepCopyInto(out *AmbassadorTrafficRouting) { + *out = *in + if in.Mappings != nil { + in, out := &in.Mappings, &out.Mappings + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AmbassadorTrafficRouting. 
+func (in *AmbassadorTrafficRouting) DeepCopy() *AmbassadorTrafficRouting { + if in == nil { + return nil + } + out := new(AmbassadorTrafficRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisRun) DeepCopyInto(out *AnalysisRun) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisRun. +func (in *AnalysisRun) DeepCopy() *AnalysisRun { + if in == nil { + return nil + } + out := new(AnalysisRun) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AnalysisRun) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisRunArgument) DeepCopyInto(out *AnalysisRunArgument) { + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(ArgumentValueFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisRunArgument. +func (in *AnalysisRunArgument) DeepCopy() *AnalysisRunArgument { + if in == nil { + return nil + } + out := new(AnalysisRunArgument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisRunList) DeepCopyInto(out *AnalysisRunList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AnalysisRun, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisRunList. +func (in *AnalysisRunList) DeepCopy() *AnalysisRunList { + if in == nil { + return nil + } + out := new(AnalysisRunList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AnalysisRunList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisRunSpec) DeepCopyInto(out *AnalysisRunSpec) { + *out = *in + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]Metric, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]Argument, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisRunSpec. +func (in *AnalysisRunSpec) DeepCopy() *AnalysisRunSpec { + if in == nil { + return nil + } + out := new(AnalysisRunSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnalysisRunStatus) DeepCopyInto(out *AnalysisRunStatus) { + *out = *in + if in.MetricResults != nil { + in, out := &in.MetricResults, &out.MetricResults + *out = make([]MetricResult, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartedAt != nil { + in, out := &in.StartedAt, &out.StartedAt + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisRunStatus. +func (in *AnalysisRunStatus) DeepCopy() *AnalysisRunStatus { + if in == nil { + return nil + } + out := new(AnalysisRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisRunStrategy) DeepCopyInto(out *AnalysisRunStrategy) { + *out = *in + if in.SuccessfulRunHistoryLimit != nil { + in, out := &in.SuccessfulRunHistoryLimit, &out.SuccessfulRunHistoryLimit + *out = new(int32) + **out = **in + } + if in.UnsuccessfulRunHistoryLimit != nil { + in, out := &in.UnsuccessfulRunHistoryLimit, &out.UnsuccessfulRunHistoryLimit + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisRunStrategy. +func (in *AnalysisRunStrategy) DeepCopy() *AnalysisRunStrategy { + if in == nil { + return nil + } + out := new(AnalysisRunStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisTemplate) DeepCopyInto(out *AnalysisTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisTemplate. +func (in *AnalysisTemplate) DeepCopy() *AnalysisTemplate { + if in == nil { + return nil + } + out := new(AnalysisTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AnalysisTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AnalysisTemplateList) DeepCopyInto(out *AnalysisTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AnalysisTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisTemplateList. +func (in *AnalysisTemplateList) DeepCopy() *AnalysisTemplateList { + if in == nil { + return nil + } + out := new(AnalysisTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AnalysisTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AnalysisTemplateSpec) DeepCopyInto(out *AnalysisTemplateSpec) { + *out = *in + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]Metric, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]Argument, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnalysisTemplateSpec. +func (in *AnalysisTemplateSpec) DeepCopy() *AnalysisTemplateSpec { + if in == nil { + return nil + } + out := new(AnalysisTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AntiAffinity) DeepCopyInto(out *AntiAffinity) { + *out = *in + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = new(PreferredDuringSchedulingIgnoredDuringExecution) + **out = **in + } + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(RequiredDuringSchedulingIgnoredDuringExecution) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AntiAffinity. +func (in *AntiAffinity) DeepCopy() *AntiAffinity { + if in == nil { + return nil + } + out := new(AntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Argument) DeepCopyInto(out *Argument) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(ValueFrom) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Argument. +func (in *Argument) DeepCopy() *Argument { + if in == nil { + return nil + } + out := new(Argument) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArgumentValueFrom) DeepCopyInto(out *ArgumentValueFrom) { + *out = *in + if in.PodTemplateHashValue != nil { + in, out := &in.PodTemplateHashValue, &out.PodTemplateHashValue + *out = new(ValueFromPodTemplateHash) + **out = **in + } + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(FieldRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArgumentValueFrom. +func (in *ArgumentValueFrom) DeepCopy() *ArgumentValueFrom { + if in == nil { + return nil + } + out := new(ArgumentValueFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *BlueGreenStatus) DeepCopyInto(out *BlueGreenStatus) { + *out = *in + if in.PrePromotionAnalysisRunStatus != nil { + in, out := &in.PrePromotionAnalysisRunStatus, &out.PrePromotionAnalysisRunStatus + *out = new(RolloutAnalysisRunStatus) + **out = **in + } + if in.PostPromotionAnalysisRunStatus != nil { + in, out := &in.PostPromotionAnalysisRunStatus, &out.PostPromotionAnalysisRunStatus + *out = new(RolloutAnalysisRunStatus) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenStatus. +func (in *BlueGreenStatus) DeepCopy() *BlueGreenStatus { + if in == nil { + return nil + } + out := new(BlueGreenStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BlueGreenStrategy) DeepCopyInto(out *BlueGreenStrategy) { + *out = *in + if in.PreviewReplicaCount != nil { + in, out := &in.PreviewReplicaCount, &out.PreviewReplicaCount + *out = new(int32) + **out = **in + } + if in.AutoPromotionEnabled != nil { + in, out := &in.AutoPromotionEnabled, &out.AutoPromotionEnabled + *out = new(bool) + **out = **in + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.ScaleDownDelaySeconds != nil { + in, out := &in.ScaleDownDelaySeconds, &out.ScaleDownDelaySeconds + *out = new(int32) + **out = **in + } + if in.ScaleDownDelayRevisionLimit != nil { + in, out := &in.ScaleDownDelayRevisionLimit, &out.ScaleDownDelayRevisionLimit + *out = new(int32) + **out = **in + } + if in.PrePromotionAnalysis != nil { + in, out := &in.PrePromotionAnalysis, &out.PrePromotionAnalysis + *out = new(RolloutAnalysis) + (*in).DeepCopyInto(*out) + } + if in.AntiAffinity != nil { + in, out := &in.AntiAffinity, &out.AntiAffinity + *out = new(AntiAffinity) + (*in).DeepCopyInto(*out) + } + if in.PostPromotionAnalysis != nil { + in, out := &in.PostPromotionAnalysis, &out.PostPromotionAnalysis + *out = new(RolloutAnalysis) + (*in).DeepCopyInto(*out) + } + if in.PreviewMetadata != nil { + in, out := &in.PreviewMetadata, &out.PreviewMetadata + *out = new(PodTemplateMetadata) + (*in).DeepCopyInto(*out) + } + if in.ActiveMetadata != nil { + in, out := &in.ActiveMetadata, &out.ActiveMetadata + *out = new(PodTemplateMetadata) + (*in).DeepCopyInto(*out) + } + if in.AbortScaleDownDelaySeconds != nil { + in, out := &in.AbortScaleDownDelaySeconds, &out.AbortScaleDownDelaySeconds + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BlueGreenStrategy. +func (in *BlueGreenStrategy) DeepCopy() *BlueGreenStrategy { + if in == nil { + return nil + } + out := new(BlueGreenStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanaryStatus) DeepCopyInto(out *CanaryStatus) { + *out = *in + if in.CurrentStepAnalysisRunStatus != nil { + in, out := &in.CurrentStepAnalysisRunStatus, &out.CurrentStepAnalysisRunStatus + *out = new(RolloutAnalysisRunStatus) + **out = **in + } + if in.CurrentBackgroundAnalysisRunStatus != nil { + in, out := &in.CurrentBackgroundAnalysisRunStatus, &out.CurrentBackgroundAnalysisRunStatus + *out = new(RolloutAnalysisRunStatus) + **out = **in + } + if in.Weights != nil { + in, out := &in.Weights, &out.Weights + *out = new(TrafficWeights) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryStatus. +func (in *CanaryStatus) DeepCopy() *CanaryStatus { + if in == nil { + return nil + } + out := new(CanaryStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CanaryStep) DeepCopyInto(out *CanaryStep) { + *out = *in + if in.SetWeight != nil { + in, out := &in.SetWeight, &out.SetWeight + *out = new(int32) + **out = **in + } + if in.Pause != nil { + in, out := &in.Pause, &out.Pause + *out = new(RolloutPause) + (*in).DeepCopyInto(*out) + } + if in.Experiment != nil { + in, out := &in.Experiment, &out.Experiment + *out = new(RolloutExperimentStep) + (*in).DeepCopyInto(*out) + } + if in.Analysis != nil { + in, out := &in.Analysis, &out.Analysis + *out = new(RolloutAnalysis) + (*in).DeepCopyInto(*out) + } + if in.SetCanaryScale != nil { + in, out := &in.SetCanaryScale, &out.SetCanaryScale + *out = new(SetCanaryScale) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryStep. +func (in *CanaryStep) DeepCopy() *CanaryStep { + if in == nil { + return nil + } + out := new(CanaryStep) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CanaryStrategy) DeepCopyInto(out *CanaryStrategy) { + *out = *in + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]CanaryStep, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TrafficRouting != nil { + in, out := &in.TrafficRouting, &out.TrafficRouting + *out = new(RolloutTrafficRouting) + (*in).DeepCopyInto(*out) + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + if in.Analysis != nil { + in, out := &in.Analysis, &out.Analysis + *out = new(RolloutAnalysisBackground) + (*in).DeepCopyInto(*out) + } + if in.AntiAffinity != nil { + in, out := &in.AntiAffinity, &out.AntiAffinity + *out = new(AntiAffinity) + (*in).DeepCopyInto(*out) + } + if in.CanaryMetadata != nil { + in, out := &in.CanaryMetadata, &out.CanaryMetadata + *out = new(PodTemplateMetadata) + (*in).DeepCopyInto(*out) + } + if in.StableMetadata != nil { + in, out := &in.StableMetadata, &out.StableMetadata + *out = new(PodTemplateMetadata) + (*in).DeepCopyInto(*out) + } + if in.ScaleDownDelaySeconds != nil { + in, out := &in.ScaleDownDelaySeconds, &out.ScaleDownDelaySeconds + *out = new(int32) + **out = **in + } + if in.ScaleDownDelayRevisionLimit != nil { + in, out := &in.ScaleDownDelayRevisionLimit, &out.ScaleDownDelayRevisionLimit + *out = new(int32) + **out = **in + } + if in.AbortScaleDownDelaySeconds != nil { + in, out := &in.AbortScaleDownDelaySeconds, &out.AbortScaleDownDelaySeconds + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CanaryStrategy. +func (in *CanaryStrategy) DeepCopy() *CanaryStrategy { + if in == nil { + return nil + } + out := new(CanaryStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchMetric) DeepCopyInto(out *CloudWatchMetric) { + *out = *in + if in.MetricDataQueries != nil { + in, out := &in.MetricDataQueries, &out.MetricDataQueries + *out = make([]CloudWatchMetricDataQuery, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetric. +func (in *CloudWatchMetric) DeepCopy() *CloudWatchMetric { + if in == nil { + return nil + } + out := new(CloudWatchMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CloudWatchMetricDataQuery) DeepCopyInto(out *CloudWatchMetricDataQuery) { + *out = *in + if in.Expression != nil { + in, out := &in.Expression, &out.Expression + *out = new(string) + **out = **in + } + if in.Label != nil { + in, out := &in.Label, &out.Label + *out = new(string) + **out = **in + } + if in.MetricStat != nil { + in, out := &in.MetricStat, &out.MetricStat + *out = new(CloudWatchMetricStat) + (*in).DeepCopyInto(*out) + } + if in.Period != nil { + in, out := &in.Period, &out.Period + *out = new(intstr.IntOrString) + **out = **in + } + if in.ReturnData != nil { + in, out := &in.ReturnData, &out.ReturnData + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetricDataQuery. +func (in *CloudWatchMetricDataQuery) DeepCopy() *CloudWatchMetricDataQuery { + if in == nil { + return nil + } + out := new(CloudWatchMetricDataQuery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchMetricStat) DeepCopyInto(out *CloudWatchMetricStat) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + out.Period = in.Period +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetricStat. +func (in *CloudWatchMetricStat) DeepCopy() *CloudWatchMetricStat { + if in == nil { + return nil + } + out := new(CloudWatchMetricStat) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchMetricStatMetric) DeepCopyInto(out *CloudWatchMetricStatMetric) { + *out = *in + if in.Dimensions != nil { + in, out := &in.Dimensions, &out.Dimensions + *out = make([]CloudWatchMetricStatMetricDimension, len(*in)) + copy(*out, *in) + } + if in.Namespace != nil { + in, out := &in.Namespace, &out.Namespace + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetricStatMetric. +func (in *CloudWatchMetricStatMetric) DeepCopy() *CloudWatchMetricStatMetric { + if in == nil { + return nil + } + out := new(CloudWatchMetricStatMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CloudWatchMetricStatMetricDimension) DeepCopyInto(out *CloudWatchMetricStatMetricDimension) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloudWatchMetricStatMetricDimension. +func (in *CloudWatchMetricStatMetricDimension) DeepCopy() *CloudWatchMetricStatMetricDimension { + if in == nil { + return nil + } + out := new(CloudWatchMetricStatMetricDimension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAnalysisTemplate) DeepCopyInto(out *ClusterAnalysisTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAnalysisTemplate. 
+func (in *ClusterAnalysisTemplate) DeepCopy() *ClusterAnalysisTemplate { + if in == nil { + return nil + } + out := new(ClusterAnalysisTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterAnalysisTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterAnalysisTemplateList) DeepCopyInto(out *ClusterAnalysisTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterAnalysisTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterAnalysisTemplateList. +func (in *ClusterAnalysisTemplateList) DeepCopy() *ClusterAnalysisTemplateList { + if in == nil { + return nil + } + out := new(ClusterAnalysisTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterAnalysisTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DatadogMetric) DeepCopyInto(out *DatadogMetric) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DatadogMetric. +func (in *DatadogMetric) DeepCopy() *DatadogMetric { + if in == nil { + return nil + } + out := new(DatadogMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldRef) DeepCopyInto(out *FieldRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldRef. +func (in *FieldRef) DeepCopy() *FieldRef { + if in == nil { + return nil + } + out := new(FieldRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GraphiteMetric) DeepCopyInto(out *GraphiteMetric) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GraphiteMetric. +func (in *GraphiteMetric) DeepCopy() *GraphiteMetric { + if in == nil { + return nil + } + out := new(GraphiteMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IstioDestinationRule) DeepCopyInto(out *IstioDestinationRule) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioDestinationRule. +func (in *IstioDestinationRule) DeepCopy() *IstioDestinationRule { + if in == nil { + return nil + } + out := new(IstioDestinationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IstioTrafficRouting) DeepCopyInto(out *IstioTrafficRouting) { + *out = *in + if in.VirtualService != nil { + in, out := &in.VirtualService, &out.VirtualService + *out = new(IstioVirtualService) + (*in).DeepCopyInto(*out) + } + if in.DestinationRule != nil { + in, out := &in.DestinationRule, &out.DestinationRule + *out = new(IstioDestinationRule) + **out = **in + } + if in.VirtualServices != nil { + in, out := &in.VirtualServices, &out.VirtualServices + *out = make([]IstioVirtualService, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioTrafficRouting. +func (in *IstioTrafficRouting) DeepCopy() *IstioTrafficRouting { + if in == nil { + return nil + } + out := new(IstioTrafficRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IstioVirtualService) DeepCopyInto(out *IstioVirtualService) { + *out = *in + if in.Routes != nil { + in, out := &in.Routes, &out.Routes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TLSRoutes != nil { + in, out := &in.TLSRoutes, &out.TLSRoutes + *out = make([]TLSRoute, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IstioVirtualService. +func (in *IstioVirtualService) DeepCopy() *IstioVirtualService { + if in == nil { + return nil + } + out := new(IstioVirtualService) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobMetric) DeepCopyInto(out *JobMetric) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobMetric. +func (in *JobMetric) DeepCopy() *JobMetric { + if in == nil { + return nil + } + out := new(JobMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KayentaMetric) DeepCopyInto(out *KayentaMetric) { + *out = *in + out.Threshold = in.Threshold + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]KayentaScope, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KayentaMetric. +func (in *KayentaMetric) DeepCopy() *KayentaMetric { + if in == nil { + return nil + } + out := new(KayentaMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KayentaScope) DeepCopyInto(out *KayentaScope) { + *out = *in + out.ControlScope = in.ControlScope + out.ExperimentScope = in.ExperimentScope +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KayentaScope. +func (in *KayentaScope) DeepCopy() *KayentaScope { + if in == nil { + return nil + } + out := new(KayentaScope) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *KayentaThreshold) DeepCopyInto(out *KayentaThreshold) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KayentaThreshold. +func (in *KayentaThreshold) DeepCopy() *KayentaThreshold { + if in == nil { + return nil + } + out := new(KayentaThreshold) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Measurement) DeepCopyInto(out *Measurement) { + *out = *in + if in.StartedAt != nil { + in, out := &in.StartedAt, &out.StartedAt + *out = (*in).DeepCopy() + } + if in.FinishedAt != nil { + in, out := &in.FinishedAt, &out.FinishedAt + *out = (*in).DeepCopy() + } + if in.Metadata != nil { + in, out := &in.Metadata, &out.Metadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ResumeAt != nil { + in, out := &in.ResumeAt, &out.ResumeAt + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Measurement. +func (in *Measurement) DeepCopy() *Measurement { + if in == nil { + return nil + } + out := new(Measurement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Metric) DeepCopyInto(out *Metric) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(intstr.IntOrString) + **out = **in + } + if in.FailureLimit != nil { + in, out := &in.FailureLimit, &out.FailureLimit + *out = new(intstr.IntOrString) + **out = **in + } + if in.InconclusiveLimit != nil { + in, out := &in.InconclusiveLimit, &out.InconclusiveLimit + *out = new(intstr.IntOrString) + **out = **in + } + if in.ConsecutiveErrorLimit != nil { + in, out := &in.ConsecutiveErrorLimit, &out.ConsecutiveErrorLimit + *out = new(intstr.IntOrString) + **out = **in + } + in.Provider.DeepCopyInto(&out.Provider) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Metric. +func (in *Metric) DeepCopy() *Metric { + if in == nil { + return nil + } + out := new(Metric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricProvider) DeepCopyInto(out *MetricProvider) { + *out = *in + if in.Prometheus != nil { + in, out := &in.Prometheus, &out.Prometheus + *out = new(PrometheusMetric) + **out = **in + } + if in.Kayenta != nil { + in, out := &in.Kayenta, &out.Kayenta + *out = new(KayentaMetric) + (*in).DeepCopyInto(*out) + } + if in.Web != nil { + in, out := &in.Web, &out.Web + *out = new(WebMetric) + (*in).DeepCopyInto(*out) + } + if in.Datadog != nil { + in, out := &in.Datadog, &out.Datadog + *out = new(DatadogMetric) + **out = **in + } + if in.Wavefront != nil { + in, out := &in.Wavefront, &out.Wavefront + *out = new(WavefrontMetric) + **out = **in + } + if in.NewRelic != nil { + in, out := &in.NewRelic, &out.NewRelic + *out = new(NewRelicMetric) + **out = **in + } + if in.Job != nil { + in, out := &in.Job, &out.Job + *out = new(JobMetric) + (*in).DeepCopyInto(*out) + } + if in.CloudWatch != nil { + in, out := &in.CloudWatch, &out.CloudWatch + *out = new(CloudWatchMetric) + (*in).DeepCopyInto(*out) + } + if in.Graphite != nil { + in, out := &in.Graphite, &out.Graphite + *out = new(GraphiteMetric) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricProvider. +func (in *MetricProvider) DeepCopy() *MetricProvider { + if in == nil { + return nil + } + out := new(MetricProvider) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricResult) DeepCopyInto(out *MetricResult) { + *out = *in + if in.Measurements != nil { + in, out := &in.Measurements, &out.Measurements + *out = make([]Measurement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricResult. +func (in *MetricResult) DeepCopy() *MetricResult { + if in == nil { + return nil + } + out := new(MetricResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NewRelicMetric) DeepCopyInto(out *NewRelicMetric) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NewRelicMetric. +func (in *NewRelicMetric) DeepCopy() *NewRelicMetric { + if in == nil { + return nil + } + out := new(NewRelicMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NginxTrafficRouting) DeepCopyInto(out *NginxTrafficRouting) { + *out = *in + if in.AdditionalIngressAnnotations != nil { + in, out := &in.AdditionalIngressAnnotations, &out.AdditionalIngressAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NginxTrafficRouting. +func (in *NginxTrafficRouting) DeepCopy() *NginxTrafficRouting { + if in == nil { + return nil + } + out := new(NginxTrafficRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectRef) DeepCopyInto(out *ObjectRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectRef. 
+func (in *ObjectRef) DeepCopy() *ObjectRef { + if in == nil { + return nil + } + out := new(ObjectRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PauseCondition) DeepCopyInto(out *PauseCondition) { + *out = *in + in.StartTime.DeepCopyInto(&out.StartTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PauseCondition. +func (in *PauseCondition) DeepCopy() *PauseCondition { + if in == nil { + return nil + } + out := new(PauseCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateMetadata) DeepCopyInto(out *PodTemplateMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateMetadata. +func (in *PodTemplateMetadata) DeepCopy() *PodTemplateMetadata { + if in == nil { + return nil + } + out := new(PodTemplateMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferredDuringSchedulingIgnoredDuringExecution) DeepCopyInto(out *PreferredDuringSchedulingIgnoredDuringExecution) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredDuringSchedulingIgnoredDuringExecution. +func (in *PreferredDuringSchedulingIgnoredDuringExecution) DeepCopy() *PreferredDuringSchedulingIgnoredDuringExecution { + if in == nil { + return nil + } + out := new(PreferredDuringSchedulingIgnoredDuringExecution) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusMetric) DeepCopyInto(out *PrometheusMetric) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusMetric. +func (in *PrometheusMetric) DeepCopy() *PrometheusMetric { + if in == nil { + return nil + } + out := new(PrometheusMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RequiredDuringSchedulingIgnoredDuringExecution) DeepCopyInto(out *RequiredDuringSchedulingIgnoredDuringExecution) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequiredDuringSchedulingIgnoredDuringExecution. +func (in *RequiredDuringSchedulingIgnoredDuringExecution) DeepCopy() *RequiredDuringSchedulingIgnoredDuringExecution { + if in == nil { + return nil + } + out := new(RequiredDuringSchedulingIgnoredDuringExecution) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Rollout) DeepCopyInto(out *Rollout) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rollout. +func (in *Rollout) DeepCopy() *Rollout { + if in == nil { + return nil + } + out := new(Rollout) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Rollout) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutAnalysis) DeepCopyInto(out *RolloutAnalysis) { + *out = *in + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = make([]RolloutAnalysisTemplate, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]AnalysisRunArgument, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAnalysis. +func (in *RolloutAnalysis) DeepCopy() *RolloutAnalysis { + if in == nil { + return nil + } + out := new(RolloutAnalysis) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutAnalysisBackground) DeepCopyInto(out *RolloutAnalysisBackground) { + *out = *in + in.RolloutAnalysis.DeepCopyInto(&out.RolloutAnalysis) + if in.StartingStep != nil { + in, out := &in.StartingStep, &out.StartingStep + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAnalysisBackground. +func (in *RolloutAnalysisBackground) DeepCopy() *RolloutAnalysisBackground { + if in == nil { + return nil + } + out := new(RolloutAnalysisBackground) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutAnalysisRunStatus) DeepCopyInto(out *RolloutAnalysisRunStatus) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAnalysisRunStatus. +func (in *RolloutAnalysisRunStatus) DeepCopy() *RolloutAnalysisRunStatus { + if in == nil { + return nil + } + out := new(RolloutAnalysisRunStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutAnalysisTemplate) DeepCopyInto(out *RolloutAnalysisTemplate) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAnalysisTemplate. +func (in *RolloutAnalysisTemplate) DeepCopy() *RolloutAnalysisTemplate { + if in == nil { + return nil + } + out := new(RolloutAnalysisTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolloutCondition) DeepCopyInto(out *RolloutCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutCondition. +func (in *RolloutCondition) DeepCopy() *RolloutCondition { + if in == nil { + return nil + } + out := new(RolloutCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutExperimentStep) DeepCopyInto(out *RolloutExperimentStep) { + *out = *in + if in.Templates != nil { + in, out := &in.Templates, &out.Templates + *out = make([]RolloutExperimentTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Analyses != nil { + in, out := &in.Analyses, &out.Analyses + *out = make([]RolloutExperimentStepAnalysisTemplateRef, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutExperimentStep. +func (in *RolloutExperimentStep) DeepCopy() *RolloutExperimentStep { + if in == nil { + return nil + } + out := new(RolloutExperimentStep) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutExperimentStepAnalysisTemplateRef) DeepCopyInto(out *RolloutExperimentStepAnalysisTemplateRef) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]AnalysisRunArgument, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutExperimentStepAnalysisTemplateRef. +func (in *RolloutExperimentStepAnalysisTemplateRef) DeepCopy() *RolloutExperimentStepAnalysisTemplateRef { + if in == nil { + return nil + } + out := new(RolloutExperimentStepAnalysisTemplateRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutExperimentTemplate) DeepCopyInto(out *RolloutExperimentTemplate) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.Metadata.DeepCopyInto(&out.Metadata) + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutExperimentTemplate. +func (in *RolloutExperimentTemplate) DeepCopy() *RolloutExperimentTemplate { + if in == nil { + return nil + } + out := new(RolloutExperimentTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolloutList) DeepCopyInto(out *RolloutList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Rollout, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutList. +func (in *RolloutList) DeepCopy() *RolloutList { + if in == nil { + return nil + } + out := new(RolloutList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RolloutList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutPause) DeepCopyInto(out *RolloutPause) { + *out = *in + if in.Duration != nil { + in, out := &in.Duration, &out.Duration + *out = new(intstr.IntOrString) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutPause. +func (in *RolloutPause) DeepCopy() *RolloutPause { + if in == nil { + return nil + } + out := new(RolloutPause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutSpec) DeepCopyInto(out *RolloutSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + if in.WorkloadRef != nil { + in, out := &in.WorkloadRef, &out.WorkloadRef + *out = new(ObjectRef) + **out = **in + } + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + if in.RestartAt != nil { + in, out := &in.RestartAt, &out.RestartAt + *out = (*in).DeepCopy() + } + if in.Analysis != nil { + in, out := &in.Analysis, &out.Analysis + *out = new(AnalysisRunStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutSpec. +func (in *RolloutSpec) DeepCopy() *RolloutSpec { + if in == nil { + return nil + } + out := new(RolloutSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RolloutStatus) DeepCopyInto(out *RolloutStatus) { + *out = *in + if in.PauseConditions != nil { + in, out := &in.PauseConditions, &out.PauseConditions + *out = make([]PauseCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AbortedAt != nil { + in, out := &in.AbortedAt, &out.AbortedAt + *out = (*in).DeepCopy() + } + if in.CurrentStepIndex != nil { + in, out := &in.CurrentStepIndex, &out.CurrentStepIndex + *out = new(int32) + **out = **in + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]RolloutCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Canary.DeepCopyInto(&out.Canary) + in.BlueGreen.DeepCopyInto(&out.BlueGreen) + if in.RestartedAt != nil { + in, out := &in.RestartedAt, &out.RestartedAt + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStatus. +func (in *RolloutStatus) DeepCopy() *RolloutStatus { + if in == nil { + return nil + } + out := new(RolloutStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) { + *out = *in + if in.BlueGreen != nil { + in, out := &in.BlueGreen, &out.BlueGreen + *out = new(BlueGreenStrategy) + (*in).DeepCopyInto(*out) + } + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = new(CanaryStrategy) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStrategy. +func (in *RolloutStrategy) DeepCopy() *RolloutStrategy { + if in == nil { + return nil + } + out := new(RolloutStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RolloutTrafficRouting) DeepCopyInto(out *RolloutTrafficRouting) { + *out = *in + if in.Istio != nil { + in, out := &in.Istio, &out.Istio + *out = new(IstioTrafficRouting) + (*in).DeepCopyInto(*out) + } + if in.Nginx != nil { + in, out := &in.Nginx, &out.Nginx + *out = new(NginxTrafficRouting) + (*in).DeepCopyInto(*out) + } + if in.ALB != nil { + in, out := &in.ALB, &out.ALB + *out = new(ALBTrafficRouting) + **out = **in + } + if in.SMI != nil { + in, out := &in.SMI, &out.SMI + *out = new(SMITrafficRouting) + **out = **in + } + if in.Ambassador != nil { + in, out := &in.Ambassador, &out.Ambassador + *out = new(AmbassadorTrafficRouting) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutTrafficRouting. +func (in *RolloutTrafficRouting) DeepCopy() *RolloutTrafficRouting { + if in == nil { + return nil + } + out := new(RolloutTrafficRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SMITrafficRouting) DeepCopyInto(out *SMITrafficRouting) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SMITrafficRouting. 
+func (in *SMITrafficRouting) DeepCopy() *SMITrafficRouting { + if in == nil { + return nil + } + out := new(SMITrafficRouting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopeDetail) DeepCopyInto(out *ScopeDetail) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeDetail. +func (in *ScopeDetail) DeepCopy() *ScopeDetail { + if in == nil { + return nil + } + out := new(ScopeDetail) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretKeyRef) DeepCopyInto(out *SecretKeyRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeyRef. +func (in *SecretKeyRef) DeepCopy() *SecretKeyRef { + if in == nil { + return nil + } + out := new(SecretKeyRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SetCanaryScale) DeepCopyInto(out *SetCanaryScale) { + *out = *in + if in.Weight != nil { + in, out := &in.Weight, &out.Weight + *out = new(int32) + **out = **in + } + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SetCanaryScale. +func (in *SetCanaryScale) DeepCopy() *SetCanaryScale { + if in == nil { + return nil + } + out := new(SetCanaryScale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSRoute) DeepCopyInto(out *TLSRoute) { + *out = *in + if in.SNIHosts != nil { + in, out := &in.SNIHosts, &out.SNIHosts + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSRoute. +func (in *TLSRoute) DeepCopy() *TLSRoute { + if in == nil { + return nil + } + out := new(TLSRoute) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TrafficWeights) DeepCopyInto(out *TrafficWeights) { + *out = *in + out.Canary = in.Canary + out.Stable = in.Stable + if in.Additional != nil { + in, out := &in.Additional, &out.Additional + *out = make([]WeightDestination, len(*in)) + copy(*out, *in) + } + if in.Verified != nil { + in, out := &in.Verified, &out.Verified + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrafficWeights. +func (in *TrafficWeights) DeepCopy() *TrafficWeights { + if in == nil { + return nil + } + out := new(TrafficWeights) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ValueFrom) DeepCopyInto(out *ValueFrom) { + *out = *in + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeyRef) + **out = **in + } + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(FieldRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValueFrom. +func (in *ValueFrom) DeepCopy() *ValueFrom { + if in == nil { + return nil + } + out := new(ValueFrom) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WavefrontMetric) DeepCopyInto(out *WavefrontMetric) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WavefrontMetric. +func (in *WavefrontMetric) DeepCopy() *WavefrontMetric { + if in == nil { + return nil + } + out := new(WavefrontMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebMetric) DeepCopyInto(out *WebMetric) { + *out = *in + if in.Headers != nil { + in, out := &in.Headers, &out.Headers + *out = make([]WebMetricHeader, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebMetric. +func (in *WebMetric) DeepCopy() *WebMetric { + if in == nil { + return nil + } + out := new(WebMetric) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebMetricHeader) DeepCopyInto(out *WebMetricHeader) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebMetricHeader. +func (in *WebMetricHeader) DeepCopy() *WebMetricHeader { + if in == nil { + return nil + } + out := new(WebMetricHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightDestination) DeepCopyInto(out *WeightDestination) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightDestination. +func (in *WeightDestination) DeepCopy() *WeightDestination { + if in == nil { + return nil + } + out := new(WeightDestination) + in.DeepCopyInto(out) + return out +} diff --git a/api/v1alpha1/awsapplicationloadbalancerconfig.go b/api/v1alpha1/awsapplicationloadbalancerconfig.go new file mode 100644 index 0000000..9618c32 --- /dev/null +++ b/api/v1alpha1/awsapplicationloadbalancerconfig.go @@ -0,0 +1,71 @@ +/* +Copyright 2020 The Okra authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AWSApplicationLoadBalancerConfigSpec defines the desired state of AWSApplicationLoadBalancerConfig
+type AWSApplicationLoadBalancerConfigSpec struct {
+	ListenerARN string `json:"listenerARN,omitempty"`
+	Forward Forward `json:"forward,omitempty"`
+}
+
+type Forward struct {
+	TargetGroups []ForwardTargetGroup `json:"targetGroups,omitempty"`
+}
+
+type ForwardTargetGroup struct {
+	Name string `json:"name,omitempty"`
+	ARN string `json:"arn,omitempty"`
+	Weight int `json:"weight,omitempty"`
+}
+
+// AWSApplicationLoadBalancerConfigStatus defines the observed state of AWSApplicationLoadBalancerConfig
+type AWSApplicationLoadBalancerConfigStatus struct {
+	LastSyncTime metav1.Time `json:"lastSyncTime"`
+	Phase string `json:"phase"`
+	Reason string `json:"reason"`
+	Message string `json:"message"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".status.lastSyncTime",name=Last Sync,type=date
+
+// AWSApplicationLoadBalancerConfig is the Schema for the AWSApplicationLoadBalancerConfig API
+type AWSApplicationLoadBalancerConfig struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec AWSApplicationLoadBalancerConfigSpec `json:"spec,omitempty"`
+	Status AWSApplicationLoadBalancerConfigStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AWSApplicationLoadBalancerConfigList contains a list of AWSApplicationLoadBalancerConfig
+type AWSApplicationLoadBalancerConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []AWSApplicationLoadBalancerConfig `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AWSApplicationLoadBalancerConfig{}, &AWSApplicationLoadBalancerConfigList{})
+}
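Reviewer note: the following is a minimal sketch (not part of the patch) of how a caller might construct the AWSApplicationLoadBalancerConfig defined above. The object name, namespace, ARNs, and the 90/10 weight split are illustrative placeholders only.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	okrav1alpha1 "github.com/mumoshu/okra/api/v1alpha1"
)

func main() {
	// Split traffic 90/10 between two target groups behind one ALB listener.
	cfg := okrav1alpha1.AWSApplicationLoadBalancerConfig{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}, // placeholder name/namespace
		Spec: okrav1alpha1.AWSApplicationLoadBalancerConfigSpec{
			ListenerARN: "arn:aws:elasticloadbalancing:REGION:ACCOUNT:listener/app/web/LB_ID/LISTENER_ID", // placeholder ARN
			Forward: okrav1alpha1.Forward{
				TargetGroups: []okrav1alpha1.ForwardTargetGroup{
					{Name: "stable", ARN: "arn:aws:elasticloadbalancing:REGION:ACCOUNT:targetgroup/stable/TG_ID", Weight: 90},
					{Name: "canary", ARN: "arn:aws:elasticloadbalancing:REGION:ACCOUNT:targetgroup/canary/TG_ID", Weight: 10},
				},
			},
		},
	}

	// The weights of all forward target groups are expected to sum to 100.
	fmt.Println(cfg.Spec.Forward.TargetGroups[0].Weight + cfg.Spec.Forward.TargetGroups[1].Weight)
}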
diff --git a/api/v1alpha1/awstargetgroup.go b/api/v1alpha1/awstargetgroup.go
new file mode 100644
index 0000000..49215ce
--- /dev/null
+++ b/api/v1alpha1/awstargetgroup.go
@@ -0,0 +1,61 @@
+/*
+Copyright 2020 The Okra authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AWSTargetGroupSpec defines the desired state of AWSTargetGroup
+type AWSTargetGroupSpec struct {
+	ARN string `json:"arn,omitempty"`
+}
+
+// AWSTargetGroupStatus defines the observed state of AWSTargetGroup
+type AWSTargetGroupStatus struct {
+	Clusters ClusterSetStatusClusters `json:"clusters"`
+	LastSyncTime metav1.Time `json:"lastSyncTime"`
+	Phase string `json:"phase"`
+	Reason string `json:"reason"`
+	Message string `json:"message"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".status.lastSyncTime",name=Last Sync,type=date
+
+// AWSTargetGroup is the Schema for the AWSTargetGroup API
+type AWSTargetGroup struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec AWSTargetGroupSpec `json:"spec,omitempty"`
+	Status AWSTargetGroupStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AWSTargetGroupList contains a list of AWSTargetGroup
+type AWSTargetGroupList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []AWSTargetGroup `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AWSTargetGroup{}, &AWSTargetGroupList{})
+}
diff --git a/api/v1alpha1/awstargetgroupset.go b/api/v1alpha1/awstargetgroupset.go
new file mode 100644
index 0000000..7eaa537
--- /dev/null
+++ b/api/v1alpha1/awstargetgroupset.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2020 The Okra authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// AWSTargetGroupSetSpec defines the desired state of AWSTargetGroupSet
+type AWSTargetGroupSetSpec struct {
+	ARN string `json:"arn,omitempty"`
+	Generators []AWSTargetGroupGenerator `json:"generators,omitempty"`
+	Template AWSTargetGroupTemplate `json:"template,omitempty"`
+}
+
+type AWSTargetGroupGenerator struct {
+	AWSEKS AWSTargetGroupGeneratorAWSEKS `json:"awseks,omitempty"`
+}
+
+type AWSTargetGroupGeneratorAWSEKS struct {
+	ClusterSelector TargetGroupClusterSelector `json:"clusterSelector,omitempty"`
+	BindingSelector TargetGroupBindingSelector `json:"bindingSelector,omitempty"`
+}
+
+type TargetGroupClusterSelector struct {
+	MatchLabels map[string]string `json:"matchLabels,omitempty"`
+}
+
+type TargetGroupBindingSelector struct {
+	MatchLabels map[string]string `json:"matchLabels,omitempty"`
+}
+
+type AWSTargetGroupTemplate struct {
+	Metadata AWSTargetGroupTemplateMetadata `json:"metadata,omitempty"`
+}
+
+type AWSTargetGroupTemplateMetadata struct {
+	Labels map[string]string `json:"labels,omitempty"`
+}
+
+// AWSTargetGroupSetStatus defines the observed state of AWSTargetGroupSet
+type AWSTargetGroupSetStatus struct {
+	LastSyncTime metav1.Time `json:"lastSyncTime"`
+	Phase string `json:"phase"`
+	Reason string `json:"reason"`
+	Message string `json:"message"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".status.lastSyncTime",name=Last Sync,type=date
+
+// AWSTargetGroupSet is the Schema for the AWSTargetGroupSet API
+type AWSTargetGroupSet struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec AWSTargetGroupSetSpec `json:"spec,omitempty"`
+	Status AWSTargetGroupSetStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// AWSTargetGroupSetList contains a list of AWSTargetGroupSet
+type AWSTargetGroupSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []AWSTargetGroupSet `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&AWSTargetGroupSet{}, &AWSTargetGroupSetList{})
+}
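Reviewer note: a rough sketch (not part of the patch) of an AWSTargetGroupSet that wires the generator and template fields defined above together. The label keys and values ("role", "app", "cell") are hypothetical and only show how the selectors compose.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	okrav1alpha1 "github.com/mumoshu/okra/api/v1alpha1"
)

func main() {
	// Discover target groups from EKS clusters whose cluster and binding labels
	// match, and label the generated AWSTargetGroup objects for later selection.
	set := okrav1alpha1.AWSTargetGroupSet{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}, // placeholder name/namespace
		Spec: okrav1alpha1.AWSTargetGroupSetSpec{
			Generators: []okrav1alpha1.AWSTargetGroupGenerator{
				{
					AWSEKS: okrav1alpha1.AWSTargetGroupGeneratorAWSEKS{
						ClusterSelector: okrav1alpha1.TargetGroupClusterSelector{
							MatchLabels: map[string]string{"role": "web"}, // hypothetical label
						},
						BindingSelector: okrav1alpha1.TargetGroupBindingSelector{
							MatchLabels: map[string]string{"app": "web"}, // hypothetical label
						},
					},
				},
			},
			Template: okrav1alpha1.AWSTargetGroupTemplate{
				Metadata: okrav1alpha1.AWSTargetGroupTemplateMetadata{
					Labels: map[string]string{"cell": "web"}, // hypothetical label
				},
			},
		},
	}

	fmt.Println(len(set.Spec.Generators))
}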
diff --git a/api/v1alpha1/cell.go b/api/v1alpha1/cell.go
new file mode 100644
index 0000000..811df1a
--- /dev/null
+++ b/api/v1alpha1/cell.go
@@ -0,0 +1,140 @@
+/*
+Copyright 2020 The Okra authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	rolloutsv1alpha1 "github.com/mumoshu/okra/api/rollouts/v1alpha1"
+)
+
+// CellSpec defines the desired state of Cell
+type CellSpec struct {
+	Ingress CellIngress `json:"ingress,omitempty"`
+	Replicas *int32 `json:"replicas,omitempty"`
+	UpdateStrategy CellUpdateStrategy `json:"updateStrategy,omitempty"`
+}
+
+type CellIngress struct {
+	Type CellIngressType `json:"type,omitempty"`
+	AWSApplicationLoadBalancer *CellIngressAWSApplicationLoadBalancer `json:"awsApplicationLoadBalancer,omitempty"`
+	AWSNetworkLoadBalancer *CellIngressAWSNetworkLoadBalancer `json:"awsNetworkLoadBalancer,omitempty"`
+}
+
+type CellIngressType string
+
+var ErrInvalidCellIngressType = fmt.Errorf("invalid cell ingress type")
+
+func (v CellIngressType) String() string {
+	return string(v)
+}
+
+func (v CellIngressType) Valid() error {
+	switch v {
+	case CellIngressTypeAWSApplicationLoadBalancer:
+		return nil
+	default:
+		return errors.Wrapf(ErrInvalidCellIngressType, "got %s", v)
+	}
+}
+
+func (v *CellIngressType) UnmarshalJSON(b []byte) error {
+	*v = CellIngressType(strings.Trim(string(b), `"`))
+	return v.Valid()
+}
+
+const (
+	CellIngressTypeAWSApplicationLoadBalancer CellIngressType = "AWSApplicationLoadBalancer"
+	CellIngressTypeAWSNetworkLoadBalancer CellIngressType = "AWSNetworkLoadBalancer"
+)
+
+type CellIngressAWSApplicationLoadBalancer struct {
+	ListenerARN string `json:"listenerARN,omitempty"`
+	TargetGroupSelector TargetGroupSelector `json:"targetGroupSelector,omitempty"`
+}
+
+type CellIngressAWSNetworkLoadBalancer struct {
+	ListenerARN string `json:"listenerARN,omitempty"`
+	TargetGroupSelector TargetGroupSelector `json:"targetGroupSelector,omitempty"`
+}
+
+type TargetGroupSelector struct {
+	MatchLabels map[string]string `json:"matchLabels,omitempty"`
+}
+
+type CellUpdateStrategy struct {
+	Type CellUpdateStrategyType `json:"type,omitempty"`
+	Canary *CellUpdateStrategyCanary `json:"canary,omitempty"`
+	BlueGreen *CellUpdateStrategyBlueGreen `json:"blueGreen,omitempty"`
+}
+
+type CellUpdateStrategyType string
+
+const (
+	CellUpdateStrategyTypeCanary CellUpdateStrategyType = "Canary"
+	CellUpdateStrategyTypeBlueGreen CellUpdateStrategyType = "BlueGreen"
+)
+
+type CellUpdateStrategyCanary struct {
+	// Steps define the order of phases to execute the canary deployment
+	// +optional
+	Steps []rolloutsv1alpha1.CanaryStep `json:"steps,omitempty" protobuf:"bytes,3,rep,name=steps"`
+	// Analysis runs a separate analysisRun while all the steps execute. This is intended to be a continuous validation of the new ReplicaSet
+	Analysis *rolloutsv1alpha1.RolloutAnalysisBackground `json:"analysis,omitempty" protobuf:"bytes,7,opt,name=analysis"`
+}
+
+type CellUpdateStrategyBlueGreen struct {
+}
+
+// CellStatus defines the observed state of Cell
+type CellStatus struct {
+	Clusters ClusterSetStatusClusters `json:"clusters"`
+	LastSyncTime metav1.Time `json:"lastSyncTime"`
+	Phase string `json:"phase"`
+	Reason string `json:"reason"`
+	Message string `json:"message"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".status.lastSyncTime",name=Last Sync,type=date
+
+// Cell is the Schema for the Cell API
+type Cell struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec CellSpec `json:"spec,omitempty"`
+	Status CellStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// CellList contains a list of Cell
+type CellList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []Cell `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&Cell{}, &CellList{})
+}
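Reviewer note: a minimal sketch (not part of the patch) of a Cell that uses the ingress and update-strategy fields defined above. The listener ARN and the selector label are placeholders, and the canary steps/analysis are omitted for brevity.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	okrav1alpha1 "github.com/mumoshu/okra/api/v1alpha1"
)

func main() {
	replicas := int32(2)

	// A Cell that fronts target groups labeled cell=web behind one ALB listener
	// and rolls new clusters out with the Canary strategy.
	cell := okrav1alpha1.Cell{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"}, // placeholder name/namespace
		Spec: okrav1alpha1.CellSpec{
			Replicas: &replicas,
			Ingress: okrav1alpha1.CellIngress{
				Type: okrav1alpha1.CellIngressTypeAWSApplicationLoadBalancer,
				AWSApplicationLoadBalancer: &okrav1alpha1.CellIngressAWSApplicationLoadBalancer{
					ListenerARN: "arn:aws:elasticloadbalancing:REGION:ACCOUNT:listener/app/web/LB_ID/LISTENER_ID", // placeholder ARN
					TargetGroupSelector: okrav1alpha1.TargetGroupSelector{
						MatchLabels: map[string]string{"cell": "web"}, // hypothetical label
					},
				},
			},
			UpdateStrategy: okrav1alpha1.CellUpdateStrategy{
				Type:   okrav1alpha1.CellUpdateStrategyTypeCanary,
				Canary: &okrav1alpha1.CellUpdateStrategyCanary{}, // steps and analysis omitted
			},
		},
	}

	// Valid() accepts AWSApplicationLoadBalancer, so this prints true.
	fmt.Println(cell.Spec.Ingress.Type.Valid() == nil)
}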
diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go
new file mode 100644
index 0000000..0999099
--- /dev/null
+++ b/api/v1alpha1/groupversion_info.go
@@ -0,0 +1,33 @@
+/*
+Copyright 2020 The Okra authors.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the okra v1alpha1 API group
+// +kubebuilder:object:generate=true
+// +groupName=okra.mumo.co
+package v1alpha1
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is group version used to register these objects
+	GroupVersion = schema.GroupVersion{Group: "okra.mumo.co", Version: "v1alpha1"}
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go
new file mode 100644
index 0000000..4aa11ea
--- /dev/null
+++ b/api/v1alpha1/types.go
@@ -0,0 +1,87 @@
+/*
+Copyright 2020 The argocd-clusterset authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ClusterSetSpec defines the desired state of ClusterSet
+type ClusterSetSpec struct {
+	Generators []ClusterGenerator `json:"generators,omitempty"`
+	Template ClusterSecretTemplate `json:"template"`
+}
+
+type ClusterGenerator struct {
+	AWSEKS AWSEKSClusterGenerator `json:"awseks,omitempty"`
+}
+
+type AWSEKSClusterGenerator struct {
+	Selector AWSEKSClusterSelector `json:"selector,omitempty"`
+}
+
+type AWSEKSClusterSelector struct {
+	MatchTags map[string]string `json:"matchTags,omitempty"`
+}
+
+type ClusterSecretTemplate struct {
+	Metadata ClusterSecretTemplateMetadata `json:"metadata"`
+}
+
+type ClusterSecretTemplateMetadata struct {
+	Labels map[string]string `json:"labels"`
+}
+
+// ClusterSetStatus defines the observed state of ClusterSet
+type ClusterSetStatus struct {
+	Clusters ClusterSetStatusClusters `json:"clusters"`
+	LastSyncTime metav1.Time `json:"lastSyncTime"`
+	Phase string `json:"phase"`
+	Reason string `json:"reason"`
+	Message string `json:"message"`
+}
+
+// ClusterSetStatusClusters contains the names of the clusters that belong to the ClusterSet
+type ClusterSetStatusClusters struct {
+	Names []string `json:"names,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:printcolumn:JSONPath=".status.lastSyncTime",name=Last Sync,type=date
+
+// ClusterSet is the Schema for the ClusterSet API
+type ClusterSet struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	Spec ClusterSetSpec `json:"spec,omitempty"`
+	Status ClusterSetStatus `json:"status,omitempty"`
+}
+
+// +kubebuilder:object:root=true
+
+// ClusterSetList contains a list of ClusterSet
+type ClusterSetList struct {
+	metav1.TypeMeta `json:",inline"`
+	metav1.ListMeta `json:"metadata,omitempty"`
+	Items []ClusterSet `json:"items"`
+}
+
+func init() {
+	SchemeBuilder.Register(&ClusterSet{}, &ClusterSetList{})
+}
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 0000000..fd2987a
--- /dev/null
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,912 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2020 The Okra authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by controller-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+	rolloutsv1alpha1 "github.com/mumoshu/okra/api/rollouts/v1alpha1"
+	runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *AWSApplicationLoadBalancerConfig) DeepCopyInto(out *AWSApplicationLoadBalancerConfig) {
+	*out = *in
+	out.TypeMeta = in.TypeMeta
+	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+	in.Spec.DeepCopyInto(&out.Spec)
+	in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSApplicationLoadBalancerConfig.
+func (in *AWSApplicationLoadBalancerConfig) DeepCopy() *AWSApplicationLoadBalancerConfig { + if in == nil { + return nil + } + out := new(AWSApplicationLoadBalancerConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSApplicationLoadBalancerConfig) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSApplicationLoadBalancerConfigList) DeepCopyInto(out *AWSApplicationLoadBalancerConfigList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSApplicationLoadBalancerConfig, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSApplicationLoadBalancerConfigList. +func (in *AWSApplicationLoadBalancerConfigList) DeepCopy() *AWSApplicationLoadBalancerConfigList { + if in == nil { + return nil + } + out := new(AWSApplicationLoadBalancerConfigList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSApplicationLoadBalancerConfigList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSApplicationLoadBalancerConfigSpec) DeepCopyInto(out *AWSApplicationLoadBalancerConfigSpec) { + *out = *in + in.Forward.DeepCopyInto(&out.Forward) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSApplicationLoadBalancerConfigSpec. +func (in *AWSApplicationLoadBalancerConfigSpec) DeepCopy() *AWSApplicationLoadBalancerConfigSpec { + if in == nil { + return nil + } + out := new(AWSApplicationLoadBalancerConfigSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSApplicationLoadBalancerConfigStatus) DeepCopyInto(out *AWSApplicationLoadBalancerConfigStatus) { + *out = *in + in.LastSyncTime.DeepCopyInto(&out.LastSyncTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSApplicationLoadBalancerConfigStatus. +func (in *AWSApplicationLoadBalancerConfigStatus) DeepCopy() *AWSApplicationLoadBalancerConfigStatus { + if in == nil { + return nil + } + out := new(AWSApplicationLoadBalancerConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSEKSClusterGenerator) DeepCopyInto(out *AWSEKSClusterGenerator) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEKSClusterGenerator. +func (in *AWSEKSClusterGenerator) DeepCopy() *AWSEKSClusterGenerator { + if in == nil { + return nil + } + out := new(AWSEKSClusterGenerator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSEKSClusterSelector) DeepCopyInto(out *AWSEKSClusterSelector) { + *out = *in + if in.MatchTags != nil { + in, out := &in.MatchTags, &out.MatchTags + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSEKSClusterSelector. +func (in *AWSEKSClusterSelector) DeepCopy() *AWSEKSClusterSelector { + if in == nil { + return nil + } + out := new(AWSEKSClusterSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroup) DeepCopyInto(out *AWSTargetGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroup. +func (in *AWSTargetGroup) DeepCopy() *AWSTargetGroup { + if in == nil { + return nil + } + out := new(AWSTargetGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSTargetGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupGenerator) DeepCopyInto(out *AWSTargetGroupGenerator) { + *out = *in + in.AWSEKS.DeepCopyInto(&out.AWSEKS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupGenerator. +func (in *AWSTargetGroupGenerator) DeepCopy() *AWSTargetGroupGenerator { + if in == nil { + return nil + } + out := new(AWSTargetGroupGenerator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupGeneratorAWSEKS) DeepCopyInto(out *AWSTargetGroupGeneratorAWSEKS) { + *out = *in + in.ClusterSelector.DeepCopyInto(&out.ClusterSelector) + in.BindingSelector.DeepCopyInto(&out.BindingSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupGeneratorAWSEKS. +func (in *AWSTargetGroupGeneratorAWSEKS) DeepCopy() *AWSTargetGroupGeneratorAWSEKS { + if in == nil { + return nil + } + out := new(AWSTargetGroupGeneratorAWSEKS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupList) DeepCopyInto(out *AWSTargetGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSTargetGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupList. +func (in *AWSTargetGroupList) DeepCopy() *AWSTargetGroupList { + if in == nil { + return nil + } + out := new(AWSTargetGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *AWSTargetGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupSet) DeepCopyInto(out *AWSTargetGroupSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupSet. +func (in *AWSTargetGroupSet) DeepCopy() *AWSTargetGroupSet { + if in == nil { + return nil + } + out := new(AWSTargetGroupSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSTargetGroupSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupSetList) DeepCopyInto(out *AWSTargetGroupSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AWSTargetGroupSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupSetList. +func (in *AWSTargetGroupSetList) DeepCopy() *AWSTargetGroupSetList { + if in == nil { + return nil + } + out := new(AWSTargetGroupSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AWSTargetGroupSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupSetSpec) DeepCopyInto(out *AWSTargetGroupSetSpec) { + *out = *in + if in.Generators != nil { + in, out := &in.Generators, &out.Generators + *out = make([]AWSTargetGroupGenerator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupSetSpec. +func (in *AWSTargetGroupSetSpec) DeepCopy() *AWSTargetGroupSetSpec { + if in == nil { + return nil + } + out := new(AWSTargetGroupSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupSetStatus) DeepCopyInto(out *AWSTargetGroupSetStatus) { + *out = *in + in.LastSyncTime.DeepCopyInto(&out.LastSyncTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupSetStatus. +func (in *AWSTargetGroupSetStatus) DeepCopy() *AWSTargetGroupSetStatus { + if in == nil { + return nil + } + out := new(AWSTargetGroupSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AWSTargetGroupSpec) DeepCopyInto(out *AWSTargetGroupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupSpec. +func (in *AWSTargetGroupSpec) DeepCopy() *AWSTargetGroupSpec { + if in == nil { + return nil + } + out := new(AWSTargetGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupStatus) DeepCopyInto(out *AWSTargetGroupStatus) { + *out = *in + in.Clusters.DeepCopyInto(&out.Clusters) + in.LastSyncTime.DeepCopyInto(&out.LastSyncTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupStatus. +func (in *AWSTargetGroupStatus) DeepCopy() *AWSTargetGroupStatus { + if in == nil { + return nil + } + out := new(AWSTargetGroupStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupTemplate) DeepCopyInto(out *AWSTargetGroupTemplate) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupTemplate. +func (in *AWSTargetGroupTemplate) DeepCopy() *AWSTargetGroupTemplate { + if in == nil { + return nil + } + out := new(AWSTargetGroupTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSTargetGroupTemplateMetadata) DeepCopyInto(out *AWSTargetGroupTemplateMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSTargetGroupTemplateMetadata. +func (in *AWSTargetGroupTemplateMetadata) DeepCopy() *AWSTargetGroupTemplateMetadata { + if in == nil { + return nil + } + out := new(AWSTargetGroupTemplateMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cell) DeepCopyInto(out *Cell) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cell. +func (in *Cell) DeepCopy() *Cell { + if in == nil { + return nil + } + out := new(Cell) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Cell) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CellIngress) DeepCopyInto(out *CellIngress) { + *out = *in + if in.AWSApplicationLoadBalancer != nil { + in, out := &in.AWSApplicationLoadBalancer, &out.AWSApplicationLoadBalancer + *out = new(CellIngressAWSApplicationLoadBalancer) + (*in).DeepCopyInto(*out) + } + if in.AWSNetworkLoadBalancer != nil { + in, out := &in.AWSNetworkLoadBalancer, &out.AWSNetworkLoadBalancer + *out = new(CellIngressAWSNetworkLoadBalancer) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellIngress. +func (in *CellIngress) DeepCopy() *CellIngress { + if in == nil { + return nil + } + out := new(CellIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellIngressAWSApplicationLoadBalancer) DeepCopyInto(out *CellIngressAWSApplicationLoadBalancer) { + *out = *in + in.TargetGroupSelector.DeepCopyInto(&out.TargetGroupSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellIngressAWSApplicationLoadBalancer. +func (in *CellIngressAWSApplicationLoadBalancer) DeepCopy() *CellIngressAWSApplicationLoadBalancer { + if in == nil { + return nil + } + out := new(CellIngressAWSApplicationLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellIngressAWSNetworkLoadBalancer) DeepCopyInto(out *CellIngressAWSNetworkLoadBalancer) { + *out = *in + in.TargetGroupSelector.DeepCopyInto(&out.TargetGroupSelector) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellIngressAWSNetworkLoadBalancer. +func (in *CellIngressAWSNetworkLoadBalancer) DeepCopy() *CellIngressAWSNetworkLoadBalancer { + if in == nil { + return nil + } + out := new(CellIngressAWSNetworkLoadBalancer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellList) DeepCopyInto(out *CellList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Cell, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellList. +func (in *CellList) DeepCopy() *CellList { + if in == nil { + return nil + } + out := new(CellList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CellList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellSpec) DeepCopyInto(out *CellSpec) { + *out = *in + in.Ingress.DeepCopyInto(&out.Ingress) + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellSpec. 
+func (in *CellSpec) DeepCopy() *CellSpec { + if in == nil { + return nil + } + out := new(CellSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellStatus) DeepCopyInto(out *CellStatus) { + *out = *in + in.Clusters.DeepCopyInto(&out.Clusters) + in.LastSyncTime.DeepCopyInto(&out.LastSyncTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellStatus. +func (in *CellStatus) DeepCopy() *CellStatus { + if in == nil { + return nil + } + out := new(CellStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellUpdateStrategy) DeepCopyInto(out *CellUpdateStrategy) { + *out = *in + if in.Canary != nil { + in, out := &in.Canary, &out.Canary + *out = new(CellUpdateStrategyCanary) + (*in).DeepCopyInto(*out) + } + if in.BlueGreen != nil { + in, out := &in.BlueGreen, &out.BlueGreen + *out = new(CellUpdateStrategyBlueGreen) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellUpdateStrategy. +func (in *CellUpdateStrategy) DeepCopy() *CellUpdateStrategy { + if in == nil { + return nil + } + out := new(CellUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellUpdateStrategyBlueGreen) DeepCopyInto(out *CellUpdateStrategyBlueGreen) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellUpdateStrategyBlueGreen. +func (in *CellUpdateStrategyBlueGreen) DeepCopy() *CellUpdateStrategyBlueGreen { + if in == nil { + return nil + } + out := new(CellUpdateStrategyBlueGreen) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CellUpdateStrategyCanary) DeepCopyInto(out *CellUpdateStrategyCanary) { + *out = *in + if in.Steps != nil { + in, out := &in.Steps, &out.Steps + *out = make([]rolloutsv1alpha1.CanaryStep, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Analysis != nil { + in, out := &in.Analysis, &out.Analysis + *out = new(rolloutsv1alpha1.RolloutAnalysisBackground) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CellUpdateStrategyCanary. +func (in *CellUpdateStrategyCanary) DeepCopy() *CellUpdateStrategyCanary { + if in == nil { + return nil + } + out := new(CellUpdateStrategyCanary) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterGenerator) DeepCopyInto(out *ClusterGenerator) { + *out = *in + in.AWSEKS.DeepCopyInto(&out.AWSEKS) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterGenerator. +func (in *ClusterGenerator) DeepCopy() *ClusterGenerator { + if in == nil { + return nil + } + out := new(ClusterGenerator) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ClusterSecretTemplate) DeepCopyInto(out *ClusterSecretTemplate) { + *out = *in + in.Metadata.DeepCopyInto(&out.Metadata) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecretTemplate. +func (in *ClusterSecretTemplate) DeepCopy() *ClusterSecretTemplate { + if in == nil { + return nil + } + out := new(ClusterSecretTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSecretTemplateMetadata) DeepCopyInto(out *ClusterSecretTemplateMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSecretTemplateMetadata. +func (in *ClusterSecretTemplateMetadata) DeepCopy() *ClusterSecretTemplateMetadata { + if in == nil { + return nil + } + out := new(ClusterSecretTemplateMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSet) DeepCopyInto(out *ClusterSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSet. +func (in *ClusterSet) DeepCopy() *ClusterSet { + if in == nil { + return nil + } + out := new(ClusterSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSetList) DeepCopyInto(out *ClusterSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetList. +func (in *ClusterSetList) DeepCopy() *ClusterSetList { + if in == nil { + return nil + } + out := new(ClusterSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSetSpec) DeepCopyInto(out *ClusterSetSpec) { + *out = *in + if in.Generators != nil { + in, out := &in.Generators, &out.Generators + *out = make([]ClusterGenerator, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Template.DeepCopyInto(&out.Template) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetSpec. 
+func (in *ClusterSetSpec) DeepCopy() *ClusterSetSpec { + if in == nil { + return nil + } + out := new(ClusterSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSetStatus) DeepCopyInto(out *ClusterSetStatus) { + *out = *in + in.Clusters.DeepCopyInto(&out.Clusters) + in.LastSyncTime.DeepCopyInto(&out.LastSyncTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetStatus. +func (in *ClusterSetStatus) DeepCopy() *ClusterSetStatus { + if in == nil { + return nil + } + out := new(ClusterSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterSetStatusClusters) DeepCopyInto(out *ClusterSetStatusClusters) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterSetStatusClusters. +func (in *ClusterSetStatusClusters) DeepCopy() *ClusterSetStatusClusters { + if in == nil { + return nil + } + out := new(ClusterSetStatusClusters) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Forward) DeepCopyInto(out *Forward) { + *out = *in + if in.TargetGroups != nil { + in, out := &in.TargetGroups, &out.TargetGroups + *out = make([]ForwardTargetGroup, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Forward. +func (in *Forward) DeepCopy() *Forward { + if in == nil { + return nil + } + out := new(Forward) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ForwardTargetGroup) DeepCopyInto(out *ForwardTargetGroup) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ForwardTargetGroup. +func (in *ForwardTargetGroup) DeepCopy() *ForwardTargetGroup { + if in == nil { + return nil + } + out := new(ForwardTargetGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupBindingSelector) DeepCopyInto(out *TargetGroupBindingSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupBindingSelector. +func (in *TargetGroupBindingSelector) DeepCopy() *TargetGroupBindingSelector { + if in == nil { + return nil + } + out := new(TargetGroupBindingSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TargetGroupClusterSelector) DeepCopyInto(out *TargetGroupClusterSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupClusterSelector. +func (in *TargetGroupClusterSelector) DeepCopy() *TargetGroupClusterSelector { + if in == nil { + return nil + } + out := new(TargetGroupClusterSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TargetGroupSelector) DeepCopyInto(out *TargetGroupSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetGroupSelector. +func (in *TargetGroupSelector) DeepCopy() *TargetGroupSelector { + if in == nil { + return nil + } + out := new(TargetGroupSelector) + in.DeepCopyInto(out) + return out +} diff --git a/cmd/okra/okra.go b/cmd/okra/okra.go new file mode 100644 index 0000000..ea4d320 --- /dev/null +++ b/cmd/okra/okra.go @@ -0,0 +1,446 @@ +package okra + +import ( + "errors" + "fmt" + "os" + "strings" + + _ "github.com/aws/aws-sdk-go/service/eks" + "github.com/mumoshu/okra/pkg/analysis" + "github.com/mumoshu/okra/pkg/awstargetgroupset" + "github.com/mumoshu/okra/pkg/clusterset" + "github.com/mumoshu/okra/pkg/manager" + "github.com/mumoshu/okra/pkg/okraerror" + "github.com/mumoshu/okra/pkg/targetgroupbinding" + _ "k8s.io/client-go/plugin/pkg/client/auth/exec" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +const ( + ApplicationName = "okra" +) + +type Config struct { + dryRun bool + ns string + name string + endpoint string + caData string + eksTags []string + labelKVs []string +} + +func InitFlags(flag *pflag.FlagSet) *Config { + var c Config + + flag.BoolVar(&c.dryRun, "dry-run", false, "") + flag.StringVar(&c.ns, "namespace", "", "") + flag.StringVar(&c.name, "name", "", "") + flag.StringVar(&c.endpoint, "endpoint", "", "") + flag.StringVar(&c.caData, "ca-data", "", "") + flag.StringSliceVar(&c.eksTags, "eks-tags", nil, "Comma-separated KEY=VALUE pairs of EKS control-plane tags") + flag.StringSliceVar(&c.labelKVs, "labels", nil, "Comma-separated KEY=VALUE pairs of cluster secret labels") + + return &c +} + +func InitListClustersFlags(flag *pflag.FlagSet, c *clusterset.ListClustersInput) func() *clusterset.ListClustersInput { + flag.StringVar(&c.NS, "namespace", c.NS, "") + flag.StringVar(&c.Selector, "selector", "", "") + + return func() *clusterset.ListClustersInput { + return c + } +} + +func InitCreateTargetGroupBindingFlags(flag *pflag.FlagSet, c *targetgroupbinding.CreateInput) func() *targetgroupbinding.CreateInput { + var labelKVs []string + + flag.BoolVar(&c.DryRun, "dry-run", c.DryRun, "") + flag.StringVar(&c.ClusterNamespace, "cluster-namespace", c.ClusterNamespace, "") + flag.StringVar(&c.ClusterName, "cluster-name", c.ClusterName, "") + flag.StringVar(&c.TargetGroupARN, "target-group-arn", c.TargetGroupARN, "") + flag.StringVar(&c.Name, "name", c.Name, "") + flag.StringVar(&c.Namespace, "namespace", c.Namespace, "") + flag.StringSliceVar(&labelKVs, "labels", nil, "Comma-separated KEY=VALUE pairs of cluster secret 
labels") + + return func() *targetgroupbinding.CreateInput { + labels := map[string]string{} + for _, kv := range labelKVs { + split := strings.Split(kv, "=") + labels[split[0]] = split[1] + } + + c.Labels = labels + return c + } +} + +func InitCreateClusterFlags(flag *pflag.FlagSet, c *clusterset.CreateClusterInput) func() *clusterset.CreateClusterInput { + var labelKVs []string + + flag.BoolVar(&c.DryRun, "dry-run", c.DryRun, "") + flag.StringVar(&c.NS, "namespace", c.NS, "") + flag.StringVar(&c.Name, "name", c.Name, "") + flag.StringVar(&c.Endpoint, "endpoint", c.Endpoint, "") + flag.StringVar(&c.CAData, "ca-data", "", "") + flag.StringSliceVar(&labelKVs, "labels", nil, "Comma-separated KEY=VALUE pairs of cluster secret labels") + + return func() *clusterset.CreateClusterInput { + labels := map[string]string{} + for _, kv := range labelKVs { + split := strings.Split(kv, "=") + labels[split[0]] = split[1] + } + + c.Labels = labels + return c + } +} + +func InitDeleteClusterFlags(flag *pflag.FlagSet, c *clusterset.DeleteClusterInput) func() *clusterset.DeleteClusterInput { + flag.BoolVar(&c.DryRun, "dry-run", c.DryRun, "") + flag.StringVar(&c.NS, "namespace", c.NS, "") + flag.StringVar(&c.Name, "name", c.Name, "") + + return func() *clusterset.DeleteClusterInput { + return c + } +} + +func InitSyncClusterSetFlags(flag *pflag.FlagSet, c *clusterset.SyncInput) func() *clusterset.SyncInput { + var ( + eksTags []string + labelKVs []string + ) + + flag.BoolVar(&c.DryRun, "dry-run", false, "") + flag.StringVar(&c.NS, "namespace", "", "") + flag.StringSliceVar(&eksTags, "eks-tags", nil, "Comma-separated KEY=VALUE pairs of EKS control-plane tags") + flag.StringSliceVar(&labelKVs, "labels", nil, "Comma-separated KEY=VALUE pairs of cluster secret labels") + + return func() *clusterset.SyncInput { + tags := map[string]string{} + for _, kv := range eksTags { + split := strings.Split(kv, "=") + tags[split[0]] = split[1] + } + + c.EKSTags = tags + + labels := map[string]string{} + for _, kv := range labelKVs { + split := strings.Split(kv, "=") + labels[split[0]] = split[1] + } + + c.Labels = labels + + return c + } +} + +func InitListTargetGroupBindingsFlags(flag *pflag.FlagSet, c *targetgroupbinding.ListInput) func() *targetgroupbinding.ListInput { + flag.StringVar(&c.ClusterName, "cluster-name", "", "") + flag.StringVar(&c.NS, "namespace", "", "") + + return func() *targetgroupbinding.ListInput { + return c + } +} + +func InitSyncAWSTargetGroupSetFlags(flag *pflag.FlagSet, c *awstargetgroupset.SyncInput) func() *awstargetgroupset.SyncInput { + var ( + bindingSelector string + labelKVs []string + ) + + flag.BoolVar(&c.DryRun, "dry-run", false, "") + flag.StringVar(&c.ClusterName, "cluster-name", "", "ArgoCD Cluster name on which we find TargetGroupBinding") + flag.StringVar(&c.NS, "namespace", "", "Namespace of the ArgoCD Cluster and the generated AWSTargetGroup resources") + flag.StringVar(&bindingSelector, "targetgroupbinding-selector", "", "Comma-separated KEY=VALUE pairs of TargetGroupBinding resource labels") + flag.StringSliceVar(&labelKVs, "labels", nil, "Comma-separated KEY=VALUE pairs of AWSTargetGroup labels") + + return func() *awstargetgroupset.SyncInput { + c.BindingSelector = bindingSelector + + labels := map[string]string{} + for _, kv := range labelKVs { + split := strings.Split(kv, "=") + labels[split[0]] = split[1] + } + + c.Labels = labels + + return c + } +} + +func InitListAWSTargetGroupsFlags(flag *pflag.FlagSet, c *awstargetgroupset.ListAWSTargetGroupsInput) func() 
*awstargetgroupset.ListAWSTargetGroupsInput { + flag.StringVar(&c.NS, "namespace", "", "Namespace of AWSTargetGroup resources") + flag.StringVar(&c.Selector, "selector", "", "Label selector for AWSTargetGroup resources") + + return func() *awstargetgroupset.ListAWSTargetGroupsInput { + return c + } +} + +func InitListLatestAWSTargetGroupsFlags(flag *pflag.FlagSet, c *awstargetgroupset.ListLatestAWSTargetGroupsInput) func() *awstargetgroupset.ListLatestAWSTargetGroupsInput { + flag.StringVar(&c.NS, "namespace", "", "Namespace of AWSTargetGroup resources") + flag.StringVar(&c.Selector, "selector", "", "Label selector for AWSTargetGroup resources") + flag.StringVar(&c.SemverLabelKey, "semver-label-key", "okra.mumo.co/version", "The label key that holds the version number of the target group") + + return func() *awstargetgroupset.ListLatestAWSTargetGroupsInput { + return c + } +} + +func InitRunAnalysisFlags(flag *pflag.FlagSet, c *analysis.RunInput) func() *analysis.RunInput { + flag.StringVar(&c.AnalysisTemplateName, "template-name", "", "") + flag.StringVar(&c.NS, "namespace", "", "") + flag.StringToStringVar(&c.AnalysisArgs, "args", map[string]string{}, "") + flag.StringToStringVar(&c.AnalysisArgsFromSecrets, "args-from-secrets", map[string]string{}, "Comma-separated list of secret refs in the form \"arg-name=secret-name.field-name\"") + + return func() *analysis.RunInput { + return c + } +} + +func Run() error { + cmd := &cobra.Command{ + Use: ApplicationName, + } + + var listClustersInput func() *clusterset.ListClustersInput + listClusters := &cobra.Command{ + Use: "list-clusters", + SilenceErrors: true, + RunE: func(cmd *cobra.Command, args []string) error { + clusters, err := clusterset.ListClusters(*listClustersInput()) + if err != nil { + if !errors.Is(err, okraerror.Error{}) { + cmd.SilenceUsage = true + } + + return err + } + + for _, c := range clusters { + fmt.Fprintf(os.Stdout, "%v\n", c.Name) + } + + return nil + }, + } + listClustersInput = InitListClustersFlags(listClusters.Flags(), &clusterset.ListClustersInput{}) + cmd.AddCommand(listClusters) + + var createClusterInput func() *clusterset.CreateClusterInput + createCluster := &cobra.Command{ + Use: "create-cluster [--namespace ns] [--name name] [--labels k1=v1,k2=v2] [--endpoint https://...] 
[--ca-data cadata]", + SilenceErrors: true, + RunE: func(cmd *cobra.Command, args []string) error { + if err := clusterset.CreateCluster(*createClusterInput()); err != nil { + if !errors.Is(err, okraerror.Error{}) { + cmd.SilenceUsage = true + } + + return err + } + + return nil + }, + } + createClusterInput = InitCreateClusterFlags(createCluster.Flags(), &clusterset.CreateClusterInput{}) + cmd.AddCommand(createCluster) + + var deleteClusterInput func() *clusterset.DeleteClusterInput + deleteCluster := &cobra.Command{ + Use: "delete-cluster", + RunE: func(cmd *cobra.Command, args []string) error { + return clusterset.DeleteCluster(*deleteClusterInput()) + }, + } + deleteClusterInput = InitDeleteClusterFlags(deleteCluster.Flags(), &clusterset.DeleteClusterInput{}) + cmd.AddCommand(deleteCluster) + + var createMissingClustersInput func() *clusterset.SyncInput + createMissingClusters := &cobra.Command{ + Use: "create-missing-clusters", + RunE: func(cmd *cobra.Command, args []string) error { + return clusterset.CreateMissingClusters(*createMissingClustersInput()) + }, + } + createMissingClustersInput = InitSyncClusterSetFlags(createMissingClusters.Flags(), &clusterset.SyncInput{}) + cmd.AddCommand(createMissingClusters) + + var deleteOutdatedClustersInput func() *clusterset.SyncInput + deleteOutdatedClusters := &cobra.Command{ + Use: "delete-outdated-clusters", + RunE: func(cmd *cobra.Command, args []string) error { + return clusterset.DeleteOutdatedClusters(*deleteOutdatedClustersInput()) + }, + } + deleteOutdatedClustersInput = InitSyncClusterSetFlags(deleteOutdatedClusters.Flags(), &clusterset.SyncInput{}) + cmd.AddCommand(deleteOutdatedClusters) + + var syncClusterSetInput func() *clusterset.SyncInput + syncClusterSet := &cobra.Command{ + Use: "sync-clusterset", + RunE: func(cmd *cobra.Command, args []string) error { + return clusterset.Sync(*syncClusterSetInput()) + }, + } + syncClusterSetInput = InitSyncClusterSetFlags(syncClusterSet.Flags(), &clusterset.SyncInput{}) + cmd.AddCommand(syncClusterSet) + + var listTargetGroupBindingInput func() *targetgroupbinding.ListInput + listTargetGroupBindings := &cobra.Command{ + Use: "list-targetgroupbindings", + RunE: func(cmd *cobra.Command, args []string) error { + bindings, err := targetgroupbinding.List(*listTargetGroupBindingInput()) + + for _, b := range bindings { + fmt.Fprintf(os.Stdout, "%+v\n", b) + } + + return err + }, + } + listTargetGroupBindingInput = InitListTargetGroupBindingsFlags(listTargetGroupBindings.Flags(), &targetgroupbinding.ListInput{}) + cmd.AddCommand(listTargetGroupBindings) + + var createTargetGroupBindingInput func() *targetgroupbinding.CreateInput + createTargetGroupBinding := &cobra.Command{ + Use: "create-targetgroupbinding", + RunE: func(cmd *cobra.Command, args []string) error { + binding, err := targetgroupbinding.Create(*createTargetGroupBindingInput()) + + if binding != nil { + fmt.Fprintf(os.Stdout, "%+v\n", binding) + } + + return err + }, + } + createTargetGroupBindingInput = InitCreateTargetGroupBindingFlags(createTargetGroupBinding.Flags(), &targetgroupbinding.CreateInput{}) + cmd.AddCommand(createTargetGroupBinding) + + var createMissingAWSTargetGroupsInput func() *awstargetgroupset.SyncInput + createMissingAWSTargetGroups := &cobra.Command{ + Use: "create-missing-awstargetgroups", + RunE: func(cmd *cobra.Command, args []string) error { + bindings, err := awstargetgroupset.CreateMissingAWSTargetGroups(*createMissingAWSTargetGroupsInput()) + + for _, b := range bindings { + fmt.Fprintf(os.Stdout, 
"%+v\n", b) + } + + return err + }, + } + createMissingAWSTargetGroupsInput = InitSyncAWSTargetGroupSetFlags(createMissingAWSTargetGroups.Flags(), &awstargetgroupset.SyncInput{}) + cmd.AddCommand(createMissingAWSTargetGroups) + + var deleteOutdatedAWSTargetGroupsInput func() *awstargetgroupset.SyncInput + deleteOutdatedAWSTargetGroups := &cobra.Command{ + Use: "delete-outdated-awstargetgroups", + RunE: func(cmd *cobra.Command, args []string) error { + deleted, err := awstargetgroupset.DeleteOutdatedAWSTargetGroups(*deleteOutdatedAWSTargetGroupsInput()) + + for _, b := range deleted { + fmt.Fprintf(os.Stdout, "%+v\n", b) + } + + return err + }, + } + deleteOutdatedAWSTargetGroupsInput = InitSyncAWSTargetGroupSetFlags(deleteOutdatedAWSTargetGroups.Flags(), &awstargetgroupset.SyncInput{}) + cmd.AddCommand(deleteOutdatedAWSTargetGroups) + + var syncAWSTargetGroupSetInput func() *awstargetgroupset.SyncInput + syncAWSTargetGroupSet := &cobra.Command{ + Use: "sync-awstargetgroupset", + RunE: func(cmd *cobra.Command, args []string) error { + bindings, err := awstargetgroupset.Sync(*syncAWSTargetGroupSetInput()) + + for _, b := range bindings { + fmt.Fprintf(os.Stdout, "%+v\n", b) + } + + return err + }, + } + syncAWSTargetGroupSetInput = InitSyncAWSTargetGroupSetFlags(syncAWSTargetGroupSet.Flags(), &awstargetgroupset.SyncInput{}) + cmd.AddCommand(syncAWSTargetGroupSet) + + var listTargetGroupsInput func() *awstargetgroupset.ListAWSTargetGroupsInput + listTargetGroups := &cobra.Command{ + Use: "list-awstargetgroups", + RunE: func(cmd *cobra.Command, args []string) error { + bindings, err := awstargetgroupset.ListAWSTargetGroups(*listTargetGroupsInput()) + + for _, b := range bindings { + fmt.Fprintf(os.Stdout, "%+v\n", b) + } + + return err + }, + } + listTargetGroupsInput = InitListAWSTargetGroupsFlags(listTargetGroups.Flags(), &awstargetgroupset.ListAWSTargetGroupsInput{}) + cmd.AddCommand(listTargetGroups) + + var listLatestTargetGroupsInput func() *awstargetgroupset.ListLatestAWSTargetGroupsInput + listLatestTargetGroups := &cobra.Command{ + Use: "list-latest-awstargetgroups", + RunE: func(cmd *cobra.Command, args []string) error { + bindings, err := awstargetgroupset.ListLatestAWSTargetGroups(*listLatestTargetGroupsInput()) + + for _, b := range bindings { + fmt.Fprintf(os.Stdout, "%+v\n", b) + } + + return err + }, + } + listLatestTargetGroupsInput = InitListLatestAWSTargetGroupsFlags(listLatestTargetGroups.Flags(), &awstargetgroupset.ListLatestAWSTargetGroupsInput{}) + cmd.AddCommand(listLatestTargetGroups) + + var runAnalysisInput func() *analysis.RunInput + runAnalysis := &cobra.Command{ + Use: "run-analysis", + RunE: func(cmd *cobra.Command, args []string) error { + run, err := analysis.Run(*runAnalysisInput()) + + if run != nil { + fmt.Fprintf(os.Stdout, "%+v\n", *run) + } + + return err + }, + } + runAnalysisInput = InitRunAnalysisFlags(runAnalysis.Flags(), &analysis.RunInput{}) + cmd.AddCommand(runAnalysis) + + m := &manager.Manager{} + + controllerManager := &cobra.Command{ + Use: "controller-manager", + RunE: func(cmd *cobra.Command, args []string) error { + err := m.Run() + if !errors.Is(err, okraerror.Error{}) { + cmd.SilenceUsage = true + } + return err + }, + } + m.AddPFlags(controllerManager.Flags()) + cmd.AddCommand(controllerManager) + + err := cmd.Execute() + + return err +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..6614451 --- /dev/null +++ b/go.mod @@ -0,0 +1,72 @@ +module github.com/mumoshu/okra + +go 1.17 + +require ( + github.com/aws/aws-sdk-go 
v1.35.29 + github.com/blang/semver v3.5.0+incompatible + github.com/go-logr/logr v0.2.1 + github.com/imdario/mergo v0.3.11 // indirect + github.com/pkg/errors v0.9.1 + github.com/prometheus/common v0.4.1 + github.com/spf13/cobra v1.1.1 + github.com/spf13/pflag v1.0.5 + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect + golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 + k8s.io/api v0.19.4 + k8s.io/apimachinery v0.19.4 + k8s.io/client-go v0.19.4 + k8s.io/utils v0.0.0-20201110183641-67b214c5f920 + sigs.k8s.io/controller-runtime v0.6.4 + sigs.k8s.io/yaml v1.2.0 +) + +require ( + cloud.google.com/go v0.51.0 // indirect + github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc // indirect + github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf // indirect + github.com/beorn7/perks v1.0.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/evanphx/json-patch v4.9.0+incompatible // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/go-logr/zapr v0.1.0 // indirect + github.com/gogo/protobuf v1.3.1 // indirect + github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 // indirect + github.com/golang/protobuf v1.4.2 // indirect + github.com/google/go-cmp v0.4.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect + github.com/google/uuid v1.1.1 // indirect + github.com/googleapis/gnostic v0.4.1 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/json-iterator/go v1.1.10 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/prometheus/client_golang v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/procfs v0.0.11 // indirect + github.com/sirupsen/logrus v1.4.2 // indirect + go.uber.org/atomic v1.4.0 // indirect + go.uber.org/multierr v1.1.0 // indirect + go.uber.org/zap v1.10.0 // indirect + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect + golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect + golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 // indirect + golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 // indirect + golang.org/x/text v0.3.3 // indirect + gomodules.xyz/jsonpatch/v2 v2.0.1 // indirect + google.golang.org/appengine v1.6.5 // indirect + google.golang.org/protobuf v1.24.0 // indirect + gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.3.0 // indirect + k8s.io/apiextensions-apiserver v0.18.6 // indirect + k8s.io/klog/v2 v2.2.0 // indirect + k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.0.1 // indirect +) + +replace github.com/go-logr/zapr v0.1.0 => github.com/go-logr/zapr v0.2.0 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..6d7e53e --- /dev/null +++ b/go.sum @@ -0,0 +1,686 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod 
h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= +cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= +github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= +github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/aws/aws-sdk-go v1.35.29 h1:1kYnwrWTp2e+lI9yYFaDo7OFaLug8yXC6Qdj+u8451Q= +github.com/aws/aws-sdk-go v1.35.29/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= +github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= +github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd 
v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/globalsign/mgo 
v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v0.2.1 h1:fV3MLmabKIZ383XifUjFSwcoGee0v9qgPp8wy5svibE= +github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= +github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= +github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= +github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= +github.com/go-openapi/runtime 
v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= +github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= +github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= +github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod 
h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= +github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.11 
h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= +github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= 
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= 
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/tsdb 
v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= +github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= 
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto 
v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.0.1 h1:xyiBuvkD2g5n7cYzx6u2sxQvsAy4QJsZFCzGVdzOXZ0= +gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v2.2.0+incompatible/go.mod 
h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= +k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrqOzaMo= +k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= +k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= +k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q= +k8s.io/client-go v0.19.4 h1:85D3mDNoLF+xqpyE9Dh/OtrJDyJrSRKkHmDXIbEzer8= +k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= +k8s.io/code-generator v0.18.6/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c= +k8s.io/component-base v0.18.6/go.mod h1:knSVsibPR5K6EW2XOjEHik6sdU5nCvKMrzMt2D4In14= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw= +k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client 
v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= +sigs.k8s.io/controller-runtime v0.6.4 h1:4013CKsBs5bEqo+LevzDett+LLxag/FjQWG94nVZ/9g= +sigs.k8s.io/controller-runtime v0.6.4/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt new file mode 100644 index 0000000..4d4f76f --- /dev/null +++ b/hack/boilerplate.go.txt @@ -0,0 +1,15 @@ +/* +Copyright 2020 The Okra authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ diff --git a/main.go b/main.go new file mode 100644 index 0000000..deeba63 --- /dev/null +++ b/main.go @@ -0,0 +1,19 @@ +package main + +import ( + "fmt" + "os" + + "github.com/mumoshu/okra/cmd/okra" +) + +func main() { + if err := okra.Run(); err != nil { + if os.Getenv("TRACE") != "" { + fmt.Fprintf(os.Stderr, "Error: %+v\n", err) + } else { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + } + os.Exit(1) + } +} diff --git a/pkg/analysis/run.go b/pkg/analysis/run.go new file mode 100644 index 0000000..d3d53dd --- /dev/null +++ b/pkg/analysis/run.go @@ -0,0 +1,148 @@ +package analysis + +import ( + "context" + "fmt" + "strings" + "time" + + rolloutsv1alpha1 "github.com/mumoshu/okra/api/rollouts/v1alpha1" + "github.com/mumoshu/okra/pkg/clclient" + "github.com/mumoshu/okra/pkg/okraerror" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type RunInput struct { + AnalysisTemplateName string + NS string + AnalysisArgs map[string]string + AnalysisArgsFromSecrets map[string]string + DryRun bool +} + +// Run instantiates a new AnalysisRun object to let Argo Rollouts run an analysis. +// This command requires both AnalysisTemplate and AnalysisRun CRDs to be installed onto the cluster. 
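+//
+// A minimal usage sketch (hypothetical caller; the template name, namespace, and
+// argument values below are illustrative examples, not part of this package):
+//
+//	run, err := analysis.Run(analysis.RunInput{
+//		AnalysisTemplateName: "success-rate",
+//		NS:                   "default",
+//		AnalysisArgs:         map[string]string{"service-name": "my-svc"},
+//		// Secret-backed args use the "<secret-name>.<key>" form handled below.
+//		AnalysisArgsFromSecrets: map[string]string{"api-token": "my-secret.token"},
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Println(run.Name)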
+func Run(input RunInput) (*rolloutsv1alpha1.AnalysisRun, error) { + c, err := clclient.New() + if err != nil { + return nil, okraerror.New(err) + } + + templateName := input.AnalysisTemplateName + ns := input.NS + dryRun := input.DryRun + + var dryRunValues []string + if dryRun { + dryRunValues = []string{"All"} + } + + ctx := context.Background() + + var template rolloutsv1alpha1.AnalysisTemplate + if err := c.Get(ctx, types.NamespacedName{Namespace: ns, Name: templateName}, &template); err != nil { + return nil, okraerror.New(err) + } + + argsMap := map[string]rolloutsv1alpha1.Argument{} + for _, a := range template.Spec.Args { + // This is the default value + argsMap[a.Name] = a + } + + // The following two sets of for-range loops is basically + // an alternative to Argo Rollouts' MergeArgs. + // We needed our own implementation here to deal with the fact that we can set + // both args from immediate values and secretrefs. + // See below for MergeArgs + // https://github.com/argoproj/argo-rollouts/blob/1ee46cff2a3203fd2da7d540c9fd25c8a61900c2/utils/analysis/helpers.go#L165-L167 + + for k, v := range input.AnalysisArgs { + if _, ok := argsMap[k]; !ok { + return nil, okraerror.New(fmt.Errorf("argument %s does not exist in analysisrun template %s", k, templateName)) + } + + v := v + + argsMap[k] = rolloutsv1alpha1.Argument{ + Name: k, + Value: &v, + } + } + + for k, v := range input.AnalysisArgsFromSecrets { + if _, ok := argsMap[k]; !ok { + return nil, okraerror.New(fmt.Errorf("argument %s does not exist in analysisrun template %s", k, templateName)) + } + + vs := strings.SplitN(v, ".", 2) + + argsMap[k] = rolloutsv1alpha1.Argument{ + Name: k, + ValueFrom: &rolloutsv1alpha1.ValueFrom{ + SecretKeyRef: &rolloutsv1alpha1.SecretKeyRef{ + Name: vs[0], + Key: vs[1], + }, + }, + } + } + + var args []rolloutsv1alpha1.Argument + + for _, v := range argsMap { + args = append(args, v) + } + + timestamp := time.Now().Format("20060102150405") + + const TimestampLabel = "okra.mumo.co/timestamp" + + runLabels := map[string]string{ + TimestampLabel: timestamp, + } + + run := rolloutsv1alpha1.AnalysisRun{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: templateName + "-", + Namespace: ns, + Labels: runLabels, + }, + Spec: rolloutsv1alpha1.AnalysisRunSpec{ + Args: args, + Metrics: template.Spec.Metrics, + }, + } + + if err := c.Create(ctx, &run, &client.CreateOptions{DryRun: dryRunValues}); err != nil { + return nil, okraerror.New(err) + } + + var created rolloutsv1alpha1.AnalysisRunList + + var opts []client.ListOption + + if ns != "" { + opts = append(opts, client.InNamespace(ns)) + } + + lbls, err := labels.ValidatedSelectorFromSet(runLabels) + if err != nil { + return nil, okraerror.New(err) + } + + opts = append(opts, client.MatchingLabelsSelector{Selector: lbls}) + + if err := c.List(ctx, &created, opts...); err != nil { + return nil, okraerror.New(err) + } + + if len(created.Items) != 1 { + return nil, okraerror.New(fmt.Errorf("unexpected number of runs found: %d", len(created.Items))) + } + + return &created.Items[0], nil +} diff --git a/pkg/awsapplicationloadbalancer/awsapplicationloadbalancer.go b/pkg/awsapplicationloadbalancer/awsapplicationloadbalancer.go new file mode 100644 index 0000000..026d573 --- /dev/null +++ b/pkg/awsapplicationloadbalancer/awsapplicationloadbalancer.go @@ -0,0 +1,12 @@ +package awsapplicationloadbalancer + +type Provider struct { +} + +type CreateInput struct { + ListenerARN string +} + +func (p *Provider) CreateConfigFromAWS(input CreateInput) error { + return nil 
+} diff --git a/pkg/awsclicompat/awsclicompat.go b/pkg/awsclicompat/awsclicompat.go new file mode 100644 index 0000000..4fd85e1 --- /dev/null +++ b/pkg/awsclicompat/awsclicompat.go @@ -0,0 +1,43 @@ +package awsclicompat + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/session" + "os" +) + +// NewSession creates a new AWS session for the given AWS region. +// +// The following credential sources are supported: +// +// 1. static credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) +// 2. static credentials loaded from profiles (AWS_PROFILE, when AWS_SDK_LOAD_CONFIG=true) +// 3. dynamic credentials obtained by assuming the role using static credentials loaded from the profile (AWS_PROFILE, when AWS_SDK_LOAD_CONFIG=true) +// 4. dynamic credentials obtained by assuming the role using static credentials loaded from the env (FORCE_AWS_PROFILE=true w/ credential_source=Environment) +// +// The fourth option of using FORCE_AWS_PROFILE=true and AWS_PROFILE=yourprofile is equivalent to `aws --profile ${AWS_PROFILE}`. +// See https://github.com/variantdev/vals/issues/19#issuecomment-600437486 for more details and why and when this is needed. +func NewSession(region, profile string) *session.Session { + var cfg *aws.Config + if region != "" { + cfg = aws.NewConfig().WithRegion(region) + } else { + cfg = aws.NewConfig() + } + + opts := session.Options{ + AssumeRoleTokenProvider: stscreds.StdinTokenProvider, + SharedConfigState: session.SharedConfigEnable, + Config: *cfg, + Profile: profile, + } + + if os.Getenv("FORCE_AWS_PROFILE") == "true" { + opts.Profile = os.Getenv("AWS_PROFILE") + } + + sess := session.Must(session.NewSessionWithOptions(opts)) + + return sess +} diff --git a/pkg/awstargetgroupset/awstargetgroupset.go b/pkg/awstargetgroupset/awstargetgroupset.go new file mode 100644 index 0000000..bbd7d83 --- /dev/null +++ b/pkg/awstargetgroupset/awstargetgroupset.go @@ -0,0 +1,450 @@ +package awstargetgroupset + +import ( + "context" + "fmt" + "os" + + "github.com/blang/semver" + "github.com/mumoshu/okra/api/elbv2/v1beta1" + okrav1alpha1 "github.com/mumoshu/okra/api/v1alpha1" + "github.com/mumoshu/okra/pkg/clclient" + "github.com/mumoshu/okra/pkg/okraerror" + "golang.org/x/xerrors" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "sigs.k8s.io/controller-runtime/pkg/client" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/yaml" +) + +type Config struct { + DryRun bool + NS string + Name string + Endpoint string + CAData string + Labels map[string]string +} + +type ClusterSetConfig struct { + DryRun bool + NS string + EKSTags map[string]string + Labels map[string]string +} + +type CreateTargetGroupInput struct { + DryRun bool + NS string + Name string + ARN string + Labels map[string]string +} + +type SyncInput struct { + DryRun bool + NS string + ClusterName string + BindingSelector string + Labels map[string]string +} + +type DeleteInput struct { + NS string + Name string + DryRun bool +} + +type Provider struct { + client.Client +} + +func New(cl client.Client) *Provider { + return &Provider{ + Client: cl, + } +} + +func (p *Provider) CreateTargetGroup(config CreateTargetGroupInput) error { + ns := config.NS + name := config.Name + arn := config.ARN + dryRun := config.DryRun + labels := config.Labels + + if name == "" { + return okraerror.New(fmt.Errorf("name is required")) + } + + object := 
&okrav1alpha1.AWSTargetGroup{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: ns,
+			Labels:    labels,
+		},
+		Spec: okrav1alpha1.AWSTargetGroupSpec{
+			ARN: arn,
+		},
+	}
+
+	if dryRun {
+		text, err := yaml.Marshal(object)
+		if err != nil {
+			return err
+		}
+
+		fmt.Fprintf(os.Stdout, "%s\n", text)
+
+		return nil
+	}
+
+	if err := p.Client.Create(context.TODO(), object); err != nil {
+		return okraerror.New(err)
+	}
+
+	fmt.Printf("AWSTargetGroup %q created successfully\n", name)
+
+	return nil
+}
+
+func CreateMissingAWSTargetGroups(config SyncInput) ([]SyncResult, error) {
+	ns := config.NS
+	dryRun := config.DryRun
+
+	clientset, err := clclient.NewClientSet()
+	if err != nil {
+		return nil, xerrors.Errorf("creating clientset: %w", err)
+	}
+
+	kubeclient := clientset.CoreV1().Secrets(ns)
+
+	secret, err := kubeclient.Get(context.TODO(), config.ClusterName, metav1.GetOptions{})
+	if err != nil {
+		return nil, xerrors.Errorf("listing cluster secrets: %w", err)
+	}
+
+	managementClient, err := clclient.New()
+	if err != nil {
+		return nil, err
+	}
+
+	client, err := clclient.NewFromClusterSecret(*secret)
+	if err != nil {
+		return nil, err
+	}
+
+	var bindings v1beta1.TargetGroupBindingList
+
+	optionalNS := ""
+
+	sel, err := labels.Parse(config.BindingSelector)
+	if err != nil {
+		return nil, xerrors.Errorf("parsing binding selector: %v", err)
+	}
+
+	if err := client.List(context.TODO(), &bindings, &runtimeclient.ListOptions{
+		Namespace:     optionalNS,
+		LabelSelector: sel,
+	}); err != nil {
+		return nil, okraerror.New(err)
+	}
+
+	var objects []okrav1alpha1.AWSTargetGroup
+
+	for _, b := range bindings.Items {
+		labels := map[string]string{}
+
+		for k, v := range b.Labels {
+			labels[k] = v
+		}
+
+		for k, v := range config.Labels {
+			labels[k] = v
+		}
+
+		objects = append(objects, okrav1alpha1.AWSTargetGroup{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:   b.Name,
+				Labels: labels,
+			},
+			Spec: okrav1alpha1.AWSTargetGroupSpec{
+				ARN: b.Spec.TargetGroupARN,
+			},
+		})
+	}
+
+	for _, object := range objects {
+		// Manage resource
+		if !dryRun {
+			err := managementClient.Create(context.TODO(), &object)
+			if err != nil {
+				if kerrors.IsAlreadyExists(err) {
+					fmt.Printf("AWSTargetGroup %q has no change\n", object.Name)
+				} else {
+					fmt.Fprintf(os.Stderr, "Failed creating object: %+v\n", object)
+					return nil, okraerror.New(err)
+				}
+			} else {
+				fmt.Printf("AWSTargetGroup %q created successfully\n", object.Name)
+			}
+		} else {
+			fmt.Printf("AWSTargetGroup %q created successfully (Dry Run)\n", object.Name)
+		}
+	}
+
+	var created []SyncResult
+
+	return created, nil
+}
+
+func Delete(config DeleteInput) error {
+	ns := config.NS
+	name := config.Name
+	dryRun := config.DryRun
+
+	clientset, err := clclient.NewClientSet()
+	if err != nil {
+		return xerrors.Errorf("creating clientset: %w", err)
+	}
+
+	kubeclient := clientset.CoreV1().Secrets(ns)
+
+	if dryRun {
+		fmt.Fprintf(os.Stdout, "Cluster secret %q deleted successfully (dry run)\n", name)
+
+		return nil
+	}
+
+	// Manage resource
+	err = kubeclient.Delete(context.TODO(), name, metav1.DeleteOptions{})
+	if err != nil {
+		return err
+	}
+
+	fmt.Printf("Cluster secret %q deleted successfully\n", name)
+
+	return nil
+}
+
+func DeleteOutdatedAWSTargetGroups(config SyncInput) ([]SyncResult, error) {
+	ns := config.NS
+	dryRun := config.DryRun
+
+	clientset, err := clclient.NewClientSet()
+	if err != nil {
+		return nil, xerrors.Errorf("creating clientset: %w", err)
+	}
+
+	kubeclient := clientset.CoreV1().Secrets(ns)
+
+	secret, err :=
kubeclient.Get(context.TODO(), config.ClusterName, metav1.GetOptions{}) + if err != nil { + return nil, xerrors.Errorf("listing cluster secrets: %w", err) + } + + managementClient, err := clclient.New() + if err != nil { + return nil, err + } + + client, err := clclient.NewFromClusterSecret(*secret) + if err != nil { + return nil, err + } + + var bindings v1beta1.TargetGroupBindingList + + optionalNS := "" + + sel, err := labels.Parse(config.BindingSelector) + if err != nil { + return nil, xerrors.Errorf("parsing binding selector: %v", err) + } + + if err := client.List(context.TODO(), &bindings, &runtimeclient.ListOptions{ + Namespace: optionalNS, + LabelSelector: sel, + }); err != nil { + return nil, okraerror.New(err) + } + + var objects []okrav1alpha1.AWSTargetGroup + + for _, b := range bindings.Items { + labels := map[string]string{} + + for k, v := range b.Labels { + labels[k] = v + } + + for k, v := range config.Labels { + labels[k] = v + } + + objects = append(objects, okrav1alpha1.AWSTargetGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: b.Name, + Labels: labels, + }, + Spec: okrav1alpha1.AWSTargetGroupSpec{ + ARN: b.Spec.TargetGroupARN, + }, + }) + } + + desiredTargetGroups := map[string]struct{}{} + + for _, obj := range objects { + desiredTargetGroups[obj.Name] = struct{}{} + } + + var current okrav1alpha1.AWSTargetGroupList + + if err := managementClient.List(context.TODO(), ¤t, &runtimeclient.ListOptions{ + Namespace: optionalNS, + LabelSelector: sel, + }); err != nil { + return nil, okraerror.New(err) + } + + var deleted []SyncResult + + for _, item := range current.Items { + name := item.Name + + if _, desired := desiredTargetGroups[name]; !desired { + if dryRun { + fmt.Printf("AWSTargetGroup %q deleted successfully (Dry Run)\n", name) + } else { + // Manage resource + err := kubeclient.Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + return nil, err + } + + fmt.Printf("AWSTargetGroup %q deleted successfully\n", name) + } + + deleted = append(deleted, SyncResult{ + Name: name, + Action: "Delete", + }) + } + } + + return deleted, nil +} + +type SyncResult struct { + Name string + Action string +} + +func Sync(config SyncInput) ([]SyncResult, error) { + created, err := CreateMissingAWSTargetGroups(config) + if err != nil { + return nil, xerrors.Errorf("creating missing cluster secrets: %w", err) + } + + deleted, err := DeleteOutdatedAWSTargetGroups(config) + if err != nil { + return created, xerrors.Errorf("deleting redundant cluster secrets: %w", err) + } + + all := append([]SyncResult{}, created...) + all = append(all, deleted...) 
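+	// Sync aggregates the results of both passes in order (creations first, then
+	// deletions) so that callers such as a CLI or controller can report exactly
+	// which AWSTargetGroups were touched.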
+ + return all, nil +} + +type ListLatestAWSTargetGroupsInput struct { + ListAWSTargetGroupsInput + + SemverLabelKey string +} + +type ListAWSTargetGroupsInput struct { + NS string + Selector string +} + +func ListLatestAWSTargetGroups(config ListLatestAWSTargetGroupsInput) ([]okrav1alpha1.AWSTargetGroup, error) { + groups, err := ListAWSTargetGroups(config.ListAWSTargetGroupsInput) + if err != nil { + return nil, err + } + + type entry struct { + ver semver.Version + groups []okrav1alpha1.AWSTargetGroup + } + + labelKey := config.SemverLabelKey + if labelKey == "" { + return nil, fmt.Errorf("missing semver label key") + } + + var latestVer *semver.Version + + versionedGroups := map[string]entry{} + + for _, g := range groups { + g := g + + verStr, ok := g.Labels[labelKey] + if !ok { + return nil, fmt.Errorf("no semver label found on group: %v", g) + } + + ver, err := semver.Parse(verStr) + if err != nil { + return nil, err + } + + if latestVer == nil { + latestVer = &ver + } else if latestVer.LT(ver) { + latestVer = &ver + } + + e := versionedGroups[ver.String()] + + e.ver = ver + e.groups = append(e.groups, g) + + versionedGroups[ver.String()] = e + } + + if latestVer == nil { + return nil, nil + } + + latest := versionedGroups[latestVer.String()] + + return latest.groups, nil +} + +func ListAWSTargetGroups(config ListAWSTargetGroupsInput) ([]okrav1alpha1.AWSTargetGroup, error) { + managementClient, err := clclient.New() + if err != nil { + return nil, err + } + + sel, err := labels.Parse(config.Selector) + if err != nil { + return nil, err + } + + var list okrav1alpha1.AWSTargetGroupList + + if err := managementClient.List(context.TODO(), &list, &runtimeclient.ListOptions{ + Namespace: config.NS, + LabelSelector: sel, + }); err != nil { + return nil, err + } + + return list.Items, nil +} diff --git a/pkg/clclient/argocd_api.go b/pkg/clclient/argocd_api.go new file mode 100644 index 0000000..8f23524 --- /dev/null +++ b/pkg/clclient/argocd_api.go @@ -0,0 +1,325 @@ +/* +Copyright The Argo Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Source (minus Cluster.Equals() func) +// https://github.com/argoproj/argo-cd/blob/ae02bc27fc500e871a4b0f5decd36591bb867b4a/pkg/apis/application/v1alpha1/types.go#L2274 + +package clclient + +import ( + "fmt" + "net" + "net/http" + "os" + "path/filepath" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilnet "k8s.io/apimachinery/pkg/util/net" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/tools/clientcmd/api" +) + +// ConnectionStatus represents the status indicator for a connection to a remote resource +type ConnectionStatus = string + +const ( + // ConnectionStatusSuccessful indicates that a connection has been successfully established + ConnectionStatusSuccessful = "Successful" + // ConnectionStatusFailed indicates that a connection attempt has failed + ConnectionStatusFailed = "Failed" + // ConnectionStatusUnknown indicates that the connection status could not be reliably determined + ConnectionStatusUnknown = "Unknown" +) + +// ConnectionState contains information about remote resource connection state, currently used for clusters and repositories +type ConnectionState struct { + // Status contains the current status indicator for the connection + Status ConnectionStatus `json:"status" protobuf:"bytes,1,opt,name=status"` + // Message contains human readable information about the connection status + Message string `json:"message" protobuf:"bytes,2,opt,name=message"` + // ModifiedAt contains the timestamp when this connection status has been determined + ModifiedAt *metav1.Time `json:"attemptedAt" protobuf:"bytes,3,opt,name=attemptedAt"` +} + +// Cluster is the definition of a cluster resource +type Cluster struct { + // ID is an internal field cluster identifier. Not exposed via API. + ID string `json:"-"` + // Server is the API server URL of the Kubernetes cluster + Server string `json:"server" protobuf:"bytes,1,opt,name=server"` + // Name of the cluster. If omitted, will use the server address + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // Config holds cluster information for connecting to a cluster + Config ClusterConfig `json:"config" protobuf:"bytes,3,opt,name=config"` + // DEPRECATED: use Info.ConnectionState field instead. + // ConnectionState contains information about cluster connection state + ConnectionState ConnectionState `json:"connectionState,omitempty" protobuf:"bytes,4,opt,name=connectionState"` + // DEPRECATED: use Info.ServerVersion field instead. + // The server version + ServerVersion string `json:"serverVersion,omitempty" protobuf:"bytes,5,opt,name=serverVersion"` + // Holds list of namespaces which are accessible in that cluster. Cluster level resources will be ignored if namespace list is not empty. + Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,6,opt,name=namespaces"` + // RefreshRequestedAt holds time when cluster cache refresh has been requested + RefreshRequestedAt *metav1.Time `json:"refreshRequestedAt,omitempty" protobuf:"bytes,7,opt,name=refreshRequestedAt"` + // Info holds information about cluster cache and state + Info ClusterInfo `json:"info,omitempty" protobuf:"bytes,8,opt,name=info"` + // Shard contains optional shard number. Calculated on the fly by the application controller if not specified. + Shard *int64 `json:"shard,omitempty" protobuf:"bytes,9,opt,name=shard"` + // Indicates if cluster level resources should be managed. This setting is used only if cluster is connected in a namespaced mode. 
+ ClusterResources bool `json:"clusterResources,omitempty" protobuf:"bytes,10,opt,name=clusterResources"` + // Reference between project and cluster that allow you automatically to be added as item inside Destinations project entity + Project string `json:"project,omitempty" protobuf:"bytes,11,opt,name=project"` + // Labels for cluster secret metadata + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,12,opt,name=labels"` + // Annotations for cluster secret metadata + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,13,opt,name=annotations"` +} + +// ClusterInfo contains information about the cluster +type ClusterInfo struct { + // ConnectionState contains information about the connection to the cluster + ConnectionState ConnectionState `json:"connectionState,omitempty" protobuf:"bytes,1,opt,name=connectionState"` + // ServerVersion contains information about the Kubernetes version of the cluster + ServerVersion string `json:"serverVersion,omitempty" protobuf:"bytes,2,opt,name=serverVersion"` + // CacheInfo contains information about the cluster cache + CacheInfo ClusterCacheInfo `json:"cacheInfo,omitempty" protobuf:"bytes,3,opt,name=cacheInfo"` + // ApplicationsCount is the number of applications managed by Argo CD on the cluster + ApplicationsCount int64 `json:"applicationsCount" protobuf:"bytes,4,opt,name=applicationsCount"` + // APIVersions contains list of API versions supported by the cluster + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,5,opt,name=apiVersions"` +} + +func (c *ClusterInfo) GetKubeVersion() string { + return c.ServerVersion +} + +func (c *ClusterInfo) GetApiVersions() []string { + return c.APIVersions +} + +// ClusterCacheInfo contains information about the cluster cache +type ClusterCacheInfo struct { + // ResourcesCount holds number of observed Kubernetes resources + ResourcesCount int64 `json:"resourcesCount,omitempty" protobuf:"bytes,1,opt,name=resourcesCount"` + // APIsCount holds number of observed Kubernetes API count + APIsCount int64 `json:"apisCount,omitempty" protobuf:"bytes,2,opt,name=apisCount"` + // LastCacheSyncTime holds time of most recent cache synchronization + LastCacheSyncTime *metav1.Time `json:"lastCacheSyncTime,omitempty" protobuf:"bytes,3,opt,name=lastCacheSyncTime"` +} + +// ClusterList is a collection of Clusters. +type ClusterList struct { + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []Cluster `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// AWSAuthConfig is an AWS IAM authentication configuration +type AWSAuthConfig struct { + // ClusterName contains AWS cluster name + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,1,opt,name=clusterName"` + + // RoleARN contains optional role ARN. If set then AWS IAM Authenticator assume a role to perform cluster operations instead of the default AWS credential provider chain. 
+ RoleARN string `json:"roleARN,omitempty" protobuf:"bytes,2,opt,name=roleARN"` +} + +// ExecProviderConfig is config used to call an external command to perform cluster authentication +// See: https://godoc.org/k8s.io/client-go/tools/clientcmd/api#ExecConfig +type ExecProviderConfig struct { + // Command to execute + Command string `json:"command,omitempty" protobuf:"bytes,1,opt,name=command"` + + // Arguments to pass to the command when executing it + Args []string `json:"args,omitempty" protobuf:"bytes,2,rep,name=args"` + + // Env defines additional environment variables to expose to the process + Env map[string]string `json:"env,omitempty" protobuf:"bytes,3,opt,name=env"` + + // Preferred input version of the ExecInfo + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,4,opt,name=apiVersion"` + + // This text is shown to the user when the executable doesn't seem to be present + InstallHint string `json:"installHint,omitempty" protobuf:"bytes,5,opt,name=installHint"` +} + +// ClusterConfig is the configuration attributes. This structure is subset of the go-client +// rest.Config with annotations added for marshalling. +type ClusterConfig struct { + // Server requires Basic authentication + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + Password string `json:"password,omitempty" protobuf:"bytes,2,opt,name=password"` + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string `json:"bearerToken,omitempty" protobuf:"bytes,3,opt,name=bearerToken"` + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig `json:"tlsClientConfig" protobuf:"bytes,4,opt,name=tlsClientConfig"` + + // AWSAuthConfig contains IAM authentication configuration + AWSAuthConfig *AWSAuthConfig `json:"awsAuthConfig,omitempty" protobuf:"bytes,5,opt,name=awsAuthConfig"` + + // ExecProviderConfig contains configuration for an exec provider + ExecProviderConfig *ExecProviderConfig `json:"execProviderConfig,omitempty" protobuf:"bytes,6,opt,name=execProviderConfig"` +} + +// TLSClientConfig contains settings to enable transport layer security +type TLSClientConfig struct { + // Insecure specifies that the server should be accessed without verifying the TLS certificate. For testing only. + Insecure bool `json:"insecure" protobuf:"bytes,1,opt,name=insecure"` + // ServerName is passed to the server for SNI and is used in the client to check server + // certificates against. If ServerName is empty, the hostname used to contact the + // server is used. + ServerName string `json:"serverName,omitempty" protobuf:"bytes,2,opt,name=serverName"` + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte `json:"certData,omitempty" protobuf:"bytes,3,opt,name=certData"` + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). + // KeyData takes precedence over KeyFile + KeyData []byte `json:"keyData,omitempty" protobuf:"bytes,4,opt,name=keyData"` + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). 
+ // CAData takes precedence over CAFile + CAData []byte `json:"caData,omitempty" protobuf:"bytes,5,opt,name=caData"` +} + +// SetK8SConfigDefaults sets Kubernetes REST config default settings +func SetK8SConfigDefaults(config *rest.Config) error { + config.QPS = K8sClientConfigQPS + config.Burst = K8sClientConfigBurst + tlsConfig, err := rest.TLSConfigFor(config) + if err != nil { + return err + } + + dial := (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext + transport := utilnet.SetTransportDefaults(&http.Transport{ + Proxy: http.ProxyFromEnvironment, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + MaxIdleConns: K8sMaxIdleConnections, + MaxIdleConnsPerHost: K8sMaxIdleConnections, + MaxConnsPerHost: K8sMaxIdleConnections, + DialContext: dial, + DisableCompression: config.DisableCompression, + }) + tr, err := rest.HTTPWrappersForConfig(config, transport) + if err != nil { + return err + } + + // set default tls config and remove auth/exec provides since we use it in a custom transport + config.TLSClientConfig = rest.TLSClientConfig{} + config.AuthProvider = nil + config.ExecProvider = nil + + config.Transport = tr + return nil +} + +// RawRestConfig returns a go-client REST config from cluster that might be serialized into the file using kube.WriteKubeConfig method. +func (c *Cluster) RawRestConfig() *rest.Config { + var config *rest.Config + var err error + if c.Server == KubernetesInternalAPIServerAddr && os.Getenv(EnvVarFakeInClusterConfig) == "true" { + conf, exists := os.LookupEnv("KUBECONFIG") + if exists { + config, err = clientcmd.BuildConfigFromFlags("", conf) + } else { + config, err = clientcmd.BuildConfigFromFlags("", filepath.Join(os.Getenv("HOME"), ".kube", "config")) + } + } else if c.Server == KubernetesInternalAPIServerAddr && c.Config.Username == "" && c.Config.Password == "" && c.Config.BearerToken == "" { + config, err = rest.InClusterConfig() + } else if c.Server == KubernetesInternalAPIServerAddr { + config, err = rest.InClusterConfig() + if err == nil { + config.Username = c.Config.Username + config.Password = c.Config.Password + config.BearerToken = c.Config.BearerToken + config.BearerTokenFile = "" + } + } else { + tlsClientConfig := rest.TLSClientConfig{ + Insecure: c.Config.TLSClientConfig.Insecure, + ServerName: c.Config.TLSClientConfig.ServerName, + CertData: c.Config.TLSClientConfig.CertData, + KeyData: c.Config.TLSClientConfig.KeyData, + CAData: c.Config.TLSClientConfig.CAData, + } + if c.Config.AWSAuthConfig != nil { + args := []string{"eks", "get-token", "--cluster-name", c.Config.AWSAuthConfig.ClusterName} + if c.Config.AWSAuthConfig.RoleARN != "" { + args = append(args, "--role-arn", c.Config.AWSAuthConfig.RoleARN) + } + config = &rest.Config{ + Host: c.Server, + TLSClientConfig: tlsClientConfig, + ExecProvider: &api.ExecConfig{ + APIVersion: "client.authentication.k8s.io/v1alpha1", + Command: "aws", + Args: args, + }, + } + } else if c.Config.ExecProviderConfig != nil { + var env []api.ExecEnvVar + if c.Config.ExecProviderConfig.Env != nil { + for key, value := range c.Config.ExecProviderConfig.Env { + env = append(env, api.ExecEnvVar{ + Name: key, + Value: value, + }) + } + } + config = &rest.Config{ + Host: c.Server, + TLSClientConfig: tlsClientConfig, + ExecProvider: &api.ExecConfig{ + APIVersion: c.Config.ExecProviderConfig.APIVersion, + Command: c.Config.ExecProviderConfig.Command, + Args: c.Config.ExecProviderConfig.Args, + Env: env, + InstallHint: 
c.Config.ExecProviderConfig.InstallHint, + }, + } + } else { + config = &rest.Config{ + Host: c.Server, + Username: c.Config.Username, + Password: c.Config.Password, + BearerToken: c.Config.BearerToken, + TLSClientConfig: tlsClientConfig, + } + } + } + if err != nil { + panic(fmt.Sprintf("Unable to create K8s REST config: %v", err)) + } + return config +} + +// RESTConfig returns a go-client REST config from cluster with tuned throttling and HTTP client settings. +func (c *Cluster) RESTConfig() *rest.Config { + config := c.RawRestConfig() + err := SetK8SConfigDefaults(config) + if err != nil { + panic(fmt.Sprintf("Unable to apply K8s REST config defaults: %v", err)) + } + return config +} diff --git a/pkg/clclient/argocd_application_defaults.go b/pkg/clclient/argocd_application_defaults.go new file mode 100644 index 0000000..066beb2 --- /dev/null +++ b/pkg/clclient/argocd_application_defaults.go @@ -0,0 +1,23 @@ +/* +Copyright The Argo Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Source https://github.com/argoproj/argo-cd/blob/ae02bc27fc500e871a4b0f5decd36591bb867b4a/pkg/apis/application/v1alpha1/application_defaults.go +package clclient + +const ( + // KubernetesInternalAPIServerAddr is address of the k8s API server when accessing internal to the cluster + KubernetesInternalAPIServerAddr = "https://kubernetes.default.svc" +) diff --git a/pkg/clclient/argocd_cluster_constants.go b/pkg/clclient/argocd_cluster_constants.go new file mode 100644 index 0000000..0aac3fb --- /dev/null +++ b/pkg/clclient/argocd_cluster_constants.go @@ -0,0 +1,73 @@ +/* +Copyright The Argo Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package clclient
+
+import (
+	"os"
+	"strconv"
+)
+
+const (
+
+	// EnvVarFakeInClusterConfig is an environment variable to fake an in-cluster RESTConfig using
+	// the current kubectl context (for development purposes)
+	EnvVarFakeInClusterConfig = "ARGOCD_FAKE_IN_CLUSTER"
+
+	// EnvK8sClientQPS is the QPS value used for the kubernetes client (default: 50)
+	EnvK8sClientQPS = "ARGOCD_K8S_CLIENT_QPS"
+
+	// EnvK8sClientBurst is the burst value used for the kubernetes client (default: twice the client QPS)
+	EnvK8sClientBurst = "ARGOCD_K8S_CLIENT_BURST"
+
+	// EnvK8sClientMaxIdleConnections is the number of max idle connections in K8s REST client HTTP transport (default: 500)
+	EnvK8sClientMaxIdleConnections = "ARGOCD_K8S_CLIENT_MAX_IDLE_CONNECTIONS"
+)
+
+// Constants associated with the Cluster API
+var (
+
+	// K8sClientConfigQPS controls the QPS to be used in K8s REST client configs
+	K8sClientConfigQPS float32 = 50
+
+	// K8sClientConfigBurst controls the burst to be used in K8s REST client configs
+	K8sClientConfigBurst int = 100
+
+	// K8sMaxIdleConnections controls the number of max idle connections in K8s REST client HTTP transport
+	K8sMaxIdleConnections = 500
+)
+
+func init() {
+	// Only override the defaults when the environment variable parses successfully.
+	if envQPS := os.Getenv(EnvK8sClientQPS); envQPS != "" {
+		if qps, err := strconv.ParseFloat(envQPS, 32); err == nil {
+			K8sClientConfigQPS = float32(qps)
+		}
+	}
+	if envBurst := os.Getenv(EnvK8sClientBurst); envBurst != "" {
+		if burst, err := strconv.Atoi(envBurst); err == nil {
+			K8sClientConfigBurst = burst
+		}
+	} else {
+		K8sClientConfigBurst = 2 * int(K8sClientConfigQPS)
+	}
+
+	if envMaxConn := os.Getenv(EnvK8sClientMaxIdleConnections); envMaxConn != "" {
+		if maxConn, err := strconv.Atoi(envMaxConn); err == nil {
+			K8sMaxIdleConnections = maxConn
+		}
+	}
+
+}
diff --git a/pkg/clclient/clclient.go b/pkg/clclient/clclient.go
new file mode 100644
index 0000000..7ac987e
--- /dev/null
+++ b/pkg/clclient/clclient.go
@@ -0,0 +1,192 @@
+package clclient
+
+import (
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"strconv"
+	"strings"
+	"time"
+
+	elbv2v1beta1 "github.com/mumoshu/okra/api/elbv2/v1beta1"
+	rolloutsv1alpha1 "github.com/mumoshu/okra/api/rollouts/v1alpha1"
+	okrav1alpha1 "github.com/mumoshu/okra/api/v1alpha1"
+	"github.com/mumoshu/okra/pkg/okraerror"
+	"github.com/prometheus/common/log"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
+	"k8s.io/client-go/rest"
+	"k8s.io/client-go/tools/clientcmd"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/config"
+)
+
+var (
+	scheme = runtime.NewScheme()
+)
+
+func init() {
+	_ = clientgoscheme.AddToScheme(scheme)
+
+	_ = okrav1alpha1.AddToScheme(scheme)
+	_ = rolloutsv1alpha1.AddToScheme(scheme)
+	_ = elbv2v1beta1.AddToScheme(scheme)
+	// +kubebuilder:scaffold:scheme
+}
+
+func Scheme() *runtime.Scheme {
+	return scheme
+}
+
+func New() (client.Client, error) {
+	return NewFromRestConfig(config.GetConfigOrDie())
+}
+
+func NewFromRestConfig(config *rest.Config) (client.Client, error) {
+	cl, err := client.New(config, client.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		return nil, okraerror.New(err)
+	}
+
+	return cl, nil
+}
+
+func NewFromBytes(kubeconfig []byte) (client.Client, error) {
+	clCfg, err := clientcmd.NewClientConfigFromBytes(kubeconfig)
+	if err != nil {
+		return nil, okraerror.New(err)
+	}
+
clClCfg, err := clCfg.ClientConfig() + if err != nil { + return nil, okraerror.New(err) + } + + cl, err := client.New(clClCfg, client.Options{ + Scheme: scheme, + }) + if err != nil { + return nil, okraerror.New(err) + } + + return cl, nil +} + +// NewFromClusterSecret returns a controller-runtime client that is able to interact with +// the Kubernetes API server via a dynamic interface. +// If the cluster access ends up with `Unauthorized` errors, try isolating the cause by running +// kubectl with the connection details, like +// kubectl --token k8s-aws-v1.REDACTED --server https://REDACTED.REDACTED.ap-northeast-1.eks.amazonaws.com get no +// where the token is obtained by +// aws eks get-token --cluster-name $CLUSTER_NAME +// and server +// aws eks describe-cluster --name $CLUSTER_NAME +// CDK users- Beware that you need to use the CDK role when accessing the cluster +// https://github.com/aws/aws-cdk/issues/3752#issuecomment-525213763 +// In that case `aws eks get-token` command should include `--role-arn` flag like +// aws eks get-token --cluster-name $CLUSTER_NAME --role-arn $ROLE_ARN +// If the get-token command ends up failing with `AccessDenied`, you will need to recreate the cluster with a proper `masterRole` config. +// See below for more about that. +// https://docs.aws.amazon.com/cdk/api/latest/docs/aws-eks-readme.html#masters-role +func NewFromClusterSecret(clusterSecret corev1.Secret) (client.Client, error) { + cluster, err := SecretToCluster(&clusterSecret) + if err != nil { + return nil, err + } + + if cluster.Config.AWSAuthConfig != nil && cluster.Config.AWSAuthConfig.ClusterName != "" { + if _, err := exec.LookPath("aws"); err != nil { + return nil, okraerror.New(fmt.Errorf("looking for executable \"aws\": %v", err)) + } + } + + return NewFromRestConfig(cluster.RESTConfig()) +} + +// SecretToCluster converts a secret into a Cluster object +// Derived from https://github.com/argoproj/argo-cd/blob/2147ed3aea727ba128df629d53a1d25fd0f6927c/util/db/cluster.go#L290 +func SecretToCluster(s *corev1.Secret) (*Cluster, error) { + const ( + // AnnotationKeyRefresh is the annotation key which indicates that app needs to be refreshed. Removed by application controller after app is refreshed. + // Might take values 'normal'/'hard'. Value 'hard' means manifes + // Copied from https://github.com/argoproj/argo-cd/blob/cc4eea0d6951f1025c9ebb487374658186fa8984/pkg/apis/application/v1alpha1/application_annotations.go#L4-L6 + AnnotationKeyRefresh string = "argocd.argoproj.io/refresh" + + // LabelKeySecretType contains the type of argocd secret (currently: 'cluster', 'repository', 'repo-config' or 'repo-creds') + // Copied from https://github.com/argoproj/argo-cd/blob/3c874ae065c14102003d041d76d4a337abd72f1e/common/common.go#L107-L108 + LabelKeySecretType = "argocd.argoproj.io/secret-type" + + // AnnotationKeyManagedBy is annotation name which indicates that k8s resource is managed by an application. 
+ // Copied from https://github.com/argoproj/argo-cd/blob/3c874ae065c14102003d041d76d4a337abd72f1e/common/common.go#L122-L123 + AnnotationKeyManagedBy = "managed-by" + ) + + var config ClusterConfig + if len(s.Data["config"]) > 0 { + err := json.Unmarshal(s.Data["config"], &config) + if err != nil { + return nil, err + } + } + + var namespaces []string + for _, ns := range strings.Split(string(s.Data["namespaces"]), ",") { + if ns = strings.TrimSpace(ns); ns != "" { + namespaces = append(namespaces, ns) + } + } + var refreshRequestedAt *metav1.Time + if v, found := s.Annotations[AnnotationKeyRefresh]; found { + requestedAt, err := time.Parse(time.RFC3339, v) + if err != nil { + log.Warnf("Error while parsing date in cluster secret '%s': %v", s.Name, err) + } else { + refreshRequestedAt = &metav1.Time{Time: requestedAt} + } + } + var shard *int64 + if shardStr := s.Data["shard"]; shardStr != nil { + if val, err := strconv.Atoi(string(shardStr)); err != nil { + log.Warnf("Error while parsing shard in cluster secret '%s': %v", s.Name, err) + } else { + shard = pointer.Int64Ptr(int64(val)) + } + } + + // copy labels and annotations excluding system ones + labels := map[string]string{} + if s.Labels != nil { + for k, v := range s.Labels { + labels[k] = v + } + delete(labels, LabelKeySecretType) + } + annotations := map[string]string{} + if s.Annotations != nil { + for k, v := range s.Annotations { + annotations[k] = v + } + delete(annotations, AnnotationKeyManagedBy) + } + + cluster := Cluster{ + ID: string(s.UID), + Server: strings.TrimRight(string(s.Data["server"]), "/"), + Name: string(s.Data["name"]), + Namespaces: namespaces, + ClusterResources: string(s.Data["clusterResources"]) == "true", + Config: config, + RefreshRequestedAt: refreshRequestedAt, + Shard: shard, + Project: string(s.Data["project"]), + Labels: labels, + Annotations: annotations, + } + return &cluster, nil +} diff --git a/pkg/clclient/clientset.go b/pkg/clclient/clientset.go new file mode 100644 index 0000000..f0e2eee --- /dev/null +++ b/pkg/clclient/clientset.go @@ -0,0 +1,50 @@ +package clclient + +import ( + "log" + "os" + "path/filepath" + + "golang.org/x/xerrors" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" +) + +func NewClientSet() (*kubernetes.Clientset, error) { + var kubeconfig string + kubeconfig, ok := os.LookupEnv("KUBECONFIG") + if !ok { + kubeconfig = filepath.Join(homedir.HomeDir(), ".kube", "config") + } + + var config *rest.Config + + if info, _ := os.Stat(kubeconfig); info == nil { + var err error + + log.Printf("Using in-cluster Kubernetes API client") + + config, err = rest.InClusterConfig() + if err != nil { + return nil, xerrors.Errorf("GetNodeSClient: %w", err) + } + } else { + var err error + + log.Printf("Using kubeconfig-based Kubernetes API client") + + config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, xerrors.Errorf("GetNodesClient: %w", err) + } + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, xerrors.Errorf("new for config: %w", err) + } + + return clientset, nil +} diff --git a/pkg/clusterset/run.go b/pkg/clusterset/run.go new file mode 100644 index 0000000..1b271a0 --- /dev/null +++ b/pkg/clusterset/run.go @@ -0,0 +1,483 @@ +package clusterset + +import ( + "context" + "fmt" + "log" + "os" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + 
"github.com/aws/aws-sdk-go/service/eks" + "github.com/mumoshu/okra/pkg/awsclicompat" + "github.com/mumoshu/okra/pkg/clclient" + "github.com/mumoshu/okra/pkg/okraerror" + "golang.org/x/xerrors" + corev1 "k8s.io/api/core/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/client-go/util/homedir" + "sigs.k8s.io/yaml" +) + +type Config struct { + DryRun bool + NS string + Name string + Endpoint string + CAData string + Labels map[string]string +} + +type ClusterSetConfig struct { + DryRun bool + NS string + EKSTags map[string]string + Labels map[string]string +} + +type CreateClusterInput struct { + DryRun bool + NS string + Name string + Endpoint string + CAData string + Labels map[string]string +} + +type SyncInput struct { + DryRun bool + NS string + Labels map[string]string + EKSTags map[string]string +} + +type DeleteClusterInput struct { + NS string + Name string + DryRun bool +} + +func CreateCluster(config CreateClusterInput) error { + ns := config.NS + name := config.Name + endpoint := config.Endpoint + caData := config.CAData + dryRun := config.DryRun + labels := config.Labels + + if name == "" { + return okraerror.New(fmt.Errorf("name is required")) + } + + clientset, err := newClientset() + if err != nil { + return xerrors.Errorf("creating clientset: %w", err) + } + + kubeclient := clientset.CoreV1().Secrets(ns) + + var object *corev1.Secret + + if endpoint == "" || caData == "" { + var err error + + object, err = newClusterSecretFromName(ns, name, labels) + if err != nil { + return err + } + } else { + object = newClusterSecretFromValues(ns, name, labels, endpoint, caData) + } + + if dryRun { + text, err := yaml.Marshal(object) + if err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "%s\n", text) + + return nil + } + + // Manage resource + _, err = kubeclient.Create(context.TODO(), object, metav1.CreateOptions{}) + if err != nil { + return err + } + + fmt.Printf("Cluster secert %q created successfully\n", name) + + return nil +} + +func CreateMissingClusters(config SyncInput) error { + ns := config.NS + dryRun := config.DryRun + + clientset, err := newClientset() + if err != nil { + return xerrors.Errorf("creating clientset: %w", err) + } + + kubeclient := clientset.CoreV1().Secrets(ns) + + objects, err := clusterSecretsFromClusters(ns, config.EKSTags, config.Labels) + if err != nil { + return err + } + + for _, object := range objects { + // Manage resource + if !dryRun { + _, err := kubeclient.Create(context.TODO(), object, metav1.CreateOptions{}) + if err != nil { + if kerrors.IsAlreadyExists(err) { + fmt.Printf("Cluster secret %q has no change\n", object.Name) + } else { + fmt.Fprintf(os.Stderr, "Failed creating object: %+v\n", object) + return okraerror.New(err) + } + } else { + fmt.Printf("Cluster secert %q created successfully\n", object.Name) + } + } else { + fmt.Printf("Cluster secert %q created successfully (Dry Run)\n", object.Name) + } + } + + return nil +} + +func DeleteCluster(config DeleteClusterInput) error { + ns := config.NS + name := config.Name + dryRun := config.DryRun + + clientset, err := newClientset() + if err != nil { + return xerrors.Errorf("creating clientset: %w", err) + } + + kubeclient := clientset.CoreV1().Secrets(ns) + + if dryRun { + fmt.Fprintf(os.Stdout, "Cluster secrer %q deleted successfully (dry run)\n", name) + + return nil + } + + // Manage resource + 
err = kubeclient.Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + return err + } + + fmt.Printf("Cluster secert %q deleted successfully\n", name) + + return nil +} + +func DeleteOutdatedClusters(config SyncInput) error { + ns := config.NS + dryRun := config.DryRun + + clientset, err := newClientset() + if err != nil { + return xerrors.Errorf("creating clientset: %w", err) + } + + kubeclient := clientset.CoreV1().Secrets(ns) + + labelSelectors := []string{ + fmt.Sprintf("%s=%s", SecretLabelKeyArgoCDType, SecretLabelValueArgoCDCluster), + } + + for k, v := range config.Labels { + labelSelectors = append(labelSelectors, fmt.Sprintf("%s=%s", k, v)) + } + + result, err := kubeclient.List(context.TODO(), metav1.ListOptions{ + LabelSelector: strings.Join(labelSelectors, ","), + }) + if err != nil { + return xerrors.Errorf("listing cluster secrets: %w", err) + } + + objects, err := clusterSecretsFromClusters(ns, config.EKSTags, config.Labels) + if err != nil { + return err + } + + desiredClusters := map[string]struct{}{} + + for _, obj := range objects { + desiredClusters[obj.Name] = struct{}{} + } + + for _, item := range result.Items { + name := item.Name + + if _, desired := desiredClusters[name]; !desired { + if dryRun { + fmt.Printf("Cluster secert %q deleted successfully (Dry Run)\n", name) + } else { + // Manage resource + err := kubeclient.Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + return err + } + + fmt.Printf("Cluster secert %q deleted successfully\n", name) + } + } + } + + return nil +} + +type ListClustersInput struct { + NS string + Selector string +} + +func ListClusters(config ListClustersInput) ([]clclient.Cluster, error) { + ns := config.NS + + clientset, err := newClientset() + if err != nil { + return nil, xerrors.Errorf("creating clientset: %w", err) + } + + kubeclient := clientset.CoreV1().Secrets(ns) + + selStr := config.Selector + if selStr != "" { + selStr += "," + } + + selStr += fmt.Sprintf("%s=%s", SecretLabelKeyArgoCDType, SecretLabelValueArgoCDCluster) + + sel, err := labels.Parse(selStr) + if err != nil { + return nil, err + } + + result, err := kubeclient.List(context.TODO(), metav1.ListOptions{ + LabelSelector: sel.String(), + }) + if err != nil { + return nil, xerrors.Errorf("listing cluster secrets: %w", err) + } + + var clusters []clclient.Cluster + + for _, s := range result.Items { + cluster, err := clclient.SecretToCluster(&s) + if err != nil { + return nil, err + } + + clusters = append(clusters, *cluster) + } + + return clusters, nil +} + +func Sync(config SyncInput) error { + if err := CreateMissingClusters(config); err != nil { + return xerrors.Errorf("creating missing cluster secrets: %w", err) + } + + if err := DeleteOutdatedClusters(config); err != nil { + return xerrors.Errorf("deleting redundant cluster secrets: %w", err) + } + + return nil +} + +func clusterSecretsFromClusters(ns string, tags, labels map[string]string) ([]*corev1.Secret, error) { + sess := awsclicompat.NewSession("", "") + + eksClient := eks.New(sess) + + var secrets []*corev1.Secret + + process := func(nextToken *string) (*string, error) { + log.Printf("Calling EKS ListClusters...") + + result, err := eksClient.ListClusters(&eks.ListClustersInput{ + NextToken: nextToken, + }) + + if err != nil { + return nil, xerrors.Errorf("listing clusters: %w", err) + } + + log.Printf("Found %d clusters.", len(result.Clusters)) + + for _, clusterName := range result.Clusters { + log.Printf("Checking cluster %s...", *clusterName) + + result, 
err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: aws.String(*clusterName)}) + if err != nil { + return nil, xerrors.Errorf("creating cluster secret: %w", err) + } + + all := true + for k, v := range tags { + value := result.Cluster.Tags[k] + + all = all && value != nil && *value == v + } + + if all { + sec := newClusterSecretFromCluster(ns, *clusterName, labels, result) + + secrets = append(secrets, sec) + } else { + log.Printf("Cluster %s with tags %v did not match selector %v", *clusterName, result.Cluster.Tags, tags) + } + } + + return result.NextToken, nil + } + + log.Printf("Computing desired cluster secrets from EKS clusters...") + + nextToken, err := process(nil) + if err != nil { + return nil, xerrors.Errorf("processing first set of EKS clusters: %w", err) + } + + for nextToken = nil; nextToken != nil; { + var err error + + nextToken, err = process(nextToken) + + if err != nil { + return nil, err + } + } + + return secrets, nil +} + +func newClusterSecretFromName(ns, name string, labels map[string]string) (*corev1.Secret, error) { + sess := awsclicompat.NewSession("", "") + + eksClient := eks.New(sess) + + result, err := eksClient.DescribeCluster(&eks.DescribeClusterInput{Name: aws.String(name)}) + if err != nil { + if awsErr, ok := err.(awserr.Error); ok { + switch awsErr.Code() { + case eks.ErrCodeResourceNotFoundException: + result, err := eksClient.ListClusters(&eks.ListClustersInput{}) + if err != nil { + fmt.Fprintf(os.Stderr, "failed listing clusters: %v", err) + } else { + fmt.Fprintf(os.Stderr, "Available clusters are:\n") + for _, c := range result.Clusters { + fmt.Fprintf(os.Stderr, "%s\n", *c) + } + } + } + } + return nil, okraerror.New(fmt.Errorf("%w", err)) + } + + return newClusterSecretFromCluster(ns, name, labels, result), nil +} + +func newClusterSecretFromCluster(ns, name string, labels map[string]string, result *eks.DescribeClusterOutput) *corev1.Secret { + return newClusterSecretFromValues(ns, name, labels, *result.Cluster.Endpoint, *result.Cluster.CertificateAuthority.Data) +} + +const ( + SecretLabelKeyArgoCDType = "argocd.argoproj.io/secret-type" + SecretLabelValueArgoCDCluster = "cluster" +) + +func newClusterSecretFromValues(ns, name string, labels map[string]string, server, base64CA string) *corev1.Secret { + lbls := map[string]string{ + SecretLabelKeyArgoCDType: SecretLabelValueArgoCDCluster, + } + + for k, v := range labels { + lbls[k] = v + } + + // Create resource object + object := &corev1.Secret{ + TypeMeta: metav1.TypeMeta{ + Kind: "Secret", + APIVersion: "v1", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: ns, + Labels: lbls, + }, + StringData: map[string]string{ + "name": name, + "server": server, + "config": fmt.Sprintf(`{ + "awsAuthConfig": { + "clusterName": "%s" + }, + "tlsClientConfig": { + "insecure": false, + "caData": "%s" + } + } +`, name, base64CA), + }, + } + + return object +} + +func newClientset() (*kubernetes.Clientset, error) { + var kubeconfig string + kubeconfig, ok := os.LookupEnv("KUBECONFIG") + if !ok { + kubeconfig = filepath.Join(homedir.HomeDir(), ".kube", "config") + } + + var config *rest.Config + + if info, _ := os.Stat(kubeconfig); info == nil { + var err error + + log.Printf("Using in-cluster Kubernetes API client") + + config, err = rest.InClusterConfig() + if err != nil { + return nil, xerrors.Errorf("GetNodeSClient: %w", err) + } + } else { + var err error + + log.Printf("Using kubeconfig-based Kubernetes API client") + + config, err = clientcmd.BuildConfigFromFlags("", kubeconfig) 
+ if err != nil { + return nil, xerrors.Errorf("GetNodesClient: %w", err) + } + } + + clientset, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, xerrors.Errorf("new for config: %w", err) + } + + return clientset, nil +} diff --git a/pkg/controllers/clusterset.go b/pkg/controllers/clusterset.go new file mode 100644 index 0000000..9e0d3c9 --- /dev/null +++ b/pkg/controllers/clusterset.go @@ -0,0 +1,156 @@ +/* +Copyright 2020 The Okra authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + "github.com/go-logr/logr" + //"k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + + corev1 "k8s.io/api/core/v1" + //metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/mumoshu/okra/api/v1alpha1" + "github.com/mumoshu/okra/pkg/clusterset" +) + +const ( + containerName = "runner" + finalizerName = "runner.okra.mumo.co" +) + +// ClusterSetReconciler reconciles a ClusterSet object +type ClusterSetReconciler struct { + client.Client + Log logr.Logger + Recorder record.EventRecorder + Scheme *runtime.Scheme +} + +// +kubebuilder:rbac:groups=okra.mumo.co,resources=clustersets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=okra.mumo.co,resources=clustersets/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=okra.mumo.co,resources=clustersets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=secrets/finalizers,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch + +func (r *ClusterSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) { + ctx := context.Background() + log := r.Log.WithValues("clusterSet", req.NamespacedName) + + var clusterSet v1alpha1.ClusterSet + if err := r.Get(ctx, req.NamespacedName, &clusterSet); err != nil { + return ctrl.Result{}, client.IgnoreNotFound(err) + } + + if clusterSet.ObjectMeta.DeletionTimestamp.IsZero() { + finalizers, added := addFinalizer(clusterSet.ObjectMeta.Finalizers) + + if added { + newRunner := clusterSet.DeepCopy() + newRunner.ObjectMeta.Finalizers = finalizers + + if err := r.Update(ctx, newRunner); err != nil { + log.Error(err, "Failed to update clusterSet") + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil + } + } else { + finalizers, removed := removeFinalizer(clusterSet.ObjectMeta.Finalizers) + + if removed { + // TODO do someo finalization if necessary + + newRunner := clusterSet.DeepCopy() + newRunner.ObjectMeta.Finalizers = finalizers + + if err := r.Update(ctx, newRunner); err != nil { + log.Error(err, "Failed to update clusterSet") + return ctrl.Result{}, err + } + + log.Info("Removed clusterSet") + } + + return ctrl.Result{}, nil + } + + config := 
clusterset.SyncInput{ + DryRun: false, + NS: req.Namespace, + EKSTags: clusterSet.Spec.Generators[0].AWSEKS.Selector.MatchTags, + Labels: clusterSet.Spec.Template.Metadata.Labels, + } + + if err := clusterset.Sync(config); err != nil { + log.Error(err, "Syncing clusters") + + return ctrl.Result{RequeueAfter: 10 * time.Second}, err + } + + r.Recorder.Event(&clusterSet, corev1.EventTypeNormal, "SyncFinished", fmt.Sprintf("Sync finished on '%s'", clusterSet.Name)) + + return ctrl.Result{}, nil +} + +func (r *ClusterSetReconciler) SetupWithManager(mgr ctrl.Manager) error { + r.Recorder = mgr.GetEventRecorderFor("clusterset-controller") + + return ctrl.NewControllerManagedBy(mgr). + For(&v1alpha1.ClusterSet{}). + Owns(&corev1.Secret{}). + Complete(r) +} + +func addFinalizer(finalizers []string) ([]string, bool) { + exists := false + for _, name := range finalizers { + if name == finalizerName { + exists = true + } + } + + if exists { + return finalizers, false + } + + return append(finalizers, finalizerName), true +} + +func removeFinalizer(finalizers []string) ([]string, bool) { + removed := false + result := []string{} + + for _, name := range finalizers { + if name == finalizerName { + removed = true + continue + } + result = append(result, name) + } + + return result, removed +} diff --git a/pkg/manager/manager.go b/pkg/manager/manager.go new file mode 100644 index 0000000..6826f1b --- /dev/null +++ b/pkg/manager/manager.go @@ -0,0 +1,89 @@ +package manager + +import ( + "flag" + "time" + + "github.com/spf13/pflag" + + "github.com/mumoshu/okra/pkg/clclient" + "github.com/mumoshu/okra/pkg/controllers" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + // +kubebuilder:scaffold:imports +) + +var ( + setupLog = ctrl.Log.WithName("setup") +) + +type Manager struct { + MetricsAddr string + EnableLeaderElection bool + SyncPeriod time.Duration +} + +func (m *Manager) AddFlags(fs flag.FlagSet) { + fs.StringVar(&m.MetricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + fs.BoolVar(&m.EnableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") + fs.DurationVar(&m.SyncPeriod, "sync-period", 30*time.Second, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.") + + // flag.Parse() +} + +func (m *Manager) AddPFlags(fs *pflag.FlagSet) { + fs.StringVar(&m.MetricsAddr, "metrics-addr", ":8080", "The address the metric endpoint binds to.") + fs.BoolVar(&m.EnableLeaderElection, "enable-leader-election", false, + "Enable leader election for controller manager. 
Enabling this will ensure there is only one active controller manager.") + fs.DurationVar(&m.SyncPeriod, "sync-period", 30*time.Second, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.") + + // flag.Parse() +} + +func (m *Manager) Run() error { + var ( + err error + ) + + logger := zap.New(func(o *zap.Options) { + o.Development = true + }) + + ctrl.SetLogger(logger) + + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ + Scheme: clclient.Scheme(), + MetricsBindAddress: m.MetricsAddr, + LeaderElection: m.EnableLeaderElection, + LeaderElectionID: "okra", + Port: 9443, + SyncPeriod: &m.SyncPeriod, + }) + if err != nil { + setupLog.Error(err, "unable to start manager") + return err + } + + clusterSetReconciler := &controllers.ClusterSetReconciler{ + Client: mgr.GetClient(), + Log: ctrl.Log.WithName("controllers").WithName("ClusterSet"), + Scheme: mgr.GetScheme(), + } + + if err = clusterSetReconciler.SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "ClusterSet") + return err + } + + // +kubebuilder:scaffold:builder + + setupLog.Info("starting manager") + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { + setupLog.Error(err, "problem running manager") + return err + } + + return nil +} diff --git a/pkg/okraerror/oerror.go b/pkg/okraerror/oerror.go new file mode 100644 index 0000000..2eab971 --- /dev/null +++ b/pkg/okraerror/oerror.go @@ -0,0 +1,124 @@ +package okraerror + +import ( + "fmt" + "io" + "path" + "runtime" + "strconv" + "strings" +) + +func New(err error) Error { + return Error{ + st: callers(), + err: err, + } +} + +// Error is emitted when there's any validation error in commamd input +type Error struct { + err error + st *stack +} + +func (e Error) Error() string { + return e.err.Error() +} + +type stack []uintptr + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + io.WriteString(s, strconv.Itoa(f.line())) + case 'n': + io.WriteString(s, funcname(f.name())) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). 
+func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} + +func (e Error) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + fmt.Fprintf(s, "%+v\n", e.err) + for _, f := range *e.st { + Frame(f).Format(s, verb) + fmt.Fprintf(s, "\n") + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", e.err) + default: + fmt.Fprintf(s, "%v", e.err) + } + case 's': + fmt.Fprintf(s, "%s", e.err) + } +} diff --git a/pkg/targetgroupbinding/targetgroupbinding.go b/pkg/targetgroupbinding/targetgroupbinding.go new file mode 100644 index 0000000..b55af3e --- /dev/null +++ b/pkg/targetgroupbinding/targetgroupbinding.go @@ -0,0 +1,100 @@ +package targetgroupbinding + +import ( + "context" + + "github.com/mumoshu/okra/api/elbv2/v1beta1" + "github.com/mumoshu/okra/pkg/clclient" + "github.com/mumoshu/okra/pkg/okraerror" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ListInput struct { + ClusterName string + NS string +} + +func List(input ListInput) ([]v1beta1.TargetGroupBinding, error) { + clusterName := input.ClusterName + ns := input.NS + + clientset, err := clclient.NewClientSet() + if err != nil { + return nil, okraerror.New(err) + } + + ctx := context.Background() + + secret, err := clientset.CoreV1().Secrets(ns).Get(ctx, clusterName, metav1.GetOptions{}) + if err != nil { + return nil, okraerror.New(err) + } + + // for k, v := range secret.Data { + // fmt.Fprintf(os.Stderr, "%s=%s\n", k, v) + // } + + client, err := clclient.NewFromClusterSecret(*secret) + if err != nil { + return nil, err + } + + var bindings v1beta1.TargetGroupBindingList + + optionalNS := "" + + if err := client.List(ctx, &bindings, &runtimeclient.ListOptions{Namespace: optionalNS}); err != nil { + return nil, okraerror.New(err) + } + + return bindings.Items, nil +} + +type CreateInput struct { + ClusterName string + ClusterNamespace string + Name string + Namespace string + TargetGroupARN string + Labels map[string]string + DryRun bool +} + +func Create(input CreateInput) (*v1beta1.TargetGroupBinding, error) { + clientset, err := clclient.NewClientSet() + if err != nil { + return nil, okraerror.New(err) + } + + ctx := context.Background() + + secret, err := clientset.CoreV1().Secrets(input.ClusterNamespace).Get(ctx, input.ClusterName, metav1.GetOptions{}) + if err != nil { + return nil, okraerror.New(err) + } + + client, err := clclient.NewFromClusterSecret(*secret) + if err != nil { + return nil, err + } + + var binding v1beta1.TargetGroupBinding + + binding.Name = input.Name + binding.Namespace = input.Namespace + binding.Labels = input.Labels + binding.Spec.TargetGroupARN = input.TargetGroupARN + + var dryRun []string + + if input.DryRun { + dryRun = []string{metav1.DryRunAll} + } + + if err := client.Create(ctx, &binding, &runtimeclient.CreateOptions{DryRun: dryRun}); err != nil { + return nil, okraerror.New(err) + } + + return &binding, nil +} diff --git a/testdata/analysis_run.yaml b/testdata/analysis_run.yaml new file mode 100644 index 0000000..30139b1 --- /dev/null +++ b/testdata/analysis_run.yaml @@ -0,0 +1,18 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisRun +metadata: + name: run1 +spec: + metrics: + - name: success-rate + successCondition: result[0] >= 0.95 + provider: + prometheus: + address: "http://prometheus.example.com:9090" + query: | + sum(irate( + 
istio_requests_total{reporter="source",destination_service=~"foo",response_code!~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"foo"}[5m] + )) diff --git a/testdata/analysis_template.yaml b/testdata/analysis_template.yaml new file mode 100644 index 0000000..4172c0a --- /dev/null +++ b/testdata/analysis_template.yaml @@ -0,0 +1,22 @@ +apiVersion: argoproj.io/v1alpha1 +kind: AnalysisTemplate +metadata: + name: success-rate +spec: + args: + - name: service-name + - name: prometheus-port + value: "9090" + metrics: + - name: success-rate + successCondition: result[0] >= 0.95 + provider: + prometheus: + address: "http://prometheus.example.com:{{args.prometheus-port}}" + query: | + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}",response_code!~"5.*"}[5m] + )) / + sum(irate( + istio_requests_total{reporter="source",destination_service=~"{{args.service-name}}"}[5m] + )) diff --git a/testdata/clusterset.yaml b/testdata/clusterset.yaml new file mode 100644 index 0000000..9473c9c --- /dev/null +++ b/testdata/clusterset.yaml @@ -0,0 +1,14 @@ +apiVersion: okra.mumo.co/v1alpha1 +kind: ClusterSet +metadata: + name: clusterset1 +spec: + generators: + - awseks: + selector: + matchTags: + Service: demo + template: + metadata: + labels: + service: demo
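Illustrative usage sketch (not part of the changeset above): assuming the packages introduced in this diff, a small program could first sync Argo CD-style cluster secrets from EKS clusters tagged Service=demo (mirroring testdata/clusterset.yaml) and then mirror one cluster's TargetGroupBindings into AWSTargetGroup resources. The okra-system namespace, the cluster1 secret name, and the role=canary selector are hypothetical placeholders.

package main

import (
	"log"

	"github.com/mumoshu/okra/pkg/awstargetgroupset"
	"github.com/mumoshu/okra/pkg/clusterset"
)

func main() {
	// Create/update/delete Argo CD cluster secrets for every EKS cluster tagged
	// Service=demo, like the ClusterSet controller does for testdata/clusterset.yaml.
	if err := clusterset.Sync(clusterset.SyncInput{
		NS:      "okra-system", // placeholder namespace
		EKSTags: map[string]string{"Service": "demo"},
		Labels:  map[string]string{"service": "demo"},
		DryRun:  true, // log what would change without touching the cluster
	}); err != nil {
		log.Fatal(err)
	}

	// Mirror the TargetGroupBindings found in one of those clusters into
	// AWSTargetGroup resources on the management cluster.
	results, err := awstargetgroupset.Sync(awstargetgroupset.SyncInput{
		NS:              "okra-system", // namespace holding the cluster secret
		ClusterName:     "cluster1",    // placeholder cluster secret name
		BindingSelector: "role=canary", // placeholder label selector
		DryRun:          true,
	})
	if err != nil {
		log.Fatal(err)
	}

	for _, r := range results {
		log.Printf("%s: %s", r.Action, r.Name)
	}
}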