chore(deps): upgrade k8s version and client-go (#15852)
* chore(deps): upgrade k8s version and client-go

Signed-off-by: fengshunli <1171313930@qq.com>

* revert bad merge

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>

* fix codegen

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>

* fix codegen

Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>

* fix: check for double definition

As found in #13965 (and as a follow-up to #13999), we also need to
define what happens when _both_ managedNamespaceMetadata _and_ a
namespace manifest in the Application source are defined for the same namespace.

The idea here is that if that happens, we emit an
`ApplicationConditionRepeatedResourceWarning` and set the sync status
to `Unknown`, since it's unclear which definition should take effect.

The user will then have the option of removing one of the two
definitions.

Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
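
A rough sketch of what recording that condition could look like, using the public
Argo CD types; this is illustrative only (the helper name is hypothetical), and it
is not the code path this series ultimately landed on, since the simpler fix below
superseded it:

```go
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	argoappv1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
)

// flagRepeatedNamespace is a hypothetical helper: it records the conflict as an
// application condition and marks the sync status as Unknown.
func flagRepeatedNamespace(app *argoappv1.Application, ns string) {
	now := metav1.Now()
	app.Status.SetConditions([]argoappv1.ApplicationCondition{{
		Type:               argoappv1.ApplicationConditionRepeatedResourceWarning,
		Message:            "namespace " + ns + " is defined both in the Application source and via managedNamespaceMetadata",
		LastTransitionTime: &now,
	}}, map[argoappv1.ApplicationConditionType]bool{
		argoappv1.ApplicationConditionRepeatedResourceWarning: true,
	})
	app.Status.Sync.Status = argoappv1.SyncStatusCodeUnknown
}
```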

* fix: check for double definition

A simpler fix: don't add a managed namespace to the `targetObjs` list if
a namespace already exists in the application source.

Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
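
A minimal sketch of that check, with a hypothetical helper name; the real logic
lives in the Argo CD app controller and may differ in detail:

```go
package example

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// appendManagedNamespace appends the synthetic Namespace built from
// managedNamespaceMetadata unless the rendered source manifests already contain
// a Namespace with the same name, in which case the source definition wins and
// no duplicate is added.
func appendManagedNamespace(targetObjs []*unstructured.Unstructured, managedNs *unstructured.Unstructured) []*unstructured.Unstructured {
	for _, obj := range targetObjs {
		if obj.GetKind() == "Namespace" && obj.GetName() == managedNs.GetName() {
			return targetObjs
		}
	}
	return append(targetObjs, managedNs)
}
```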

* build: extra space in doc

Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>

* build: extra space in doc, again

Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>

* chore: bump gitops-engine

Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>

---------

Signed-off-by: fengshunli <1171313930@qq.com>
Signed-off-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
Signed-off-by: Blake Pettersson <blake.pettersson@gmail.com>
Co-authored-by: fengshunli <1171313930@qq.com>
Co-authored-by: Michael Crenshaw <350466+crenshaw-dev@users.noreply.github.com>
3 people authored Oct 18, 2023
1 parent 5b07a12 commit 9e0e8d5
Showing 82 changed files with 446 additions and 579 deletions.
32 changes: 17 additions & 15 deletions applicationset/controllers/applicationset_controller.go
@@ -544,22 +544,24 @@ func ignoreNotAllowedNamespaces(namespaces []string) predicate.Predicate {
}
}

func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", func(rawObj client.Object) []string {
// grab the job object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's a application set...
if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}
func appControllerIndexer(rawObj client.Object) []string {
// grab the job object, extract the owner...
app := rawObj.(*argov1alpha1.Application)
owner := metav1.GetControllerOf(app)
if owner == nil {
return nil
}
// ...make sure it's a application set...
if owner.APIVersion != argov1alpha1.SchemeGroupVersion.String() || owner.Kind != "ApplicationSet" {
return nil
}

// ...and if so, return it
return []string{owner.Name}
}); err != nil {
// ...and if so, return it
return []string{owner.Name}
}

func (r *ApplicationSetReconciler) SetupWithManager(mgr ctrl.Manager, enableProgressiveSyncs bool, maxConcurrentReconciliations int) error {
if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &argov1alpha1.Application{}, ".metadata.controller", appControllerIndexer); err != nil {
return fmt.Errorf("error setting up with manager: %w", err)
}

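Extracting `appControllerIndexer` into a named function lets the unit tests below
register the same `.metadata.controller` index on the controller-runtime fake
client via `WithIndex`; without a registered index, `List` calls that use that
field selector generally fail against the fake client. A sketch of how such an
index is typically queried, with illustrative names rather than the exact Argo CD
call site:

```go
package example

import (
	"context"

	argov1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// ownedApplications lists the Applications whose controller owner reference
// points at the given ApplicationSet, using the ".metadata.controller" index.
func ownedApplications(ctx context.Context, c client.Client, appSet *argov1alpha1.ApplicationSet) ([]argov1alpha1.Application, error) {
	var apps argov1alpha1.ApplicationList
	if err := c.List(ctx, &apps,
		client.InNamespace(appSet.Namespace),
		client.MatchingFields{".metadata.controller": appSet.Name},
	); err != nil {
		return nil, err
	}
	return apps.Items, nil
}
```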
28 changes: 18 additions & 10 deletions applicationset/controllers/applicationset_controller_test.go
@@ -994,7 +994,7 @@ func TestCreateOrUpdateInCluster(t *testing.T) {
initObjs = append(initObjs, &a)
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -1088,7 +1088,7 @@ func TestRemoveFinalizerOnInvalidDestination_FinalizerTypes(t *testing.T) {

initObjs := []crtclient.Object{&app, &appSet}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1250,7 +1250,7 @@ func TestRemoveFinalizerOnInvalidDestination_DestinationTypes(t *testing.T) {

initObjs := []crtclient.Object{&app, &appSet}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Name: "my-secret",
@@ -1482,7 +1482,7 @@ func TestCreateApplications(t *testing.T) {
initObjs = append(initObjs, &a)
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -1626,7 +1626,7 @@ func TestDeleteInCluster(t *testing.T) {
initObjs = append(initObjs, &temp)
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(initObjs...).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -2000,7 +2000,15 @@ func TestReconcilerValidationProjectErrorBehaviour(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&project}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
badCluster := v1alpha1.Cluster{Server: "https://bad-cluster", Name: "bad-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("GetCluster", mock.Anything, "https://bad-cluster").Return(&badCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
goodCluster,
}}, nil)

r := ApplicationSetReconciler{
Client: client,
Scheme: scheme,
@@ -2076,7 +2084,7 @@ func TestSetApplicationSetStatusCondition(t *testing.T) {
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()

r := ApplicationSetReconciler{
Client: client,
@@ -2146,7 +2154,7 @@ func applicationsUpdateSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2316,7 +2324,7 @@ func applicationsDeleteSyncPolicyTest(t *testing.T, applicationsSyncPolicy v1alp
argoDBMock := dbmocks.ArgoDB{}
argoObjs := []runtime.Object{&defaultProject}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()
goodCluster := v1alpha1.Cluster{Server: "https://good-cluster", Name: "good-cluster"}
argoDBMock.On("GetCluster", mock.Anything, "https://good-cluster").Return(&goodCluster, nil)
argoDBMock.On("ListClusters", mock.Anything).Return(&v1alpha1.ClusterList{Items: []v1alpha1.Cluster{
@@ -2627,7 +2635,7 @@ func TestPolicies(t *testing.T) {
},
}

client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).Build()
client := fake.NewClientBuilder().WithScheme(scheme).WithObjects(&appSet).WithIndex(&v1alpha1.Application{}, ".metadata.controller", appControllerIndexer).Build()

r := ApplicationSetReconciler{
Client: client,
2 changes: 1 addition & 1 deletion applicationset/utils/clusterUtils.go
@@ -180,7 +180,7 @@ func secretToCluster(s *corev1.Secret) (*appv1.Cluster, error) {
if val, err := strconv.Atoi(string(shardStr)); err != nil {
log.Warnf("Error while parsing shard in cluster secret '%s': %v", s.Name, err)
} else {
shard = pointer.Int64Ptr(int64(val))
shard = pointer.Int64(int64(val))
}
}
cluster := appv1.Cluster{
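This hunk, and several later ones, swap `pointer.Int64Ptr` / `pointer.BoolPtr` for
`pointer.Int64` / `pointer.Bool`: the `*Ptr` spellings in `k8s.io/utils/pointer`
are deprecated in favor of the shorter forms. A minimal sketch of the preferred
usage:

```go
package main

import (
	"fmt"

	"k8s.io/utils/pointer"
)

func main() {
	// Preferred after the dependency bump; pointer.Int64Ptr and pointer.BoolPtr
	// still compile but are marked deprecated.
	shard := pointer.Int64(3)
	warn := pointer.Bool(true)
	fmt.Println(*shard, *warn)
}
```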
8 changes: 2 additions & 6 deletions assets/swagger.json
@@ -5089,7 +5089,7 @@
}
},
"runtimeRawExtension": {
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.Object `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// External package:\ntype MyAPIObject struct {\n\truntime.TypeMeta `json:\",inline\"`\n\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n}\ntype PluginA struct {\n\tAOption string `json:\"aOption\"`\n}\n\n// On the wire, the JSON will look something like this:\n{\n\t\"kind\":\"MyAPIObject\",\n\t\"apiVersion\":\"v1\",\n\t\"myPlugin\": {\n\t\t\"kind\":\"PluginA\",\n\t\t\"aOption\":\"foo\",\n\t},\n}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
"description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned\nstruct, and Object in your internal struct. You also need to register your\nvarious plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into\nyour external MyAPIObject. That causes the raw JSON to be stored, but not unpacked.\nThe next step is to copy (using pkg/conversion) into the internal struct. The runtime\npackage's DefaultScheme has conversion functions installed which will unpack the\nJSON stored in RawExtension, turning it into the correct object type, and storing it\nin the Object. (TODO: In the case where the object is of an unknown type, a\nruntime.Unknown object will be created and stored.)\n\n+k8s:deepcopy-gen=true\n+protobuf=true\n+k8s:openapi-gen=true",
"type": "object",
"properties": {
"raw": {
@@ -5496,10 +5496,6 @@
"type": "string"
}
},
"clusterName": {
"description": "Deprecated: ClusterName is a legacy field that was always cleared by\nthe system and never used; it will be removed completely in 1.25.\n\nThe name in the go struct is changed to help clients detect\naccidental use.\n\n+optional",
"type": "string"
},
"creationTimestamp": {
"$ref": "#/definitions/v1Time"
},
@@ -5571,8 +5567,8 @@
}
},
"v1ObjectReference": {
"description": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\n\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
"type": "object",
"title": "ObjectReference contains enough information to let you inspect or modify the referred object.\n---\nNew uses of this type are discouraged because of difficulty describing its usage when embedded in APIs.\n 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage.\n 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular\n restrictions like, \"must refer only to types A and B\" or \"UID not honored\" or \"name must be restricted\".\n Those cannot be well described when embedded.\n 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen.\n 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity\n during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple\n and the version of the actual struct is irrelevant.\n 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type\n will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control.\nInstead of using this type, create a locally provided and used type that is well-focused on your reference.\nFor example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 .\n+k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object\n+structType=atomic",
"properties": {
"apiVersion": {
"type": "string",
2 changes: 1 addition & 1 deletion cmd/argocd/commands/admin/cluster.go
@@ -124,7 +124,7 @@ func loadClusters(ctx context.Context, kubeClient *kubernetes.Clientset, appClie
if replicas > 0 {
distributionFunction := sharding.GetDistributionFunction(argoDB, common.DefaultShardingAlgorithm)
distributionFunction(&cluster)
cluster.Shard = pointer.Int64Ptr(int64(clusterShard))
cluster.Shard = pointer.Int64(int64(clusterShard))
log.Infof("Cluster with uid: %s will be processed by shard %d", cluster.ID, clusterShard)
}

10 changes: 5 additions & 5 deletions cmd/argocd/commands/app.go
@@ -524,8 +524,8 @@ func NewApplicationLogsCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
} else {
return
}
} //Done with receive message
} //Done with retry
} // Done with receive message
} // Done with retry
},
}

@@ -889,7 +889,7 @@ func unset(source *argoappv1.ApplicationSource, opts unsetOpts) (updated bool, n
for i, item := range source.Kustomize.Images {
if argoappv1.KustomizeImage(kustomizeImage).Match(item) {
updated = true
//remove i
// remove i
a := source.Kustomize.Images
copy(a[i:], a[i+1:]) // Shift a[i+1:] left one index.
a[len(a)-1] = "" // Erase last element (write zero value).
@@ -1904,7 +1904,7 @@ func NewApplicationSyncCommand(clientOpts *argocdclient.ClientOptions) *cobra.Co
Backoff: &argoappv1.Backoff{
Duration: retryBackoffDuration.String(),
MaxDuration: retryBackoffMaxDuration.String(),
Factor: pointer.Int64Ptr(retryBackoffFactor),
Factor: pointer.Int64(retryBackoffFactor),
},
}
}
@@ -2143,7 +2143,7 @@ func checkResourceStatus(watch watchOpts, healthStatus string, syncStatus string
} else if watch.degraded && watch.health {
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
healthStatus == string(health.HealthStatusDegraded)
//below are good
// below are good
} else if watch.suspended && watch.health {
healthCheckPassed = healthStatus == string(health.HealthStatusHealthy) ||
healthStatus == string(health.HealthStatusSuspended)
2 changes: 1 addition & 1 deletion cmd/util/app.go
@@ -295,7 +295,7 @@ func SetAppSpecOptions(flags *pflag.FlagSet, spec *argoappv1.ApplicationSpec, ap
Backoff: &argoappv1.Backoff{
Duration: appOpts.retryBackoffDuration.String(),
MaxDuration: appOpts.retryBackoffMaxDuration.String(),
Factor: pointer.Int64Ptr(appOpts.retryBackoffFactor),
Factor: pointer.Int64(appOpts.retryBackoffFactor),
},
}
} else if appOpts.retryLimit == 0 {
2 changes: 1 addition & 1 deletion cmd/util/project.go
@@ -115,7 +115,7 @@ func GetOrphanedResourcesSettings(flagSet *pflag.FlagSet, opts ProjectOpts) *v1a
if opts.orphanedResourcesEnabled || warnChanged {
settings := v1alpha1.OrphanedResourcesMonitorSettings{}
if warnChanged {
settings.Warn = pointer.BoolPtr(opts.orphanedResourcesWarn)
settings.Warn = pointer.Bool(opts.orphanedResourcesWarn)
}
return &settings
}
15 changes: 11 additions & 4 deletions controller/appcontroller.go
@@ -181,7 +181,8 @@ func NewApplicationController(
appInformer, appLister := ctrl.newApplicationInformerAndLister()
indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
projInformer := v1alpha1.NewAppProjectInformer(applicationClientset, namespace, appResyncPeriod, indexers)
projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
var err error
_, err = projInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
ctrl.projectRefreshQueue.Add(key)
@@ -208,6 +209,9 @@
}
},
})
if err != nil {
return nil, err
}

factory := informers.NewSharedInformerFactoryWithOptions(ctrl.kubeClientset, defaultDeploymentInformerResyncDuration, informers.WithNamespace(settingsMgr.GetNamespace()))
deploymentInformer := factory.Apps().V1().Deployments()
@@ -235,7 +239,7 @@ func NewApplicationController(
}

metricsAddr := fmt.Sprintf("0.0.0.0:%d", metricsPort)
var err error

ctrl.metricsServer, err = metrics.NewMetricsServer(metricsAddr, appLister, ctrl.canProcessApp, readinessHealthCheck, metricsApplicationLabels)
if err != nil {
return nil, err
@@ -1569,7 +1573,7 @@ func (ctrl *ApplicationController) needRefreshAppStatus(app *appv1.Application,
} else if hardExpired || softExpired {
// The commented line below mysteriously crashes if app.Status.ReconciledAt is nil
// reason = fmt.Sprintf("comparison expired. reconciledAt: %v, expiry: %v", app.Status.ReconciledAt, statusRefreshTimeout)
//TODO: find existing Golang bug or create a new one
// TODO: find existing Golang bug or create a new one
reconciledAtStr := "never"
if app.Status.ReconciledAt != nil {
reconciledAtStr = app.Status.ReconciledAt.String()
@@ -1979,7 +1983,7 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
},
)
lister := applisters.NewApplicationLister(informer.GetIndexer())
informer.AddEventHandler(
_, err := informer.AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
if !ctrl.canProcessApp(obj) {
@@ -2023,6 +2027,9 @@ func (ctrl *ApplicationController) newApplicationInformerAndLister() (cache.Shar
},
},
)
if err != nil {
return nil, nil
}
return informer, lister
}
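
The error handling added around `AddEventHandler` in this file follows the
client-go v0.26+ signature change, where `SharedInformer.AddEventHandler` returns
a `ResourceEventHandlerRegistration` and an `error` instead of nothing. A minimal
sketch of the new shape, with illustrative names:

```go
package example

import (
	"k8s.io/client-go/tools/cache"
)

// addHandlers wires an event handler the post-upgrade way: the returned
// registration handle can later be passed to RemoveEventHandler, and the error
// must be checked.
func addHandlers(informer cache.SharedIndexInformer, enqueue func(obj interface{})) error {
	reg, err := informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    enqueue,
		UpdateFunc: func(_, newObj interface{}) { enqueue(newObj) },
		DeleteFunc: enqueue,
	})
	if err != nil {
		return err
	}
	_ = reg // keep the handle in case the handler ever needs to be removed
	return nil
}
```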
