package operconfig
import (
"context"
"fmt"
"log"
"os"
"reflect"
"strings"
"time"
"github.com/openshift/cluster-network-operator/pkg/hypershift"
"github.com/pkg/errors"
configv1 "github.com/openshift/api/config/v1"
operv1 "github.com/openshift/api/operator/v1"
configclient "github.com/openshift/client-go/config/clientset/versioned"
configinformers "github.com/openshift/client-go/config/informers/externalversions"
"github.com/openshift/cluster-network-operator/pkg/apply"
cnoclient "github.com/openshift/cluster-network-operator/pkg/client"
"github.com/openshift/cluster-network-operator/pkg/controller/statusmanager"
"github.com/openshift/cluster-network-operator/pkg/names"
"github.com/openshift/cluster-network-operator/pkg/network"
"github.com/openshift/cluster-network-operator/pkg/platform"
"github.com/openshift/cluster-network-operator/pkg/util"
ipsecMetrics "github.com/openshift/cluster-network-operator/pkg/util/ipsec"
"github.com/openshift/library-go/pkg/operator/configobserver/featuregates"
"github.com/openshift/library-go/pkg/operator/events"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
uns "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
v1coreinformers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/klog/v2"
crclient "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// The periodic resync interval.
// We will re-run the reconciliation logic, even if the network configuration
// hasn't changed.
var ResyncPeriod = 3 * time.Minute
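// On success, Reconcile returns reconcile.Result{RequeueAfter: ResyncPeriod},
// which is what drives this periodic re-run.
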
// ManifestPath is the path to the manifest templates.
// A package-level variable is ugly, but there's no way to pass configuration
// to the reconciler right now.
var ManifestPath = "./bindata"
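// Tests, for example, can point the reconciler at a checkout-relative manifest
// directory before exercising Reconcile (an illustrative sketch, not code from
// this repo):
//
//	ManifestPath = "../../bindata"
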
// Add creates a new OperConfig Controller and adds it to the Manager. The Manager will set fields on the Controller
// and Start it when the Manager is Started.
func Add(mgr manager.Manager, status *statusmanager.StatusManager, c cnoclient.Client) error {
rc, err := newReconciler(mgr, status, c)
if err != nil {
return err
}
return add(mgr, rc)
}
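// For illustration, a minimal sketch of how this controller could be wired
// into an operator entrypoint. The runOperator helper and its arguments are
// assumptions for the example, not code from this package:
//
//	func runOperator(mgr manager.Manager, status *statusmanager.StatusManager, c cnoclient.Client) error {
//		// Register the operconfig controller; the manager starts it,
//		// along with everything else, when mgr.Start is called.
//		if err := Add(mgr, status, c); err != nil {
//			return err
//		}
//		return mgr.Start(context.Background())
//	}

// ControllerName identifies this controller; it is also passed to
// apply.ApplyObject when applying rendered objects.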
const ControllerName = "operconfig"
// newReconciler returns a new reconcile.Reconciler
func newReconciler(mgr manager.Manager, status *statusmanager.StatusManager, c cnoclient.Client) (*ReconcileOperConfig, error) {
kubeConfig := c.Default().Config()
kubeClient := c.Default().Kubernetes()
configClient, err := configclient.NewForConfig(kubeConfig)
if err != nil {
return nil, err
}
configInformers := configinformers.NewSharedInformerFactory(configClient, 10*time.Minute)
desiredVersion := os.Getenv("RELEASE_VERSION")
missingVersion := "0.0.1-snapshot"
eventRecorder := events.NewKubeRecorder(kubeClient.CoreV1().Events("openshift-network-operator"), "cluster-network-operator", &corev1.ObjectReference{
APIVersion: "apps/v1",
Kind: "Deployment",
Namespace: "openshift-network-operator",
Name: "network-operator",
})
// By default, this will exit(0) the process if the featuregates ever change to a different set of values.
featureGateAccessor := featuregates.NewFeatureGateAccess(
desiredVersion, missingVersion,
configInformers.Config().V1().ClusterVersions(), configInformers.Config().V1().FeatureGates(),
eventRecorder,
)
// TODO: if other controllers in CNO also want to use featureGates, move this
// code out of the operconfig controller.
// FIXME: for now we pass the neverStop channel; use c.Default().AddCustomInformer
// and switch to a proper stop channel and context that are closed and cancelled
// on exit.
go featureGateAccessor.Run(context.TODO())
go configInformers.Start(wait.NeverStop)
klog.Infof("Waiting for feature gates initialization...")
select {
case <-featureGateAccessor.InitialFeatureGatesObserved():
featureGates, err := featureGateAccessor.CurrentFeatureGates()
if err != nil {
return nil, err
}
klog.Infof("FeatureGates initialized: knownFeatureGates=%v", featureGates.KnownFeatures())
case <-time.After(1 * time.Minute):
return nil, fmt.Errorf("timed out waiting for FeatureGate detection")
}
featureGates, err := featureGateAccessor.CurrentFeatureGates()
if err != nil {
return nil, err
}
return &ReconcileOperConfig{
client: c,
status: status,
mapper: mgr.GetRESTMapper(),
featureGates: featureGates,
}, nil
}
// add adds a new Controller to mgr with r as the reconcile.Reconciler
func add(mgr manager.Manager, r *ReconcileOperConfig) error {
// Create a new controller
c, err := controller.New("operconfig-controller", mgr, controller.Options{Reconciler: r})
if err != nil {
return err
}
// Watch for changes to networkDiagnostics in network.config
err = c.Watch(source.Kind(mgr.GetCache(), &configv1.Network{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{
UpdateFunc: func(evt event.UpdateEvent) bool {
old, ok := evt.ObjectOld.(*configv1.Network)
if !ok {
return true
}
new, ok := evt.ObjectNew.(*configv1.Network)
if !ok {
return true
}
if reflect.DeepEqual(old.Spec.NetworkDiagnostics, new.Spec.NetworkDiagnostics) {
return false
}
return true
},
})
if err != nil {
return err
}
// Watch for changes to primary resource Network (as long as the spec changes)
err = c.Watch(source.Kind(mgr.GetCache(), &operv1.Network{}), &handler.EnqueueRequestForObject{}, predicate.Funcs{
UpdateFunc: func(evt event.UpdateEvent) bool {
old, ok := evt.ObjectOld.(*operv1.Network)
if !ok {
return true
}
new, ok := evt.ObjectNew.(*operv1.Network)
if !ok {
return true
}
if reflect.DeepEqual(old.Spec, new.Spec) {
log.Printf("Skipping reconcile of Network.operator.openshift.io: spec unchanged")
return false
}
return true
},
})
if err != nil {
return err
}
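// For example (an illustrative sketch, not test code from this package), a
// status-only update is filtered out by the predicate above, while any spec
// change passes through:
//
//	old := &operv1.Network{}
//	updated := old.DeepCopy()
//	updated.Status.ReadyReplicas++ // spec unchanged: no reconcile
//	disable := true
//	updated.Spec.DisableMultiNetwork = &disable // spec changed: reconcile
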
// Watch for changes to all ConfigMaps in our namespace.
// Currently, this catches the mtu-prober report and the OVS flows ConfigMap.
// We need to do this with a custom namespaced informer.
cmInformer := v1coreinformers.NewConfigMapInformer(
r.client.Default().Kubernetes(),
names.APPLIED_NAMESPACE,
0, // don't resync
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
r.client.Default().AddCustomInformer(cmInformer) // Tell the ClusterClient about this informer
if err := c.Watch(&source.Informer{Informer: cmInformer},
handler.EnqueueRequestsFromMapFunc(reconcileOperConfig),
predicate.ResourceVersionChangedPredicate{},
predicate.NewPredicateFuncs(func(object crclient.Object) bool {
// Ignore ConfigMaps we manage as part of this loop
return !(object.GetName() == "network-operator-lock" ||
object.GetName() == "applied-cluster")
}),
); err != nil {
return err
}
// Watch when nodes are created and updated.
// We need to watch when nodes are updated since we are interested in the labels
// of nodes for hardware offloading.
nodePredicate := predicate.Funcs{
CreateFunc: func(_ event.CreateEvent) bool {
return true
},
UpdateFunc: func(ev event.UpdateEvent) bool {
// Node conditions change *a lot* and we don't care. We only care
// about updates when the labels change.
return !reflect.DeepEqual(
ev.ObjectOld.GetLabels(),
ev.ObjectNew.GetLabels(),
)
},
DeleteFunc: func(_ event.DeleteEvent) bool {
return true
},
}
if err := c.Watch(
source.Kind(mgr.GetCache(), &corev1.Node{}),
handler.EnqueueRequestsFromMapFunc(reconcileOperConfig),
nodePredicate,
); err != nil {
return err
}
return nil
}
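// Compile-time check that *ReconcileOperConfig implements reconcile.Reconciler.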
var _ reconcile.Reconciler = &ReconcileOperConfig{}
// ReconcileOperConfig reconciles a Network.operator.openshift.io object
type ReconcileOperConfig struct {
client cnoclient.Client
status *statusmanager.StatusManager
mapper meta.RESTMapper
// If we can skip cleaning up the MTU prober job.
mtuProberCleanedUp bool
// maintain the copy of feature gates in the cluster
featureGates featuregates.FeatureGate
}
// Reconcile updates the state of the cluster to match that which is desired
// in the operator configuration (Network.operator.openshift.io)
func (r *ReconcileOperConfig) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) {
defer utilruntime.HandleCrash(r.status.SetDegradedOnPanicAndCrash)
log.Printf("Reconciling Network.operator.openshift.io %s\n", request.Name)
// We won't create more than one network
if request.Name != names.OPERATOR_CONFIG {
log.Printf("Ignoring Network.operator.openshift.io without default name")
return reconcile.Result{}, nil
}
// Fetch the Network.operator.openshift.io instance
operConfig := &operv1.Network{TypeMeta: metav1.TypeMeta{APIVersion: operv1.GroupVersion.String(), Kind: "Network"}}
err := r.client.Default().CRClient().Get(ctx, request.NamespacedName, operConfig)
if err != nil {
if apierrors.IsNotFound(err) {
r.status.SetDegraded(statusmanager.OperatorConfig, "NoOperatorConfig",
fmt.Sprintf("Operator configuration %s was deleted", request.NamespacedName.String()))
// Request object not found, could have been deleted after reconcile request.
// Owned objects are automatically garbage collected, since we set
// the ownerReference (see https://kubernetes.io/docs/concepts/workloads/controllers/garbage-collection/).
// Return and don't requeue
return reconcile.Result{}, nil
}
// Error reading the object - requeue the request.
log.Printf("Unable to retrieve Network.operator.openshift.io object: %v", err)
// FIXME: operator status?
return reconcile.Result{}, err
}
if operConfig.Spec.ManagementState == operv1.Unmanaged {
log.Printf("Operator configuration state is %s - skipping operconfig reconciliation", operConfig.Spec.ManagementState)
return reconcile.Result{}, nil
}
// Fetch the Network.config.openshift.io instance
clusterConfig := &configv1.Network{}
err = r.client.Default().CRClient().Get(ctx, types.NamespacedName{Name: names.CLUSTER_CONFIG}, clusterConfig)
if err != nil {
log.Printf("Unable to retrieve network.config.openshift.io object: %v", err)
return reconcile.Result{}, err
}
// Merge in the cluster configuration, in case the administrator has updated some "downstream" fields
// This will also commit the change back to the apiserver.
if err := r.MergeClusterConfig(ctx, operConfig, clusterConfig); err != nil {
log.Printf("Failed to merge the cluster configuration: %v", err)
// Don't set Degraded if the error is a version conflict, but return a reconcile error so we retry.
if !apierrors.IsConflict(err) {
r.status.SetDegraded(statusmanager.OperatorConfig, "MergeClusterConfig",
fmt.Sprintf("Internal error while merging cluster configuration and operator configuration: %v", err))
}
return reconcile.Result{}, err
}
// Convert certain fields to canonicalized form for backward compatibility
network.DeprecatedCanonicalize(&operConfig.Spec)
// Validate the configuration
if err := network.Validate(&operConfig.Spec); err != nil {
log.Printf("Failed to validate Network.operator.openshift.io.Spec: %v", err)
r.status.SetDegraded(statusmanager.OperatorConfig, "InvalidOperatorConfig",
fmt.Sprintf("The operator configuration is invalid (%v). Use 'oc edit network.operator.openshift.io cluster' to fix.", err))
return reconcile.Result{}, err
}
// Retrieve the previously applied operator configuration
prev, err := GetAppliedConfiguration(ctx, r.client.Default().CRClient(), operConfig.ObjectMeta.Name)
if err != nil {
log.Printf("Failed to retrieve previously applied configuration: %v", err)
// FIXME: operator status?
return reconcile.Result{}, err
}
// Gather the Infra status, we'll need it a few places
infraStatus, err := platform.InfraStatus(r.client)
if err != nil {
log.Printf("Failed to retrieve infrastructure status: %v", err)
return reconcile.Result{}, err
}
// If we need to, probe the host's MTU via a Job.
// Note that running clusters have no need of this, but we want the MTU
// ConfigMap to be created for consistency with other, non-HyperShift clusters.
// A HyperShift cluster may not have any worker nodes on which to run the MTU prober.
mtu := 0
err = r.client.Default().CRClient().Get(ctx, types.NamespacedName{Namespace: util.MTU_CM_NAMESPACE, Name: util.MTU_CM_NAME}, &corev1.ConfigMap{})
if network.NeedMTUProbe(prev, &operConfig.Spec) || (apierrors.IsNotFound(err) && infraStatus.HostedControlPlane == nil) {
mtu, err = r.probeMTU(ctx, operConfig, infraStatus)
if err != nil {
log.Printf("Failed to probe MTU: %v", err)
r.status.SetDegraded(statusmanager.OperatorConfig, "MTUProbeFailed",
fmt.Sprintf("Failed to probe MTU: %v", err))
return reconcile.Result{}, fmt.Errorf("could not probe MTU -- maybe no available nodes: %w", err)
}
log.Printf("Using detected MTU %d", mtu)
}
// Up-convert prev by filling in defaults
if prev != nil {
network.FillDefaults(prev, prev, mtu)
}
// Preserve operConfig for the DeepEqual check before UpdateOperConfig
newOperConfig := operConfig.DeepCopy()
// Fill all defaults explicitly
network.FillDefaults(&newOperConfig.Spec, prev, mtu)
// Compare against previous applied configuration to see if this change
// is safe.
if prev != nil {
// We may need to fill defaults here -- sort of as a poor-man's
// upconversion scheme -- if we add additional fields to the config.
err = network.IsChangeSafe(prev, &newOperConfig.Spec, infraStatus)
if err != nil {
log.Printf("Not applying unsafe change: %v", err)
r.status.SetDegraded(statusmanager.OperatorConfig, "InvalidOperatorConfig",
fmt.Sprintf("Not applying unsafe configuration change: %v. Use 'oc edit network.operator.openshift.io cluster' to undo the change.", err))
return reconcile.Result{}, err
}
}
// Bootstrap any resources
bootstrapResult, err := network.Bootstrap(newOperConfig, r.client)
if err != nil {
log.Printf("Failed to reconcile platform networking resources: %v", err)
r.status.SetDegraded(statusmanager.OperatorConfig, "BootstrapError",
fmt.Sprintf("Internal error while reconciling platform networking resources: %v", err))
return reconcile.Result{}, err
}
if !reflect.DeepEqual(operConfig, newOperConfig) {
if err := r.UpdateOperConfig(ctx, newOperConfig); err != nil {
log.Printf("Failed to update the operator configuration: %v", err)
// Don't set Degraded if the error is a version conflict, but return a reconcile error so we retry.
if !apierrors.IsConflict(err) {
r.status.SetDegraded(statusmanager.OperatorConfig, "UpdateOperatorConfig",
fmt.Sprintf("Internal error while updating operator configuration: %v", err))
}
return reconcile.Result{}, err
}
}
updateIPsecMetric(&newOperConfig.Spec)
// once updated, use the new config
operConfig = newOperConfig
// Generate the objects.
// Note that Render might have side effects in the passed in operConfig that
// will be reflected later on in the updated status.
objs, progressing, err := network.Render(&operConfig.Spec, &clusterConfig.Spec, ManifestPath, r.client, r.featureGates, bootstrapResult)
if err != nil {
log.Printf("Failed to render: %v", err)
r.status.SetDegraded(statusmanager.OperatorConfig, "RenderError",
fmt.Sprintf("Internal error while rendering operator configuration: %v", err))
return reconcile.Result{}, err
}
if progressing {
r.status.SetProgressing(statusmanager.OperatorRender, "RenderProgressing",
"Waiting to render manifests")
} else {
r.status.UnsetProgressing(statusmanager.OperatorRender)
}
// The first object we create should be the record of our applied configuration. The last object we create is config.openshift.io/v1/Network.Status
app, err := AppliedConfiguration(operConfig)
if err != nil {
log.Printf("Failed to render applied: %v", err)
r.status.SetDegraded(statusmanager.OperatorConfig, "RenderError",
fmt.Sprintf("Internal error while recording new operator configuration: %v", err))
return reconcile.Result{}, err
}
objs = append([]*uns.Unstructured{app}, objs...)
relatedObjects := []configv1.ObjectReference{}
relatedClusterObjects := []hypershift.RelatedObject{}
hcpCfg := hypershift.NewHyperShiftConfig()
for _, obj := range objs {
// Label all DaemonSets, Deployments, and StatefulSets with the label that generates Status.
if obj.GetAPIVersion() == "apps/v1" && (obj.GetKind() == "DaemonSet" || obj.GetKind() == "Deployment" || obj.GetKind() == "StatefulSet") {
l := obj.GetLabels()
if l == nil {
l = map[string]string{}
}
// Resources with GenerateStatusLabel set to "" are not meant to generate status
if v, exists := l[names.GenerateStatusLabel]; !exists || v != "" {
// In HyperShift, use the infrastructure name to differentiate between
// resources deployed by the management-cluster CNO and by the CNO deployed
// in the hosted cluster's control-plane namespace. Without that, the CNO
// running against the management cluster would pick up the resources
// rendered by the hosted cluster's CNO.
if hcpCfg.Enabled {
l[names.GenerateStatusLabel] = bootstrapResult.Infra.InfraName
} else {
l[names.GenerateStatusLabel] = names.StandAloneClusterName
}
obj.SetLabels(l)
}
}
restMapping, err := r.mapper.RESTMapping(obj.GroupVersionKind().GroupKind())
if err != nil {
log.Printf("Failed to get REST mapping for storing related object: %v", err)
continue
}
if apply.GetClusterName(obj) != "" {
relatedClusterObjects = append(relatedClusterObjects, hypershift.RelatedObject{
ObjectReference: configv1.ObjectReference{
Group: obj.GetObjectKind().GroupVersionKind().Group,
Resource: restMapping.Resource.Resource,
Name: obj.GetName(),
Namespace: obj.GetNamespace(),
},
ClusterName: apply.GetClusterName(obj),
})
// Don't add management cluster objects in relatedObjects
continue
}
relatedObjects = append(relatedObjects, configv1.ObjectReference{
Group: obj.GetObjectKind().GroupVersionKind().Group,
Resource: restMapping.Resource.Resource,
Name: obj.GetName(),
Namespace: obj.GetNamespace(),
})
}
relatedObjects = append(relatedObjects, configv1.ObjectReference{
Resource: "namespaces",
Name: names.APPLIED_NAMESPACE,
})
// Add operator.openshift.io/v1/network to relatedObjects for must-gather
relatedObjects = append(relatedObjects, configv1.ObjectReference{
Group: "operator.openshift.io",
Resource: "networks",
Name: "cluster",
})
// This Namespace is rendered by the CVO, but it's really our operand.
relatedObjects = append(relatedObjects, configv1.ObjectReference{
Resource: "namespaces",
Name: "openshift-cloud-network-config-controller",
})
r.status.SetRelatedObjects(relatedObjects)
r.status.SetRelatedClusterObjects(relatedClusterObjects)
// Apply the objects to the cluster
setDegraded := false
var degradedErr error
for _, obj := range objs {
// TODO: OwnerRef for non default clusters. For HyperShift this should probably be HostedControlPlane CR
if apply.GetClusterName(obj) == "" {
// Mark the object to be GC'd if the owner is deleted.
if err := controllerutil.SetControllerReference(operConfig, obj, r.client.ClientFor(apply.GetClusterName(obj)).Scheme()); err != nil {
err = errors.Wrapf(err, "could not set reference for (%s) %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
log.Println(err)
r.status.SetDegraded(statusmanager.OperatorConfig, "InternalError",
fmt.Sprintf("Internal error while updating operator configuration: %v", err))
return reconcile.Result{}, err
}
}
// Open question: should an error here indicate we will never retry?
if err := apply.ApplyObject(ctx, r.client, obj, ControllerName); err != nil {
err = errors.Wrapf(err, "could not apply (%s) %s/%s", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
// If the error comes from a nonexistent namespace, print a help message.
if obj.GroupVersionKind().Kind == "NetworkAttachmentDefinition" && strings.Contains(err.Error(), "namespaces") {
err = errors.Wrapf(err, "could not apply (%s) %s/%s; Namespace error for networkattachment definition, consider possible solutions: (1) Edit config files to include existing namespace (2) Create non-existent namespace (3) Delete erroneous network-attachment-definition", obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
}
log.Println(err)
// Ignore errors if we've asked to do so.
anno := obj.GetAnnotations()
if anno != nil {
if _, ok := anno[names.IgnoreObjectErrorAnnotation]; ok {
log.Println("Object has ignore-errors annotation set, continuing")
continue
}
}
setDegraded = true
degradedErr = err
}
}
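// Note for manifest authors: a rendered object can opt out of degrading the
// operator on apply failure via the ignore-errors annotation checked above;
// as a sketch (hypothetical object, not a manifest from this repo):
//
//	obj.SetAnnotations(map[string]string{names.IgnoreObjectErrorAnnotation: ""})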
if setDegraded {
r.status.SetDegraded(statusmanager.OperatorConfig, "ApplyOperatorConfig",
fmt.Sprintf("Error while updating operator configuration: %v", degradedErr))
return reconcile.Result{}, degradedErr
}
if operConfig.Spec.Migration != nil && operConfig.Spec.Migration.NetworkType != "" {
if !(operConfig.Spec.Migration.NetworkType == string(operv1.NetworkTypeOpenShiftSDN) || operConfig.Spec.Migration.NetworkType == string(operv1.NetworkTypeOVNKubernetes)) {
err = fmt.Errorf("operConfig.Spec.Migration.NetworkType: %s is not equal to either %q or %q", operConfig.Spec.Migration.NetworkType, operv1.NetworkTypeOpenShiftSDN, operv1.NetworkTypeOVNKubernetes)
return reconcile.Result{}, err
}
migration := operConfig.Spec.Migration
if migration.Features == nil || migration.Features.EgressFirewall {
err = migrateEgressFirewallCRs(ctx, operConfig, r.client)
if err != nil {
log.Printf("Could not migrate EgressFirewall CRs: %v", err)
return reconcile.Result{}, err
}
}
if migration.Features == nil || migration.Features.Multicast {
err = migrateMulticastEnablement(ctx, operConfig, r.client)
if err != nil {
log.Printf("Could not migrate Multicast settings: %v", err)
return reconcile.Result{}, err
}
}
if migration.Features == nil || migration.Features.EgressIP {
err = migrateEgressIpCRs(ctx, operConfig, r.client)
if err != nil {
log.Printf("Could not migrate EgressIP CRs: %v", err)
return reconcile.Result{}, err
}
}
}
// Update Network.config.openshift.io.Status
status, err := r.ClusterNetworkStatus(ctx, operConfig, bootstrapResult)
if err != nil {
log.Printf("Could not generate network status: %v", err)
r.status.SetDegraded(statusmanager.OperatorConfig, "StatusError",
fmt.Sprintf("Could not update cluster configuration status: %v", err))
return reconcile.Result{}, err
}
if status != nil {
// Don't set the owner reference in this case -- we're updating
// the status of our owner.
if err := apply.ApplyObject(ctx, r.client, status, ControllerName); err != nil {
err = errors.Wrapf(err, "could not apply (%s) %s/%s", status.GroupVersionKind(), status.GetNamespace(), status.GetName())
log.Println(err)
r.status.SetDegraded(statusmanager.OperatorConfig, "StatusError",
fmt.Sprintf("Could not update cluster configuration status: %v", err))
return reconcile.Result{}, err
}
}
r.status.SetNotDegraded(statusmanager.OperatorConfig)
// All was successful. Request that this be re-triggered after ResyncPeriod,
// so we can reconcile state again.
log.Printf("Operconfig Controller complete")
return reconcile.Result{RequeueAfter: ResyncPeriod}, nil
}
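// updateIPsecMetric publishes IPsec telemetry for the rendered configuration:
// "not applicable" for non-OVN-Kubernetes networks, otherwise the current
// IPsec mode and whether the legacy API is in use.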
func updateIPsecMetric(newOperConfigSpec *operv1.NetworkSpec) {
if newOperConfigSpec == nil {
// spec is not initialized yet
klog.V(5).Infof("IPsec: << updateIPsecMetric, new spec is nil, skipping")
} else if newOperConfigSpec.DefaultNetwork.OVNKubernetesConfig == nil {
// non ovn-k network, ipsec is not supported
ipsecMetrics.UpdateIPsecMetricNA()
} else {
// ovn-k network, ipsec is supported, update the ipsec state metric
newOVNKubeConfig := newOperConfigSpec.DefaultNetwork.OVNKubernetesConfig
mode := string(network.GetIPsecMode(newOVNKubeConfig))
legacyAPI := network.IsIPsecLegacyAPI(newOVNKubeConfig)
ipsecMetrics.UpdateIPsecMetric(mode, legacyAPI)
}
}
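// reconcileOperConfig maps watch events on secondary resources (ConfigMaps,
// Nodes) to a single reconcile request for the default operator configuration.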
func reconcileOperConfig(ctx context.Context, obj crclient.Object) []reconcile.Request {
log.Printf("%s %s/%s changed, triggering operconfig reconciliation", obj.GetObjectKind().GroupVersionKind().Kind, obj.GetNamespace(), obj.GetName())
// Map the request to the unnamespaced default network config, to ensure we
// don't end up with multiple requeueing reconcilers for the same change.
return []reconcile.Request{{NamespacedName: types.NamespacedName{
Name: names.OPERATOR_CONFIG,
}}}
}