diff --git a/cloud/scope/load_balancer_reconciler.go b/cloud/scope/load_balancer_reconciler.go index 4d91e4ad..45d3df0d 100644 --- a/cloud/scope/load_balancer_reconciler.go +++ b/cloud/scope/load_balancer_reconciler.go @@ -273,12 +273,12 @@ func (s *ClusterScope) GetLoadBalancers(ctx context.Context) (*loadbalancer.Load return nil, errors.New("cluster api tags have been modified out of context") } } - var page *string; + var page *string for { lbs, err := s.LoadBalancerClient.ListLoadBalancers(ctx, loadbalancer.ListLoadBalancersRequest{ CompartmentId: common.String(s.GetCompartmentId()), DisplayName: common.String(s.GetControlPlaneLoadBalancerName()), - Page: page, + Page: page, }) if err != nil { s.Logger.Error(err, "Failed to list lb by name") diff --git a/cloud/scope/machine.go b/cloud/scope/machine.go index b8e21212..bbf09393 100644 --- a/cloud/scope/machine.go +++ b/cloud/scope/machine.go @@ -323,8 +323,8 @@ func (m *MachineScope) getMachineFromOCID(ctx context.Context, instanceID *strin // GetMachineByDisplayName returns the machine from the compartment if there is a matching DisplayName, // and it was created by the cluster func (m *MachineScope) GetMachineByDisplayName(ctx context.Context, name string) (*core.Instance, error) { - var page *string; - for { + var page *string + for { req := core.ListInstancesRequest{DisplayName: common.String(name), CompartmentId: common.String(m.getCompartmentId()), Page: page} resp, err := m.ComputeClient.ListInstances(ctx, req) diff --git a/main.go b/main.go index 8578e77c..e9189d6d 100644 --- a/main.go +++ b/main.go @@ -19,6 +19,7 @@ package main import ( "flag" "os" + "time" infrastructurev1beta1 "github.com/oracle/cluster-api-provider-oci/api/v1beta1" infrastructurev1beta2 "github.com/oracle/cluster-api-provider-oci/api/v1beta2" @@ -35,6 +36,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" + "k8s.io/client-go/tools/leaderelection/resourcelock" "k8s.io/component-base/logs" logsV1 "k8s.io/component-base/logs/api/v1" _ "k8s.io/component-base/logs/json/register" @@ -79,6 +81,11 @@ func init() { func main() { var metricsAddr string var enableLeaderElection bool + var leaderElectionNamespace string + var leaderElectionLeaseDuration time.Duration + var leaderElectionRenewDeadline time.Duration + var leaderElectionRetryPeriod time.Duration + var watchNamespace string var probeAddr string var webhookPort int @@ -91,6 +98,30 @@ func main() { flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") + flag.StringVar( + &leaderElectionNamespace, + "leader-election-namespace", + "", + "Namespace that the controller performs leader election in. If unspecified, the controller will discover which namespace it is running in.", + ) + flag.DurationVar( + &leaderElectionLeaseDuration, + "leader-elect-lease-duration", + 15*time.Second, + "Interval at which non-leader candidates will wait to force acquire leadership (duration string)", + ) + flag.DurationVar( + &leaderElectionRenewDeadline, + "leader-elect-renew-deadline", + 10*time.Second, + "Duration that the leading controller manager will retry refreshing leadership before giving up (duration string)", + ) + flag.DurationVar( + &leaderElectionRetryPeriod, + "leader-elect-retry-period", + 2*time.Second, + "Duration the LeaderElector clients should wait between tries of actions (duration string)", + ) flag.IntVar(&webhookPort, "webhook-port", 9443, @@ -122,6 +153,12 @@ func main() { true, "Initialize OCI clients on startup", ) + flag.StringVar( + &watchNamespace, + "namespace", + "", + "Namespace that the controller watches to reconcile cluster-api objects. If unspecified, the controller watches for cluster-api objects across all namespaces.", + ) opts := zap.Options{ Development: true, @@ -142,13 +179,19 @@ func main() { ctrl.SetLogger(klog.Background()) mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ - Scheme: scheme, - MetricsBindAddress: metricsAddr, - Port: webhookPort, - HealthProbeBindAddress: probeAddr, - LeaderElection: enableLeaderElection, - LeaderElectionID: "controller-leader-elect-capoci", - CertDir: webhookCertDir, + Scheme: scheme, + MetricsBindAddress: metricsAddr, + Port: webhookPort, + HealthProbeBindAddress: probeAddr, + LeaderElection: enableLeaderElection, + LeaderElectionID: "controller-leader-elect-capoci", + LeaderElectionResourceLock: resourcelock.LeasesResourceLock, + LeaderElectionNamespace: leaderElectionNamespace, + LeaseDuration: &leaderElectionLeaseDuration, + RenewDeadline: &leaderElectionRenewDeadline, + RetryPeriod: &leaderElectionRetryPeriod, + CertDir: webhookCertDir, + Namespace: watchNamespace, }) if err != nil { setupLog.Error(err, "unable to start manager")