
refactor: main.go
- use infrav1 instead of infrav1beta1
- factor the setup of webhooks, reconcilers, and health checks into functions
- don't set ctrl options inline, for better readability
- use caching for namespace watches
- process restconfig options
- process profiling and TLS options
- NOTE: actual profiling may still need more work; this is mostly focused on handling flags

Signed-off-by: Chris Privitere <23177737+cprivitere@users.noreply.github.com>
cprivitere committed Dec 18, 2023
1 parent 2f2251e commit 1af02c9
Showing 1 changed file with 122 additions and 65 deletions.
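
Before the diff, one piece of context on the profiling NOTE in the commit message: contention profiling in Go is enabled with runtime.SetBlockProfileRate and the resulting profile is served by net/http/pprof. A minimal, self-contained sketch, not taken from this commit (the port and all names here are illustrative):

	package main

	import (
		"net/http"
		_ "net/http/pprof" // registers the /debug/pprof/* handlers on the default mux
		"runtime"
	)

	func main() {
		// A rate of 1 records every blocking event; the commit gates the
		// equivalent call behind its enableContentionProfiling flag.
		runtime.SetBlockProfileRate(1)

		// The block profile is then readable at
		// http://localhost:6060/debug/pprof/block.
		_ = http.ListenAndServe("localhost:6060", nil)
	}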
main.go
@@ -18,6 +18,7 @@ limitations under the License.
 package main
 
 import (
+	"context"
 	"flag"
 	"fmt"
 	"net/http"
@@ -26,40 +27,45 @@ import (
 	"time"
 
 	"github.com/spf13/pflag"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	cgrecord "k8s.io/client-go/tools/record"
 	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/component-base/version"
 	"k8s.io/klog/v2"
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
+	"sigs.k8s.io/cluster-api/controllers/remote"
 	"sigs.k8s.io/cluster-api/util/flags"
 	"sigs.k8s.io/cluster-api/util/record"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cache"
+	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/webhook"
 
 	"k8s.io/component-base/logs"
 	logsv1 "k8s.io/component-base/logs/api/v1"
 
-	infrav1beta1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
+	infrav1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-packet/controllers"
 	packet "sigs.k8s.io/cluster-api-provider-packet/pkg/cloud/packet"
 	// +kubebuilder:scaffold:imports
 )
 
 var (
-	scheme     = runtime.NewScheme()
-	logOptions = logs.NewOptions()
-	setupLog   = ctrl.Log.WithName("setup")
+	scheme         = runtime.NewScheme()
+	setupLog       = ctrl.Log.WithName("setup")
+	controllerName = "cluster-api-packet-controller-manager"
)
 
 func init() {
-	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
-	utilruntime.Must(infrav1beta1.AddToScheme(scheme))
-	utilruntime.Must(clusterv1.AddToScheme(scheme))
-	utilruntime.Must(bootstrapv1.AddToScheme(scheme))
-	// +kubebuilder:scaffold:scheme
+	_ = clientgoscheme.AddToScheme(scheme)
+	_ = infrav1.AddToScheme(scheme)
+	_ = clusterv1.AddToScheme(scheme)
+	_ = bootstrapv1.AddToScheme(scheme)
 }
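
Review note on the init() change above: the blank-identifier assignments discard any scheme-registration error, whereas the previous utilruntime.Must calls would panic on failure. If fail-fast registration were preferred, the old form could be kept under the new alias (a sketch reusing the identifiers from this file):

	utilruntime.Must(clientgoscheme.AddToScheme(scheme))
	utilruntime.Must(infrav1.AddToScheme(scheme))
	utilruntime.Must(clusterv1.AddToScheme(scheme))
	utilruntime.Must(bootstrapv1.AddToScheme(scheme))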
@@ -82,28 +88,105 @@ var (
 	restConfigBurst    int
 	tlsOptions         = flags.TLSOptions{}
 	diagnosticsOptions = flags.DiagnosticsOptions{}
+	logOptions         = logs.NewOptions()
 )
 
 func main() {
 	initFlags(pflag.CommandLine)
 	pflag.CommandLine.SetNormalizeFunc(cliflag.WordSepNormalizeFunc)
 	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
 	// Set log level 2 as default.
 	if err := pflag.CommandLine.Set("v", "2"); err != nil {
 		setupLog.Error(err, "failed to set log level")
 		os.Exit(1)
 	}
 	pflag.Parse()
 
 	if err := logsv1.ValidateAndApply(logOptions, nil); err != nil {
 		setupLog.Error(err, "unable to validate and apply log options")
 		os.Exit(1)
 	}
 
+	// klog.Background will automatically use the right logger.
+	ctrl.SetLogger(klog.Background())
+
+	// Machine and cluster operations can create enough events to trigger the event recorder spam filter
+	// Setting the burst size higher ensures all events will be recorded and submitted to the API
+	broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{
+		BurstSize: 100,
+	})
+
+	restConfig := ctrl.GetConfigOrDie()
+	restConfig.QPS = restConfigQPS
+	restConfig.Burst = restConfigBurst
+	restConfig.UserAgent = remote.DefaultClusterAPIUserAgent(controllerName)
+
+	tlsOptionOverrides, err := flags.GetTLSOptionOverrideFuncs(tlsOptions)
+	if err != nil {
+		setupLog.Error(err, "unable to add TLS settings to the webhook server")
+		os.Exit(1)
+	}
+
 	diagnosticsOpts := flags.GetDiagnosticsOptions(diagnosticsOptions)
 
+	var watchNamespaces map[string]cache.Config
+	if watchNamespace != "" {
+		setupLog.Info("Watching cluster-api objects only in namespace for reconciliation", "namespace", watchNamespace)
+		watchNamespaces = map[string]cache.Config{
+			watchNamespace: {},
+		}
+	}
+
+	if enableContentionProfiling {
+		goruntime.SetBlockProfileRate(1)
+	}
+
+	ctrlOptions := ctrl.Options{
+		Scheme:                     scheme,
+		LeaderElection:             enableLeaderElection,
+		LeaderElectionID:           "controller-leader-election-capp",
+		LeaseDuration:              &leaderElectionLeaseDuration,
+		RenewDeadline:              &leaderElectionRenewDeadline,
+		RetryPeriod:                &leaderElectionRetryPeriod,
+		LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
+		HealthProbeBindAddress:     healthAddr,
+		PprofBindAddress:           profilerAddress,
+		Metrics:                    diagnosticsOpts,
+		Cache: cache.Options{
+			DefaultNamespaces: watchNamespaces,
+			SyncPeriod:        &syncPeriod,
+		},
+		Client: client.Options{
+			Cache: &client.CacheOptions{
+				DisableFor: []client.Object{
+					&corev1.ConfigMap{},
+					&corev1.Secret{},
+				},
+			},
+		},
+		WebhookServer: webhook.NewServer(
+			webhook.Options{
+				Port:    webhookPort,
+				CertDir: webhookCertDir,
+				TLSOpts: tlsOptionOverrides,
+			},
+		),
+		EventBroadcaster: broadcaster,
+	}
+
+	mgr, err := ctrl.NewManager(restConfig, ctrlOptions)
+	if err != nil {
+		setupLog.Error(err, "unable to start manager")
+		os.Exit(1)
+	}
+
+	// Setup the context that's going to be used in controllers and for the manager.
+	ctx := ctrl.SetupSignalHandler()
+
+	setupChecks(mgr)
+	setupReconcilers(ctx, mgr)
+	setupWebhooks(mgr)
+
 	if profilerAddress != "" {
 		setupLog.Info(fmt.Sprintf("Profiler listening for requests at %s", profilerAddress))
 		go func() {
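
Review note on the caching and client options added above: DefaultNamespaces restricts the manager's informer cache, and therefore cache-backed client reads, to the given namespaces, while DisableFor sends ConfigMap and Secret reads straight to the API server instead of caching them cluster-wide. A minimal, self-contained sketch of the namespace-scoped cache (assumes controller-runtime v0.16+; "capp-system" is an illustrative namespace, not from this commit):

	package main

	import (
		ctrl "sigs.k8s.io/controller-runtime"
		"sigs.k8s.io/controller-runtime/pkg/cache"
	)

	func main() {
		// Only objects in capp-system are listed, watched, and cached; cached
		// kinds in other namespaces are not visible through mgr.GetClient().
		mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
			Cache: cache.Options{
				DefaultNamespaces: map[string]cache.Config{"capp-system": {}},
			},
		})
		if err != nil {
			panic(err)
		}
		_ = mgr
	}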
@@ -114,96 +197,70 @@
 		}()
 	}
 
-	// klog.Background will automatically use the right logger.
-	ctrl.SetLogger(klog.Background())
+	// Initialize event recorder.
+	record.InitFromRecorder(mgr.GetEventRecorderFor("packet-controller"))
 
-	// Machine and cluster operations can create enough events to trigger the event recorder spam filter
-	// Setting the burst size higher ensures all events will be recorded and submitted to the API
-	broadcaster := cgrecord.NewBroadcasterWithCorrelatorOptions(cgrecord.CorrelatorOptions{
-		BurstSize: 100,
-	})
+	setupLog.Info("starting manager", "version", version.Get().String())
+	if err := mgr.Start(ctx); err != nil {
+		setupLog.Error(err, "problem running manager")
+		os.Exit(1)
+	}
+}
 
-	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
-		Scheme:                  scheme,
-		LeaderElection:          enableLeaderElection,
-		LeaderElectionID:        "controller-leader-election-capp",
-		LeaderElectionNamespace: leaderElectionNamespace,
-		LeaseDuration:           &leaderElectionLeaseDuration,
-		RenewDeadline:           &leaderElectionRenewDeadline,
-		RetryPeriod:             &leaderElectionRetryPeriod,
-		SyncPeriod:              &syncPeriod,
-		Namespace:               watchNamespace,
-		Port:                    webhookPort,
-		CertDir:                 webhookCertDir,
-		HealthProbeBindAddress:  healthAddr,
-		EventBroadcaster:        broadcaster,
-		PprofBindAddress:        profilerAddress,
-		Metrics:                 diagnosticsOpts,
-	})
-	if err != nil {
-		setupLog.Error(err, "unable to start manager")
+func setupChecks(mgr ctrl.Manager) {
+	if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
+		setupLog.Error(err, "unable to create ready check")
 		os.Exit(1)
 	}
 
+	if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
+		setupLog.Error(err, "unable to create health check")
+		os.Exit(1)
+	}
+}
+
+func setupReconcilers(ctx context.Context, mgr ctrl.Manager) {
 	// get a packet client
 	client, err := packet.GetClient()
 	if err != nil {
 		setupLog.Error(err, "unable to get Packet client")
 		os.Exit(1)
 	}
 
-	// Initialize event recorder.
-	record.InitFromRecorder(mgr.GetEventRecorderFor("packet-controller"))
-
-	// Setup the context that's going to be used in controllers and for the manager.
-	ctx := ctrl.SetupSignalHandler()
-
-	if err = (&controllers.PacketClusterReconciler{
+	if err := (&controllers.PacketClusterReconciler{
 		Client:           mgr.GetClient(),
 		WatchFilterValue: watchFilterValue,
 		PacketClient:     client,
 	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: packetClusterConcurrency}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "PacketCluster")
 		os.Exit(1)
 	}
-	if err = (&controllers.PacketMachineReconciler{
+
+	if err := (&controllers.PacketMachineReconciler{
 		Client:           mgr.GetClient(),
 		WatchFilterValue: watchFilterValue,
 		PacketClient:     client,
-	}).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: packetMachineConcurrency}); err != nil {
+	}).SetupWithManager(ctx, mgr, controller.Options{
+		MaxConcurrentReconciles: packetMachineConcurrency,
+	}); err != nil {
 		setupLog.Error(err, "unable to create controller", "controller", "PacketMachine")
 		os.Exit(1)
 	}
+}
 
-	if err = (&infrav1beta1.PacketCluster{}).SetupWebhookWithManager(mgr); err != nil {
+func setupWebhooks(mgr ctrl.Manager) {
+	if err := (&infrav1.PacketCluster{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "PacketCluster")
 		os.Exit(1)
 	}
-	if err = (&infrav1beta1.PacketMachine{}).SetupWebhookWithManager(mgr); err != nil {
+	if err := (&infrav1.PacketMachine{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "PacketMachine")
 		os.Exit(1)
 	}
-	if err = (&infrav1beta1.PacketMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
+	if err := (&infrav1.PacketMachineTemplate{}).SetupWebhookWithManager(mgr); err != nil {
 		setupLog.Error(err, "unable to create webhook", "webhook", "PacketMachineTemplate")
 		os.Exit(1)
 	}
-
-	if err := mgr.AddReadyzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
-		setupLog.Error(err, "unable to create ready check")
-		os.Exit(1)
-	}
-
-	if err := mgr.AddHealthzCheck("webhook", mgr.GetWebhookServer().StartedChecker()); err != nil {
-		setupLog.Error(err, "unable to create health check")
-		os.Exit(1)
-	}
-
-	// +kubebuilder:scaffold:builder
-	setupLog.Info("starting manager", "version", version.Get().String())
-	if err := mgr.Start(ctx); err != nil {
-		setupLog.Error(err, "problem running manager")
-		os.Exit(1)
-	}
 }
 
 func initFlags(fs *pflag.FlagSet) {
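
A closing note on the restconfig bullet from the commit message: client-go's client-side rate limiter defaults to QPS 5 with burst 10 when those fields are left unset, which a busy controller can easily saturate; the commit surfaces both as flags and also sets a CAPI-derived user agent. A standalone sketch (values illustrative, not from this commit):

	package main

	import ctrl "sigs.k8s.io/controller-runtime"

	func main() {
		cfg := ctrl.GetConfigOrDie()
		cfg.QPS = 20   // sustained client-side requests per second
		cfg.Burst = 30 // short-term ceiling above QPS
		// The commit instead derives the user agent via
		// remote.DefaultClusterAPIUserAgent(controllerName).
		cfg.UserAgent = "my-controller"
		_ = cfg
	}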
