diff --git a/pkg/agent/v1/rules.go b/pkg/agent/v1/rules.go index 2e0fc1ea63..94b390eabe 100644 --- a/pkg/agent/v1/rules.go +++ b/pkg/agent/v1/rules.go @@ -53,9 +53,7 @@ func (a *Agent) streamRuleGroupUpdates(ctx context.Context) (<-chan [][]byte, er searchInterval = duration } notifier := notifier.NewPeriodicUpdateNotifier(ctx, finder, searchInterval) - a.logger.With( - "interval", searchInterval.String(), - ).Debug("rule discovery notifier configured") + a.logger.Debug("rule discovery notifier configured", "interval", searchInterval.String()) notifierC := notifier.NotifyC(ctx) a.logger.Debug("starting rule group update notifier") @@ -82,9 +80,7 @@ func (a *Agent) marshalRuleGroups(ruleGroups []rules.RuleGroup) [][]byte { for _, ruleGroup := range ruleGroups { doc, err := yaml.Marshal(ruleGroup) if err != nil { - a.logger.With( - "group", ruleGroup.Name, - ).Error("failed to marshal rule group", "error", err) + a.logger.Error("failed to marshal rule group", "error", err, "group", ruleGroup.Name,) continue } yamlDocs = append(yamlDocs, doc) diff --git a/pkg/agent/v2/agent.go b/pkg/agent/v2/agent.go index 346cc3c388..af577288a1 100644 --- a/pkg/agent/v2/agent.go +++ b/pkg/agent/v2/agent.go @@ -419,9 +419,7 @@ func (a *Agent) ListenAndServe(ctx context.Context) error { if err != nil { return err } - a.logger.With( - "address", listener.Addr(), - ).Info("agent http server starting") + a.logger.Info("agent http server starting", "address", listener.Addr()) ctx, ca := context.WithCancel(ctx) diff --git a/pkg/clients/gateway_client.go b/pkg/clients/gateway_client.go index 33449b46aa..ef1ce0722e 100644 --- a/pkg/clients/gateway_client.go +++ b/pkg/clients/gateway_client.go @@ -188,16 +188,12 @@ func (gc *gatewayClient) Connect(ctx context.Context) (_ grpc.ClientConnInterfac var headerMd metadata.MD splicedStream, err := streamClient.Connect(ctx, grpc.Header(&headerMd)) if err != nil { - gc.logger.With( - "name", sc.name, - ).Warn("failed to connect to spliced stream, 
skipping", "error", err) + gc.logger.Warn("failed to connect to spliced stream, skipping", "name", sc.name, "error", err) continue } if err := ts.Splice(splicedStream, totem.WithStreamName(sc.name)); err != nil { - gc.logger.With( - "name", sc.name, - ).Warn("failed to splice remote stream, skipping", "error", err) + gc.logger.Warn("failed to splice remote stream, skipping", "error", err, "name", sc.name) continue } @@ -213,9 +209,7 @@ func (gc *gatewayClient) Connect(ctx context.Context) (_ grpc.ClientConnInterfac Type: streamv1.EventType_DiscoveryComplete, CorrelationId: correlationId, }); err != nil { - gc.logger.With( - "name", sc.name, - ).Error("failed to notify remote stream", "error", err) + gc.logger.Error("failed to notify remote stream", "name", sc.name, "error", err) } }() } diff --git a/pkg/gateway/delegate.go b/pkg/gateway/delegate.go index f3eba35cd5..2a219b0cf0 100644 --- a/pkg/gateway/delegate.go +++ b/pkg/gateway/delegate.go @@ -56,9 +56,7 @@ func (d *DelegateServer) HandleAgentConnection(ctx context.Context, clientSet ag cluster, err := d.clusterStore.GetCluster(ctx, &corev1.Reference{Id: id}) if err != nil { - d.logger.With( - "id", id, - ).Error("internal error: failed to look up connecting agent", "error", err) + d.logger.Error("internal error: failed to look up connecting agent", "error", err, "id", id) return } @@ -67,14 +65,14 @@ func (d *DelegateServer) HandleAgentConnection(ctx context.Context, clientSet ag labels: cluster.GetLabels(), id: id, } - d.logger.With("id", id).Debug("agent connected") + d.logger.Debug("agent connected", "id", id) d.mu.Unlock() <-ctx.Done() d.mu.Lock() delete(d.activeAgents, id) - d.logger.With("id", id).Debug("agent disconnected") + d.logger.Debug("agent disconnected", "id", id) d.mu.Unlock() } diff --git a/pkg/gateway/grpc.go b/pkg/gateway/grpc.go index f5b5333150..d33c55c095 100644 --- a/pkg/gateway/grpc.go +++ b/pkg/gateway/grpc.go @@ -97,9 +97,7 @@ func (s *GatewayGRPCServer) ListenAndServe(ctx 
context.Context) error { } s.servicesMu.Unlock() - s.logger.With( - "address", listener.Addr().String(), - ).Info("gateway gRPC server starting") + s.logger.Info("gateway gRPC server starting", "address", listener.Addr().String()) errC := lo.Async(func() error { return server.Serve(listener) diff --git a/pkg/gateway/http.go b/pkg/gateway/http.go index b162a7fb4e..fae3dc34d3 100644 --- a/pkg/gateway/http.go +++ b/pkg/gateway/http.go @@ -198,10 +198,7 @@ ROUTES: for _, route := range cfg.Routes { for _, reservedPrefix := range s.reservedPrefixRoutes { if strings.HasPrefix(route.Path, reservedPrefix) { - s.logger.With( - "route", route.Method+" "+route.Path, - "plugin", pluginMeta.Module, - ).Warn("skipping route for plugin as it conflicts with a reserved prefix") + s.logger.Warn("skipping route for plugin as it conflicts with a reserved prefix", "route", route.Method+" "+route.Path, "plugin", pluginMeta.Module) continue ROUTES } } diff --git a/pkg/gateway/stream.go b/pkg/gateway/stream.go index 6f03d69abc..b821f1712d 100644 --- a/pkg/gateway/stream.go +++ b/pkg/gateway/stream.go @@ -122,9 +122,7 @@ func (s *StreamServer) Connect(stream streamv1.Stream_ConnectServer) error { Id: id, }) if err != nil { - s.logger.With( - "id", id, - ).Error("failed to get cluster", "error", err) + s.logger.Error("failed to get cluster", "error", err, "id", id) return err } eventC, err := s.clusterStore.WatchCluster(ctx, c) @@ -137,15 +135,11 @@ func (s *StreamServer) Connect(stream streamv1.Stream_ConnectServer) error { streamClient := streamv1.NewStreamClient(r.cc) splicedStream, err := streamClient.Connect(ctx) if err != nil { - s.logger.With( - "clusterId", c.Id, - ).Warn("failed to connect to remote stream, skipping", "error", err) + s.logger.Warn("failed to connect to remote stream, skipping", "error", err, "clusterId", c.Id) continue } if err := ts.Splice(splicedStream, totem.WithStreamName(r.name)); err != nil { - s.logger.With( - "clusterId", c.Id, - ).Warn("failed to splice 
remote stream, skipping", "error", err) + s.logger.Warn("failed to splice remote stream, skipping", "error", err, "clusterId", c.Id) continue } } @@ -178,9 +172,7 @@ func (s *StreamServer) Connect(stream streamv1.Stream_ConnectServer) error { } func (s *StreamServer) RegisterService(desc *grpc.ServiceDesc, impl any) { - s.logger.With( - "service", desc.ServiceName, - ).Debug("registering service") + s.logger.Debug("registering service", "service", desc.ServiceName) if len(desc.Streams) > 0 { panic("failed to register service: nested streams are currently not supported") } @@ -188,9 +180,7 @@ func (s *StreamServer) RegisterService(desc *grpc.ServiceDesc, impl any) { } func (s *StreamServer) registerInternalService(desc *grpc.ServiceDesc, impl any) { - s.logger.With( - "service", desc.ServiceName, - ).Debug("registering internal service") + s.logger.Debug("registering internal service", "service", desc.ServiceName) if len(desc.Streams) > 0 { panic("failed to register internal service: nested streams are currently not supported") } diff --git a/pkg/gateway/sync.go b/pkg/gateway/sync.go index 3a4e376a0c..caa3f403d7 100644 --- a/pkg/gateway/sync.go +++ b/pkg/gateway/sync.go @@ -46,7 +46,7 @@ func (f *SyncRequester) HandleAgentConnection(ctx context.Context, clientSet age f.mu.Lock() id := cluster.StreamAuthorizedID(ctx) f.activeAgents[id] = clientSet - f.logger.With("id", id).Debug("agent connected") + f.logger.Debug("agent connected", "id", id) f.mu.Unlock() // blocks until ctx is canceled @@ -59,7 +59,7 @@ func (f *SyncRequester) HandleAgentConnection(ctx context.Context, clientSet age f.mu.Lock() delete(f.activeAgents, id) - f.logger.With("id", id).Debug("agent disconnected") + f.logger.Debug("agent disconnected", "id", id) f.mu.Unlock() } diff --git a/pkg/management/extensions.go b/pkg/management/extensions.go index c56a6165c4..e9699c5aec 100644 --- a/pkg/management/extensions.go +++ b/pkg/management/extensions.go @@ -70,9 +70,7 @@ func (m *Server) 
configureApiExtensionDirector(ctx context.Context, pl plugins.L reflectClient := grpcreflect.NewClient(ctx, rpb.NewServerReflectionClient(cc)) sds, err := p.Descriptors(ctx, &emptypb.Empty{}) if err != nil { - m.logger.With( - "plugin", md.Module, - ).Error("failed to get extension descriptors", "error", err) + m.logger.Error("failed to get extension descriptors", "error", err, "plugin", md.Module) return } for _, sd := range sds.Items { diff --git a/pkg/opni/commands/agent_v2.go b/pkg/opni/commands/agent_v2.go index 8710264c29..294742f743 100644 --- a/pkg/opni/commands/agent_v2.go +++ b/pkg/opni/commands/agent_v2.go @@ -55,7 +55,8 @@ func BuildAgentV2Cmd() *cobra.Command { path, err := config.FindConfig() if err != nil { if errors.Is(err, config.ErrConfigNotFound) { - agentlg.Error(`could not find a config file in current directory or ["/etc/opni"], and --config was not given`) + wd, _ := os.Getwd() + agentlg.Error(`could not find a config file in working directory or ["/etc/opni"], and --config was not given`, "workingDir", wd) } agentlg.Error("an error occurred while searching for a config file") os.Exit(1) diff --git a/pkg/opni/commands/support.go b/pkg/opni/commands/support.go index ed5b19ca9e..f923502360 100644 --- a/pkg/opni/commands/support.go +++ b/pkg/opni/commands/support.go @@ -75,7 +75,7 @@ func BuildSupportBootstrapCmd() *cobra.Command { wd, _ := os.Getwd() agentlg.Infof(`could not find a config file in ["%s", "$home/.opni], and --config was not given`, wd) default: - agentlg.With(zap.Error(err)).Fatal(cmd.Context(), "an error occurred while searching for a config file") + agentlg.With(zap.Error(err)).Fatal("an error occurred while searching for a config file") } } @@ -83,12 +83,12 @@ func BuildSupportBootstrapCmd() *cobra.Command { if configFile != "" { objects, err := config.LoadObjectsFromFile(configFile) if err != nil { - agentlg.With(zap.Error(err)).Fatal(cmd.Context(), "failed to load config") + agentlg.With(zap.Error(err)).Fatal("failed to load 
config") } if ok := objects.Visit(func(config *v1beta1.SupportAgentConfig) { agentConfig = config }); !ok { - agentlg.Fatal(cmd.Context(), "no support agent config found in config file") + agentlg.Fatal("no support agent config found in config file") } } else { agentConfig.TypeMeta = v1beta1.SupportAgentConfigTypeMeta @@ -111,7 +111,7 @@ func BuildSupportBootstrapCmd() *cobra.Command { case agentConfig.Spec.AuthData.Token != "": token = agentConfig.Spec.AuthData.Token default: - agentlg.Fatal(cmd.Context(), "no token provided") + agentlg.Fatal("no token provided") } bootstrapper, err := configureSupportAgentBootstrap( @@ -123,14 +123,14 @@ func BuildSupportBootstrapCmd() *cobra.Command { if err != nil { agentlg.With( zap.Error(err), - ).Fatal(cmd.Context(), "failed to configure bootstrap") + ).Fatal("failed to configure bootstrap") } ipBuilder, err := ident.GetProviderBuilder("supportagent") if err != nil { agentlg.With( zap.Error(err), - ).Fatal(cmd.Context(), "failed to get ident provider") + ).Fatal("failed to get ident provider") } ip := ipBuilder(agentConfig) @@ -138,7 +138,7 @@ func BuildSupportBootstrapCmd() *cobra.Command { if err != nil { agentlg.With( zap.Error(err), - ).Fatal(cmd.Context(), "failed to get unique identifier") + ).Fatal("failed to get unique identifier") } kr, err := bootstrapper.Bootstrap(ctx, ip) @@ -187,7 +187,7 @@ func BuildSupportPingCmd() *cobra.Command { ctx, ca := context.WithCancel(waitctx.FromContext(cmd.Context())) defer ca() - agentlg := logger.NewZap() + agentlg := logger.NewZap(logger.WithLogLevel(logger.ParseLevel(logLevel))) config := supportagentconfig.MustLoadConfig(configFile, agentlg) @@ -302,7 +302,7 @@ func BuildSupportPasswordCmd() *cobra.Command { Use: "password", Short: "Shows the initial password for Opensearch Dashboards", Run: func(cmd *cobra.Command, args []string) { - agentlg := logger.NewZap() + agentlg := logger.NewZap(logger.WithLogLevel(logger.ParseLevel(logLevel))) kr, err := 
supportagentconfig.LoadKeyring(getRetrievePassword) if err != nil { diff --git a/pkg/plugins/apis/apiextensions/stream/plugin_agent.go b/pkg/plugins/apis/apiextensions/stream/plugin_agent.go index f3631aa5c5..01165fedba 100644 --- a/pkg/plugins/apis/apiextensions/stream/plugin_agent.go +++ b/pkg/plugins/apis/apiextensions/stream/plugin_agent.go @@ -131,7 +131,7 @@ func (e *agentStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectS // and reconnect. return status.Errorf(codes.DeadlineExceeded, "stream client discovery timed out after %s", timeout) case <-stream.Context().Done(): - e.logger.With("error", stream.Context().Err()).Error("stream disconnected while waiting for discovery") + e.logger.Error("stream disconnected while waiting for discovery", "error", stream.Context().Err()) return stream.Context().Err() } @@ -143,7 +143,7 @@ func (e *agentStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectS } case err := <-errC: if err != nil { - e.logger.With("error", stream.Context().Err()).Error("stream encountered an error while waiting for discovery") + e.logger.Error("stream encountered an error while waiting for discovery", "error", stream.Context().Err()) return status.Errorf(codes.Internal, "stream encountered an error while waiting for discovery: %v", err) } } @@ -160,9 +160,7 @@ func (e *agentStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectS } func (e *agentStreamExtensionServerImpl) Notify(_ context.Context, event *streamv1.StreamEvent) (*emptypb.Empty, error) { - e.logger.With( - "type", event.Type.String(), - ).Debug("received notify event for:", "name", e.name) + e.logger.Debug("received notify event for:", "name", e.name, "type", event.Type.String()) e.activeStreamsMu.Lock() defer e.activeStreamsMu.Unlock() diff --git a/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go b/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go index 347e2631d9..8da5871f8d 100644 --- 
a/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go +++ b/pkg/plugins/apis/apiextensions/stream/plugin_gateway.go @@ -109,9 +109,7 @@ type gatewayStreamExtensionServerImpl struct { func (e *gatewayStreamExtensionServerImpl) Connect(stream streamv1.Stream_ConnectServer) error { id := cluster.StreamAuthorizedID(stream.Context()) - e.logger.With( - "id", id, - ).Debug("stream connected") + e.logger.Debug("stream connected", "id", id) opts := []totem.ServerOption{totem.WithName("plugin_" + e.name)} diff --git a/pkg/plugins/discovery.go b/pkg/plugins/discovery.go index 4feca51c9d..98fe51fa64 100644 --- a/pkg/plugins/discovery.go +++ b/pkg/plugins/discovery.go @@ -38,20 +38,14 @@ PLUGINS: f, err := dc.Fs.Open(path) if err != nil { if dc.Logger != nil { - dc.Logger.With( - "plugin", path, - "error", err, - ).Error("failed to open plugin for reading") + dc.Logger.Error("failed to open plugin for reading", "plugin", path, "error", err) } continue } md, err := meta.ReadFile(f) if err != nil { if dc.Logger != nil { - dc.Logger.With( - "plugin", path, - "error", err, - ).Error("failed to read plugin metadata") + dc.Logger.Error("failed to read plugin metadata", "plugin", path, "error", err) } f.Close() continue diff --git a/pkg/plugins/loader.go b/pkg/plugins/loader.go index 9de2bfaa00..fe90d0543d 100644 --- a/pkg/plugins/loader.go +++ b/pkg/plugins/loader.go @@ -277,10 +277,7 @@ func (p *PluginLoader) LoadPlugins(ctx context.Context, pluginDir string, scheme if secureConfig, ok := secureConfigs[md.Module]; ok { clientOpts = append(clientOpts, WithSecureConfig(secureConfig)) } else { - p.logger.With( - "module", md.Module, - "path", md.BinaryPath, - ).Warn("plugin is not present in manifest, skipping") + p.logger.Warn("plugin is not present in manifest, skipping", "module", md.Module, "path", md.BinaryPath) continue } } diff --git a/pkg/resources/collector/discovery/servicemonitor.go b/pkg/resources/collector/discovery/servicemonitor.go index 9afd8ba367..c13303ae99 
100644 --- a/pkg/resources/collector/discovery/servicemonitor.go +++ b/pkg/resources/collector/discovery/servicemonitor.go @@ -499,7 +499,7 @@ func (s *serviceMonitorScrapeConfigRetriever) generateStaticServiceConfig( CredentialsFile: bearerSecretRes.Path(), } } else { - s.logger.Warn("failed to find a specified bearer token secret", "sercret", ep.BearerTokenSecret) + s.logger.Warn("failed to find a specified bearer token secret", "secret", ep.BearerTokenSecret) } } diff --git a/pkg/storage/crds/token_store.go b/pkg/storage/crds/token_store.go index f292eda3d1..39730fd482 100644 --- a/pkg/storage/crds/token_store.go +++ b/pkg/storage/crds/token_store.go @@ -135,9 +135,7 @@ func (c *CRDStore) UpdateToken(ctx context.Context, ref *corev1.Reference, mutat // garbageCollectToken performs a best-effort deletion of an expired token. func (c *CRDStore) garbageCollectToken(token *corev1beta1.BootstrapToken) { - c.logger.With( - "token", token.GetName(), - ).Debug("garbage-collecting expired token") + c.logger.Debug("garbage-collecting expired token", "token", token.GetName()) retry.OnError(retry.DefaultBackoff, func(err error) bool { return !k8serrors.IsNotFound(err) }, func() error { diff --git a/pkg/storage/etcd/token_store.go b/pkg/storage/etcd/token_store.go index 6eca4595f2..fc02b8482d 100644 --- a/pkg/storage/etcd/token_store.go +++ b/pkg/storage/etcd/token_store.go @@ -185,9 +185,7 @@ func (e *EtcdStore) addLeaseMetadata( // garbageCollectToken performs a best-effort deletion of an expired token. 
func (e *EtcdStore) garbageCollectToken(token *corev1.BootstrapToken) { - e.Logger.With( - "token", token.GetTokenID(), - ).Debug("garbage-collecting expired token") + e.Logger.Debug("garbage-collecting expired token", "token", token.GetTokenID()) if token.Metadata.LeaseID != 0 { defer func(id int64) { @@ -200,8 +198,6 @@ func (e *EtcdStore) garbageCollectToken(token *corev1.BootstrapToken) { _, err := e.Client.Delete(context.Background(), path.Join(e.Prefix, tokensKey, token.TokenID)) if err != nil { - e.Logger.With( - "token", token.TokenID, - ).Warn("failed to garbage-collect expired token", "error", err) + e.Logger.Warn("failed to garbage-collect expired token", "error", err, "token", token.TokenID) } } diff --git a/pkg/storage/jetstream/cluster_store.go b/pkg/storage/jetstream/cluster_store.go index 9ca8d8274f..d04e438d23 100644 --- a/pkg/storage/jetstream/cluster_store.go +++ b/pkg/storage/jetstream/cluster_store.go @@ -163,9 +163,7 @@ func (s *JetStreamStore) translateClusterWatchEvent(update nats.KeyValueEntry) ( case nats.KeyValuePut: cluster := &corev1.Cluster{} if err := protojson.Unmarshal(update.Value(), cluster); err != nil { - s.logger.With( - "cluster", update.Key(), - ).Warn("failed to unmarshal cluster", "error", err) + s.logger.Warn("failed to unmarshal cluster", "error", err, "cluster", update.Key()) return storage.WatchEvent[*corev1.Cluster]{}, false } cluster.SetResourceVersion(fmt.Sprint(update.Revision())) diff --git a/pkg/storage/jetstream/token_store.go b/pkg/storage/jetstream/token_store.go index 3ddf6aec49..7f9a39f515 100644 --- a/pkg/storage/jetstream/token_store.go +++ b/pkg/storage/jetstream/token_store.go @@ -86,9 +86,7 @@ func (s *JetStreamStore) UpdateToken(ctx context.Context, ref *corev1.Reference, } mutator(token) if token.Metadata.UsageCount >= token.Metadata.MaxUsages && token.Metadata.MaxUsages > 0 { - s.logger.With( - "token", token.TokenID, - ).Debug("delete token because it has reached max usage") + s.logger.Debug("delete 
token because it has reached max usage", "token", token.TokenID) if err := s.kv.Tokens.Delete(token.TokenID); err != nil { if !errors.Is(err, nats.ErrKeyNotFound) { return nil, fmt.Errorf("failed to delete token: %w", err) @@ -161,14 +159,10 @@ func patchTTL(token *corev1.BootstrapToken, entry nats.KeyValueEntry) { // garbageCollectToken performs a best-effort deletion of an expired token. func (s *JetStreamStore) garbageCollectToken(token *corev1.BootstrapToken) { - s.logger.With( - "token", token.TokenID, - ).Debug("garbage-collecting expired token") + s.logger.Debug("garbage-collecting expired token", "token", token.TokenID) if err := s.kv.Tokens.Delete(token.TokenID); err != nil { if !errors.Is(err, nats.ErrKeyNotFound) { - s.logger.With( - "token", token.TokenID, - ).Warn("failed to garbage-collect expired token", "error", err) + s.logger.Warn("failed to garbage-collect expired token", "error", err, "token", token.TokenID) } } } diff --git a/pkg/task/controller.go b/pkg/task/controller.go index b44461846d..25c5cc2429 100644 --- a/pkg/task/controller.go +++ b/pkg/task/controller.go @@ -82,7 +82,7 @@ func NewController(ctx context.Context, name string, store KVStore, runner TaskR rw := storage.NewValueStoreLocker(storage.NewValueStore(ctrl.store, id), ctrl.locks.Get(id)) rw.Lock() if err := rw.Delete(ctx); err != nil { - ctrl.logger.With("id", id).Warn("failed to clean task state", "error", err) + ctrl.logger.Warn("failed to clean task state", "error", err, "id", id) // continue anyway, this will be retried next time // the controller is started } diff --git a/pkg/test/environment.go b/pkg/test/environment.go index e7eb4cd2bc..64d82ae037 100644 --- a/pkg/test/environment.go +++ b/pkg/test/environment.go @@ -1289,7 +1289,7 @@ func (e *Environment) StartNodeExporter() { } time.Sleep(time.Second) } - e.Logger.With("address", fmt.Sprintf("http://localhost:%d", e.ports.NodeExporterPort)).Info("Node exporter started") + e.Logger.Info("Node exporter started", 
"address", fmt.Sprintf("http://localhost:%d", e.ports.NodeExporterPort)) waitctx.Go(e.ctx, func() { <-e.ctx.Done() session.Wait() diff --git a/pkg/update/patch/filesystem.go b/pkg/update/patch/filesystem.go index 34baf9cedc..a8c480140c 100644 --- a/pkg/update/patch/filesystem.go +++ b/pkg/update/patch/filesystem.go @@ -84,9 +84,7 @@ func (p *FilesystemCache) Archive(manifest *controlv1.PluginArchive) error { continue } - p.logger.With( - "plugin", item.Metadata.Path, - ).Warn("existing cached plugin is corrupted, overwriting") + p.logger.Warn("existing cached plugin is corrupted, overwriting", "plugin", item.Metadata.Path) } item := item @@ -202,9 +200,7 @@ func (p *FilesystemCache) GetBinaryFile(dir, hash string) ([]byte, error) { if hex.EncodeToString(b2hash.Sum(nil)) != hash { defer p.Clean(hash) - p.logger.With( - "hash", hash, - ).Error("binary corrupted: hash mismatch") + p.logger.Error("binary corrupted: hash mismatch", "hash", hash) return nil, fmt.Errorf("binary corrupted: hash mismatch") } return pluginData, nil diff --git a/pkg/update/server.go b/pkg/update/server.go index 7d03a228a8..490e5b1144 100644 --- a/pkg/update/server.go +++ b/pkg/update/server.go @@ -51,9 +51,7 @@ func (s *UpdateServer) SyncManifest(ctx context.Context, manifest *controlv1.Upd lg.Warn("could not sync agent manifest", "error", err) return nil, status.Error(codes.InvalidArgument, err.Error()) } - s.logger.With( - "strategy", strategy, - ).Info("syncing agent manifest") + s.logger.Info("syncing agent manifest", "strategy", strategy) s.handlerMu.RLock() handler, ok := s.updateHandlers[strategy] diff --git a/plugins/alerting/pkg/alerting/alarms/v1/setup.go b/plugins/alerting/pkg/alerting/alarms/v1/setup.go index da331d475c..f49663ae70 100644 --- a/plugins/alerting/pkg/alerting/alarms/v1/setup.go +++ b/plugins/alerting/pkg/alerting/alarms/v1/setup.go @@ -219,7 +219,7 @@ func (p *AlarmServerComponent) handleKubeAlertCreation(ctx context.Context, cond cond.GetRoutingAnnotations(), k, 
nil, baseKubeRule, ) - p.logger.With("handler", "kubeStateAlertCreate").Debug("kube state alert created", "kubeRuleContent", kubeRuleContent) + p.logger.Debug("kube state alert created", "kubeRuleContent", kubeRuleContent, "handler", "kubeStateAlertCreate") if err != nil { return err } @@ -227,7 +227,7 @@ func (p *AlarmServerComponent) handleKubeAlertCreation(ctx context.Context, cond if err != nil { return err } - p.logger.With("Expr", "kube-state").Debug(string(out)) + p.logger.Debug(string(out), "Expr", "kube-state") adminClient, err := p.adminClient.GetContext(ctx) if err != nil { return err @@ -272,7 +272,7 @@ func (p *AlarmServerComponent) handleCpuSaturationAlertCreation( if err != nil { return err } - p.logger.With("Expr", "cpu").Debug(string(out)) + p.logger.Debug(string(out), "Expr", "cpu") adminClient, err := p.adminClient.GetContext(ctx) if err != nil { return err @@ -314,7 +314,7 @@ func (p *AlarmServerComponent) handleMemorySaturationAlertCreation(ctx context.C if err != nil { return err } - p.logger.With("Expr", "mem").Debug(string(out)) + p.logger.Debug(string(out), "Expr", "mem") adminClient, err := p.adminClient.GetContext(ctx) if err != nil { return err @@ -356,7 +356,7 @@ func (p *AlarmServerComponent) handleFsSaturationAlertCreation(ctx context.Conte if err != nil { return err } - p.logger.With("Expr", "fs").Debug(string(out)) + p.logger.Debug(string(out), "Expr", "fs") adminClient, err := p.adminClient.GetContext(ctx) if err != nil { return err @@ -393,7 +393,7 @@ func (p *AlarmServerComponent) handlePrometheusQueryAlertCreation(ctx context.Co if err != nil { return err } - p.logger.With("Expr", "user-query").Debug(out.String()) + p.logger.Debug(out.String(), "Expr", "user-query") adminClient, err := p.adminClient.GetContext(ctx) if err != nil { return err diff --git a/plugins/alerting/pkg/alerting/management.go b/plugins/alerting/pkg/alerting/management.go index efb9c8563a..e0bc18c842 100644 --- a/plugins/alerting/pkg/alerting/management.go 
+++ b/plugins/alerting/pkg/alerting/management.go @@ -27,7 +27,7 @@ func (p *Plugin) configureDriver(ctx context.Context, opts ...driverutil.Option) priorityOrder := []string{"alerting-manager", "gateway-manager", "local-alerting", "test-environment", "noop"} for _, name := range priorityOrder { if builder, ok := drivers.Drivers.Get(name); ok { - p.logger.With("driver", name).Info("using cluster driver") + p.logger.Info("using cluster driver", "driver", name) driver, err := builder(ctx, opts...) if err != nil { p.logger.With( diff --git a/plugins/alerting/pkg/alerting/system.go b/plugins/alerting/pkg/alerting/system.go index 23fd57150e..7b7bb0987c 100644 --- a/plugins/alerting/pkg/alerting/system.go +++ b/plugins/alerting/pkg/alerting/system.go @@ -45,16 +45,12 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { cfg, err := client.GetConfig(context.Background(), &emptypb.Empty{}, grpc.WaitForReady(true)) if err != nil { - p.logger.With( - "err", err, - ).Error("Failed to get mgmnt config") + p.logger.Error("Failed to get mgmnt config", "err", err) os.Exit(1) } objectList, err := machinery.LoadDocuments(cfg.Documents) if err != nil { - p.logger.With( - "err", err, - ).Error("failed to load config") + p.logger.Error("failed to load config", "err", err) os.Exit(1) } objectList.Visit(func(config *v1beta1.GatewayConfig) { @@ -127,7 +123,7 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { }), ) if err != nil { - p.logger.With("err", err).Error("fatal error connecting to NATs") + p.logger.Error("fatal error connecting to NATs", "err", err) } p.natsConn.Set(nc) mgr, err := p.natsConn.Get().JetStream() @@ -145,13 +141,13 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { } clStatus, err := p.GetClusterStatus(p.ctx, &emptypb.Empty{}) if err != nil { - p.logger.With("err", err).Error("failed to get cluster status") + p.logger.Error("failed to get cluster status", "err", err) return } if clStatus.State == 
alertops.InstallState_Installed || clStatus.State == alertops.InstallState_InstallUpdating { syncInfo, err := p.getSyncInfo(p.ctx) if err != nil { - p.logger.With("err", err).Error("failed to get sync info") + p.logger.Error("failed to get sync info", "err", err) } else { for _, comp := range p.Components() { comp.Sync(p.ctx, syncInfo) @@ -159,7 +155,7 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { } conf, err := p.GetClusterConfiguration(p.ctx, &emptypb.Empty{}) if err != nil { - p.logger.With("err", err).Error("failed to get cluster configuration") + p.logger.Error("failed to get cluster configuration", "err", err) return } peers := listPeers(int(conf.GetNumReplicas())) diff --git a/plugins/alerting/test/test_drivers.go b/plugins/alerting/test/test_drivers.go index 4c29e5eac6..07152551cb 100644 --- a/plugins/alerting/test/test_drivers.go +++ b/plugins/alerting/test/test_drivers.go @@ -340,7 +340,7 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( ctxCa, cancelFunc := context.WithCancel(ctx) alertmanagerCmd := exec.CommandContext(ctxCa, opniBin, alertmanagerArgs...) plugins.ConfigureSysProcAttr(alertmanagerCmd) - l.logger.With("alertmanager-port", webPort, "opni-port", opniPort).Info("Starting AlertManager") + l.logger.Info("Starting AlertManager", "alertmanager-port", webPort, "opni-port", opniPort) session, err := testutil.StartCmd(alertmanagerCmd) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { @@ -367,7 +367,7 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( syncerCmd := exec.CommandContext(ctxCa, opniBin, syncerArgs...) 
plugins.ConfigureSysProcAttr(syncerCmd) - l.logger.With("port", syncerPort).Info("Starting AlertManager Syncer") + l.logger.Info("Starting AlertManager Syncer", "port", syncerPort) _, err = testutil.StartCmd(syncerCmd) if err != nil { if !errors.Is(ctx.Err(), context.Canceled) { @@ -377,7 +377,7 @@ func (l *TestEnvAlertingClusterDriver) StartAlertingBackendServer( } } - l.logger.With("address", fmt.Sprintf("http://127.0.0.1:%d", webPort)).Info("AlertManager started") + l.logger.Info("AlertManager started", "address", fmt.Sprintf("http://127.0.0.1:%d", webPort)) waitctx.Permissive.Go(ctx, func() { <-ctx.Done() cmd, _ := session.G() diff --git a/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go b/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go index 94888e1500..bf8913c792 100644 --- a/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go +++ b/plugins/logging/pkg/agent/drivers/kubernetes_manager/kubernetes_manager.go @@ -121,15 +121,11 @@ BACKOFF: for backoff.Continue(b) { collectorConf := m.buildLoggingCollectorConfig() if err := m.reconcileObject(collectorConf, config.Enabled); err != nil { - m.logger.With( - "object", client.ObjectKeyFromObject(collectorConf).String(), - ).Error("error reconciling object", "error", err) + m.logger.Error("error reconciling object", "error", err, "object", client.ObjectKeyFromObject(collectorConf).String()) continue BACKOFF } if err := m.reconcileCollector(config.Enabled); err != nil { - m.logger.With( - "object", "opni collector", - ).Error("error reconciling object", "error", err) + m.logger.Error("error reconciling object", "error", err, "object", "opni collector") } success = true diff --git a/plugins/logging/pkg/backend/metadata.go b/plugins/logging/pkg/backend/metadata.go index df06811289..8a597e5f81 100644 --- a/plugins/logging/pkg/backend/metadata.go +++ b/plugins/logging/pkg/backend/metadata.go @@ -18,15 +18,10 @@ func (b *LoggingBackend) 
updateClusterMetadata(ctx context.Context, event *manag return nil } - b.Logger.With( - "oldName", oldName, - "newName", newName, - ).Debug("cluster was renamed") + b.Logger.Debug("cluster was renamed", "oldName", oldName, "newName", newName) if err := b.ClusterDriver.StoreClusterMetadata(ctx, event.Cluster.GetId(), newName); err != nil { - b.Logger.With( - "cluster", event.Cluster.Id, - ).Debug("could not update cluster metadata", "error", err) + b.Logger.Debug("could not update cluster metadata", "error", err, "cluster", event.Cluster.Id) return nil } @@ -64,9 +59,7 @@ func (b *LoggingBackend) reconcileClusterMetadata(ctx context.Context, clusters for _, cluster := range clusters { err := b.ClusterDriver.StoreClusterMetadata(ctx, cluster.GetId(), cluster.Metadata.Labels[opnicorev1.NameLabel]) if err != nil { - b.Logger.With( - "cluster", cluster.Id, - ).Warn("could not update cluster metadata", "error", err) + b.Logger.Warn("could not update cluster metadata", "error", err, "cluster", cluster.Id) retErr = err } } diff --git a/plugins/logging/pkg/backend/sync.go b/plugins/logging/pkg/backend/sync.go index 8aa3a85c17..e3fbd82fa6 100644 --- a/plugins/logging/pkg/backend/sync.go +++ b/plugins/logging/pkg/backend/sync.go @@ -129,8 +129,5 @@ func (b *LoggingBackend) requestNodeSync(ctx context.Context, cluster *opnicorev ).Warn("failed to request node sync; nodes may not be updated immediately", "error", err) return } - b.Logger.With( - "cluster", name, - "capability", wellknown.CapabilityLogs, - ).Info("node sync requested") + b.Logger.Info("node sync requested", "cluster", name, "capability", wellknown.CapabilityLogs) } diff --git a/plugins/logging/pkg/gateway/system.go b/plugins/logging/pkg/gateway/system.go index 025f89eeeb..75c84f0fd5 100644 --- a/plugins/logging/pkg/gateway/system.go +++ b/plugins/logging/pkg/gateway/system.go @@ -22,17 +22,13 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { p.mgmtApi.Set(client) cfg, err := client.GetConfig(context.Background(), &emptypb.Empty{}, 
grpc.WaitForReady(true)) if err != nil { - p.logger.With( - "err", err, - ).Error("failed to get config") + p.logger.Error("failed to get config", "err", err) os.Exit(1) } objectList, err := machinery.LoadDocuments(cfg.Documents) if err != nil { - p.logger.With( - "err", err, - ).Error("failed to load config") + p.logger.Error("failed to load config", "err", err) os.Exit(1) } @@ -41,9 +37,7 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { objectList.Visit(func(config *v1beta1.GatewayConfig) { backend, err := machinery.ConfigureStorageBackend(p.ctx, &config.Spec.Storage) if err != nil { - p.logger.With( - "err", err, - ).Error("failed to configure storage backend") + p.logger.Error("failed to configure storage backend", "err", err) os.Exit(1) } p.storageBackend.Set(backend) @@ -66,9 +60,7 @@ func (p *Plugin) UseKeyValueStore(client system.KeyValueStoreClient) { logger: p.logger.WithGroup("uninstaller"), }) if err != nil { - p.logger.With( - "err", err, - ).Error("failed to create task controller") + p.logger.Error("failed to create task controller", "err", err) os.Exit(1) } diff --git a/plugins/logging/pkg/opensearchdata/status.go b/plugins/logging/pkg/opensearchdata/status.go index 5705f49480..7700c113af 100644 --- a/plugins/logging/pkg/opensearchdata/status.go +++ b/plugins/logging/pkg/opensearchdata/status.go @@ -17,13 +17,13 @@ func (m *Manager) GetClusterStatus() ClusterStatus { resp, err := m.Client.Cluster.GetClusterHealth(context.TODO()) if err != nil { - m.logger.With("err", err).Error("failed to fetch opensearch cluster status") + m.logger.Error("failed to fetch opensearch cluster status", "err", err) return ClusterStatusError } defer resp.Body.Close() if resp.IsError() { - m.logger.With("resp", resp.String).Error("failure response from cluster status") + m.logger.Error("failure response from cluster status", "resp", resp.String) return ClusterStatusError } diff --git a/plugins/metrics/pkg/agent/rules.go 
b/plugins/metrics/pkg/agent/rules.go index da04885108..49996e9ea2 100644 --- a/plugins/metrics/pkg/agent/rules.go +++ b/plugins/metrics/pkg/agent/rules.go @@ -131,9 +131,7 @@ func (s *RuleStreamer) streamRuleGroupUpdates( searchInterval = duration } notifier := notifier.NewPeriodicUpdateNotifier(ctx, finder, searchInterval) - s.logger.With( - "interval", searchInterval.String(), - ).Debug("rule discovery notifier configured") + s.logger.Debug("rule discovery notifier configured", "interval", searchInterval.String()) notifierC := notifier.NotifyC(ctx) s.logger.Debug("starting rule group update notifier") @@ -160,9 +158,7 @@ func (s *RuleStreamer) marshalRuleGroups(ruleGroups []rules.RuleGroup) [][]byte for _, ruleGroup := range ruleGroups { doc, err := yaml.Marshal(ruleGroup) if err != nil { - s.logger.With( - "group", ruleGroup.Name, - ).Error("failed to marshal rule group", "error", err) + s.logger.Error("failed to marshal rule group", "error", err, "group", ruleGroup.Name) continue } yamlDocs = append(yamlDocs, doc) diff --git a/plugins/metrics/pkg/backend/metrics.go b/plugins/metrics/pkg/backend/metrics.go index bb278fb9b7..1eb55c8b43 100644 --- a/plugins/metrics/pkg/backend/metrics.go +++ b/plugins/metrics/pkg/backend/metrics.go @@ -102,10 +102,7 @@ func (m *MetricsBackend) requestNodeSync(ctx context.Context, cluster *corev1.Re ).Warn("failed to request node sync; nodes may not be updated immediately", "error", err) return } - m.Logger.With( - "cluster", name, - "capability", wellknown.CapabilityMetrics, - ).Info("node sync requested") + m.Logger.Info("node sync requested", "cluster", name, "capability", wellknown.CapabilityMetrics) } // Implements node.NodeMetricsCapabilityServer @@ -131,9 +128,7 @@ func (m *MetricsBackend) Sync(ctx context.Context, req *node.SyncRequest) (*node // auto-disable if cortex is not installed if err := m.ClusterDriver.ShouldDisableNode(cluster.Reference()); err != nil { reason := status.Convert(err).Message() - m.Logger.With( - "reason", reason, -
).Info("disabling metrics capability for node") + m.Logger.Info("disabling metrics capability for node", "reason", reason) enabled = false conditions = append(conditions, reason) } @@ -151,10 +146,7 @@ func (m *MetricsBackend) Sync(ctx context.Context, req *node.SyncRequest) (*node status.Enabled = req.GetCurrentConfig().GetEnabled() status.Conditions = req.GetCurrentConfig().GetConditions() status.LastSync = timestamppb.Now() - m.Logger.With( - "id", id, - "time", status.LastSync.AsTime(), - ).Debug("synced node") + m.Logger.Debug("synced node", "id", id, "time", status.LastSync.AsTime()) nodeSpec, err := m.getNodeSpecOrDefault(ctx, id) if err != nil { diff --git a/plugins/metrics/pkg/backend/remoteread.go b/plugins/metrics/pkg/backend/remoteread.go index 8680376285..ccd037f7b7 100644 --- a/plugins/metrics/pkg/backend/remoteread.go +++ b/plugins/metrics/pkg/backend/remoteread.go @@ -317,9 +317,7 @@ func (m *MetricsBackend) Discover(ctx context.Context, request *remoteread.Disco }).Discover(ctx, request) if err != nil { - m.Logger.With( - "capability", wellknown.CapabilityMetrics, - ).Error("failed to run import discovery", "error", err) + m.Logger.Error("failed to run import discovery", "error", err, "capability", wellknown.CapabilityMetrics) return nil, err } diff --git a/plugins/metrics/pkg/cortex/admin.go b/plugins/metrics/pkg/cortex/admin.go index 3e45f13065..4683e08d14 100644 --- a/plugins/metrics/pkg/cortex/admin.go +++ b/plugins/metrics/pkg/cortex/admin.go @@ -761,15 +761,11 @@ func (p *CortexAdminServer) proxyCortexToPrometheus( req.Header.Set(orgIDCodec.Key(), orgIDCodec.Encode([]string{tenant})) resp, err := p.CortexClientSet.HTTP().Do(req) if err != nil { - p.Logger.With( - "request", url, - ).Error("failed with err:", "error", err) + p.Logger.Error("failed with err:", "error", err, "request", url) return nil, err } if resp.StatusCode != http.StatusOK { - p.Logger.With( - "request", url, - ).Error("request
failed with", "status", resp.Status, "request", url) return nil, fmt.Errorf("request failed with: %s", resp.Status) } return resp, nil diff --git a/plugins/metrics/pkg/cortex/aggregation.go b/plugins/metrics/pkg/cortex/aggregation.go index ed7d129de1..93c24476ef 100644 --- a/plugins/metrics/pkg/cortex/aggregation.go +++ b/plugins/metrics/pkg/cortex/aggregation.go @@ -93,9 +93,7 @@ func (a *MultiTenantRuleAggregator) Handle(c *gin.Context) { resp, err := a.cortexClient.Do(req) if err != nil { - a.logger.With( - "request", c.FullPath(), - ).Error("error querying cortex", "error", err) + a.logger.Error("error querying cortex", "error", err, "request", c.FullPath()) c.AbortWithError(http.StatusInternalServerError, err) return } diff --git a/plugins/metrics/pkg/cortex/api.go b/plugins/metrics/pkg/cortex/api.go index 85fd8840f4..5a9fb00891 100644 --- a/plugins/metrics/pkg/cortex/api.go +++ b/plugins/metrics/pkg/cortex/api.go @@ -70,9 +70,7 @@ func (p *HttpApiServer) ConfigureRoutes(router *gin.Engine) { rbacMiddleware := rbac.NewMiddleware(rbacProvider, orgIDCodec) authMiddleware, ok := p.AuthMiddlewares[p.Config.AuthProvider] if !ok { - p.Logger.With( - "name", p.Config.AuthProvider, - ).Error("auth provider not found") + p.Logger.Error("auth provider not found", "name", p.Config.AuthProvider) os.Exit(1) } diff --git a/plugins/metrics/pkg/gateway/management.go b/plugins/metrics/pkg/gateway/management.go index de56179691..f9ef7205a4 100644 --- a/plugins/metrics/pkg/gateway/management.go +++ b/plugins/metrics/pkg/gateway/management.go @@ -12,9 +12,7 @@ func (p *Plugin) configureCortexManagement() { builder, ok := drivers.ClusterDrivers.Get(driverName) if !ok { - p.logger.With( - "driver", driverName, - ).Error("unknown cluster driver, using fallback noop driver") + p.logger.Error("unknown cluster driver, using fallback noop driver", "driver", driverName) builder, ok = drivers.ClusterDrivers.Get("noop") if !ok { @@ -24,9 +22,7 @@ func (p *Plugin) 
configureCortexManagement() { driver, err := builder(p.ctx) if err != nil { - p.logger.With( - "driver", driverName, - ).Error("failed to initialize cluster driver", "error", err) + p.logger.Error("failed to initialize cluster driver", "error", err, "driver", driverName) return } diff --git a/plugins/topology/pkg/backend/topology.go b/plugins/topology/pkg/backend/topology.go index bcfa28f278..03cafe4ebd 100644 --- a/plugins/topology/pkg/backend/topology.go +++ b/plugins/topology/pkg/backend/topology.go @@ -126,10 +126,7 @@ func (t *TopologyBackend) requestNodeSync(ctx context.Context, cluster *corev1.R ).Warn("failed to request node sync; nodes may not be updated immediately", "error", err) return } - t.Logger.With( - "cluster", name, - "capability", wellknown.CapabilityTopology, - ).Info("node sync requested") + t.Logger.Info("node sync requested", "cluster", name, "capability", wellknown.CapabilityTopology) } func (t *TopologyBackend) Install(ctx context.Context, req *capabilityv1.InstallRequest) (*capabilityv1.InstallResponse, error) { diff --git a/plugins/topology/pkg/topology/gateway/management.go b/plugins/topology/pkg/topology/gateway/management.go index e0a9a1fc1e..240f00a88f 100644 --- a/plugins/topology/pkg/topology/gateway/management.go +++ b/plugins/topology/pkg/topology/gateway/management.go @@ -15,9 +15,7 @@ func (p *Plugin) configureTopologyManagement() { name := "topology-manager" driver, err := drivers.GetClusterDriver(name) if err != nil { - p.logger.With( - "driver", name, - ).Error("failed to load cluster driver, using fallback no-op driver", "error", err) + p.logger.Error("failed to load cluster driver, using fallback no-op driver", "error", err, "driver", name) driver = &drivers.NoopClusterDriver{} } p.clusterDriver.Set(driver) diff --git a/plugins/topology/pkg/topology/gateway/stream/stream.go b/plugins/topology/pkg/topology/gateway/stream/stream.go index 903747b831..3a81b8af4a 100644 --- a/plugins/topology/pkg/topology/gateway/stream/stream.go +++
b/plugins/topology/pkg/topology/gateway/stream/stream.go @@ -77,7 +77,7 @@ func (t *TopologyStreamWriter) Push(_ context.Context, payload *stream.Payload) if err != nil { return nil, err } - t.Logger.With("info", info).Debug("successfully pushed topology data") + t.Logger.Debug("successfully pushed topology data", "info", info) return &emptypb.Empty{}, nil } diff --git a/plugins/topology/pkg/topology/gateway/system.go b/plugins/topology/pkg/topology/gateway/system.go index 8deebc5396..4e9d7e8fec 100644 --- a/plugins/topology/pkg/topology/gateway/system.go +++ b/plugins/topology/pkg/topology/gateway/system.go @@ -28,22 +28,20 @@ func (p *Plugin) UseManagementAPI(client managementv1.ManagementClient) { grpc.WaitForReady(true), ) if err != nil { - p.logger.With("err", err).Error("failed to get config") + p.logger.Error("failed to get config", "err", err) os.Exit(1) } objectList, err := machinery.LoadDocuments(cfg.Documents) if err != nil { - p.logger.With("err", err).Error("failed to load config") + p.logger.Error("failed to load config", "err", err) os.Exit(1) } machinery.LoadAuthProviders(p.ctx, objectList) objectList.Visit(func(config *v1beta1.GatewayConfig) { backend, err := machinery.ConfigureStorageBackend(p.ctx, &config.Spec.Storage) if err != nil { - p.logger.With( - "err", err, - ).Error("failed to configure storage backend") + p.logger.Error("failed to configure storage backend", "err", err) os.Exit(1) } p.storageBackend.Set(backend)