Migrate logger from knative to controller-runtime (#6150)
jonathan-innis authored May 21, 2024
1 parent 9c580fd commit d39fdb5
Showing 53 changed files with 155 additions and 164 deletions.
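Across the files below the change is mechanical: knative's SugaredLogger helpers are replaced with controller-runtime's logr-based log package. A minimal sketch of the mapping, using only the two libraries' public APIs (the function name and sample values here are illustrative, not taken from the repository):

```go
package example

import (
	"context"
	"errors"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// newStyle shows the controller-runtime calls this commit standardizes on;
// the previous knative form is noted in the trailing comments.
func newStyle(ctx context.Context) {
	logger := log.FromContext(ctx)               // was: logging.FromContext(ctx)
	logger = logger.WithValues("id", "i-012345") // was: logger.With("id", "i-012345")
	ctx = log.IntoContext(ctx, logger)           // was: logging.WithLogger(ctx, logger)

	logger.Info("informational message")                    // was: logger.Infof(...)
	logger.V(1).Info("debug-level message")                 // was: logger.Debugf(...)
	logger.Error(errors.New("example"), "operation failed") // was: logger.Errorf("operation failed, %v", err)
	_ = ctx
}
```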
1 change: 1 addition & 0 deletions cmd/controller/main.go
@@ -31,6 +31,7 @@ import (

func main() {
ctx, op := operator.NewOperator(coreoperator.NewOperator())
+
awsCloudProvider := cloudprovider.New(
op.InstanceTypesProvider,
op.InstanceProvider,
6 changes: 3 additions & 3 deletions go.mod
@@ -29,7 +29,7 @@ require (
k8s.io/utils v0.0.0-20240102154912-e7106e64919e
knative.dev/pkg v0.0.0-20231010144348-ca8c009405dd
sigs.k8s.io/controller-runtime v0.18.2
- sigs.k8s.io/karpenter v0.36.1-0.20240516162236-0e678127e788
+ sigs.k8s.io/karpenter v0.36.1-0.20240521002315-9b145a6d85b4
sigs.k8s.io/yaml v1.4.0
)

@@ -108,9 +108,9 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
- k8s.io/cloud-provider v0.30.0 // indirect
+ k8s.io/cloud-provider v0.30.1 // indirect
k8s.io/component-base v0.30.1 // indirect
- k8s.io/csi-translation-lib v0.30.0 // indirect
+ k8s.io/csi-translation-lib v0.30.1 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
12 changes: 6 changes & 6 deletions go.sum
@@ -740,12 +740,12 @@ k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U=
k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc=
k8s.io/client-go v0.30.1 h1:uC/Ir6A3R46wdkgCV3vbLyNOYyCJ8oZnjtJGKfytl/Q=
k8s.io/client-go v0.30.1/go.mod h1:wrAqLNs2trwiCH/wxxmT/x3hKVH9PuV0GGW0oDoHVqc=
- k8s.io/cloud-provider v0.30.0 h1:hz1MXkFjsyO167sRZVchXEi2YYMQ6kolBi79nuICjzw=
- k8s.io/cloud-provider v0.30.0/go.mod h1:iyVcGvDfmZ7m5cliI9TTHj0VTjYDNpc/K71Gp6hukjU=
+ k8s.io/cloud-provider v0.30.1 h1:OslHpog97zG9Kr7/vV1ki8nLKq8xTPUkN/kepCxBqKI=
+ k8s.io/cloud-provider v0.30.1/go.mod h1:1uZp+FSskXQoeAAIU91/XCO8X/9N1U3z5usYeSLT4MI=
k8s.io/component-base v0.30.1 h1:bvAtlPh1UrdaZL20D9+sWxsJljMi0QZ3Lmw+kmZAaxQ=
k8s.io/component-base v0.30.1/go.mod h1:e/X9kDiOebwlI41AvBHuWdqFriSRrX50CdwA9TFaHLI=
- k8s.io/csi-translation-lib v0.30.0 h1:pEe6jshNVE4od2AdgYlsAtiKP/MH+NcsBbUPA/dWA6U=
- k8s.io/csi-translation-lib v0.30.0/go.mod h1:5TT/awOiKEX+8CcbReVYJyddT7xqlFrp3ChE9e45MyU=
+ k8s.io/csi-translation-lib v0.30.1 h1:fIBtNMQjyr7HFv3xGSSH9cWOQS1K1kIBmZ1zRsHuVKs=
+ k8s.io/csi-translation-lib v0.30.1/go.mod h1:l0HrIBIxUKRvqnNWqn6AXTYgUa2mAFLT6bjo1lU+55U=
k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw=
k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag=
Expand All @@ -761,8 +761,8 @@ sigs.k8s.io/controller-runtime v0.18.2 h1:RqVW6Kpeaji67CY5nPEfRz6ZfFMk0lWQlNrLql
sigs.k8s.io/controller-runtime v0.18.2/go.mod h1:tuAt1+wbVsXIT8lPtk5RURxqAnq7xkpv2Mhttslg7Hw=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
- sigs.k8s.io/karpenter v0.36.1-0.20240516162236-0e678127e788 h1:xrzVuIjd2MWfdoiIElJlJgzMvYA6MDaA1CVQUxCOhRk=
- sigs.k8s.io/karpenter v0.36.1-0.20240516162236-0e678127e788/go.mod h1:Ov8+tDVcF2BIPti+HL0hgoxIGy+rGIymKZAYZprl0Ww=
+ sigs.k8s.io/karpenter v0.36.1-0.20240521002315-9b145a6d85b4 h1:zIKW8TX593mp/rlOdCqIbgUdVRQGHzeFkgDM6+zgeE8=
+ sigs.k8s.io/karpenter v0.36.1-0.20240521002315-9b145a6d85b4/go.mod h1:5XYrIz9Bi7HgQyaUsx7O08ft+TJjrH+htlnPq8Sz9J8=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4=
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
2 changes: 1 addition & 1 deletion pkg/apis/v1beta1/suite_test.go
@@ -20,9 +20,9 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- . "knative.dev/pkg/logging/testing"

. "sigs.k8s.io/karpenter/pkg/test/expectations"
+ . "sigs.k8s.io/karpenter/pkg/utils/testing"

"sigs.k8s.io/karpenter/pkg/operator/scheme"
coretest "sigs.k8s.io/karpenter/pkg/test"
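The suite files in this commit swap knative's dot-imported test logging helpers for karpenter's own utils/testing package, leaving the call sites untouched. Under logr, a helper of that kind can be built with the testr adapter; a minimal sketch of what such a helper presumably wraps (testContext is an illustrative name, not the repository's helper):

```go
package example

import (
	"context"
	"testing"

	"github.com/go-logr/logr/testr"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

// testContext returns a context carrying a logr.Logger that writes through
// t.Log, which is the kind of context the swapped-in dot import supplies to
// the Ginkgo suites.
func testContext(t *testing.T) context.Context {
	return log.IntoContext(context.Background(), testr.New(t))
}
```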
4 changes: 2 additions & 2 deletions pkg/batcher/createfleet.go
@@ -22,7 +22,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
- "knative.dev/pkg/logging"
+ "sigs.k8s.io/controller-runtime/pkg/log"
)

type CreateFleetBatcher struct {
@@ -70,7 +70,7 @@ func execCreateFleetBatch(ec2api ec2iface.EC2API) BatchExecutor[ec2.CreateFleetI
for _, instanceID := range reservation.InstanceIds {
requestIdx++
if requestIdx >= len(inputs) {
- logging.FromContext(ctx).Errorf("received more instances than requested, ignoring instance %s", aws.StringValue(instanceID))
+ log.FromContext(ctx).Error(fmt.Errorf("received more instances than requested, ignoring instance %s", aws.StringValue(instanceID)), "received error while batching")
continue
}
results = append(results, Result[ec2.CreateFleetOutput]{
4 changes: 2 additions & 2 deletions pkg/batcher/describeinstances.go
@@ -26,7 +26,7 @@ import (
"github.com/mitchellh/hashstructure/v2"
"github.com/samber/lo"
"k8s.io/apimachinery/pkg/util/sets"
- "knative.dev/pkg/logging"
+ "sigs.k8s.io/controller-runtime/pkg/log"
)

type DescribeInstancesBatcher struct {
@@ -56,7 +56,7 @@ func (b *DescribeInstancesBatcher) DescribeInstances(ctx context.Context, descri
func FilterHasher(ctx context.Context, input *ec2.DescribeInstancesInput) uint64 {
hash, err := hashstructure.Hash(input.Filters, hashstructure.FormatV2, &hashstructure.HashOptions{SlicesAsSets: true})
if err != nil {
- logging.FromContext(ctx).Errorf("error hashing")
+ log.FromContext(ctx).Error(err, "failed hashing input filters")
}
return hash
}
2 changes: 1 addition & 1 deletion pkg/batcher/suite_test.go
@@ -30,7 +30,7 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- . "knative.dev/pkg/logging/testing"
+ . "sigs.k8s.io/karpenter/pkg/utils/testing"
)

var fakeEC2API *fake.EC2API
4 changes: 2 additions & 2 deletions pkg/batcher/terminateinstances.go
@@ -25,7 +25,7 @@ import (
"github.com/aws/aws-sdk-go/service/ec2/ec2iface"
"github.com/samber/lo"
"k8s.io/apimachinery/pkg/util/sets"
- "knative.dev/pkg/logging"
+ "sigs.k8s.io/controller-runtime/pkg/log"
)

type TerminateInstancesBatcher struct {
@@ -68,7 +68,7 @@ func execTerminateInstancesBatch(ec2api ec2iface.EC2API) BatchExecutor[ec2.Termi
// We don't care about the error here since we'll break up the batch upon any sort of failure
output, err := ec2api.TerminateInstancesWithContext(ctx, firstInput)
if err != nil {
- logging.FromContext(ctx).Errorf("terminating instances, %s", err)
+ log.FromContext(ctx).Error(err, "failed terminating instances")
}

if output == nil {
6 changes: 3 additions & 3 deletions pkg/cache/unavailableofferings.go
@@ -22,7 +22,7 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/patrickmn/go-cache"
- "knative.dev/pkg/logging"
+ "sigs.k8s.io/controller-runtime/pkg/log"
)

// UnavailableOfferings stores any offerings that return ICE (insufficient capacity errors) when
@@ -54,12 +54,12 @@ func (u *UnavailableOfferings) IsUnavailable(instanceType, zone, capacityType st
// MarkUnavailable communicates recently observed temporary capacity shortages in the provided offerings
func (u *UnavailableOfferings) MarkUnavailable(ctx context.Context, unavailableReason, instanceType, zone, capacityType string) {
// even if the key is already in the cache, we still need to call Set to extend the cached entry's TTL
- logging.FromContext(ctx).With(
+ log.FromContext(ctx).WithValues(
"reason", unavailableReason,
"instance-type", instanceType,
"zone", zone,
"capacity-type", capacityType,
- "ttl", UnavailableOfferingsTTL).Debugf("removing offering from offerings")
+ "ttl", UnavailableOfferingsTTL).V(1).Info("removing offering from offerings")
u.cache.SetDefault(u.key(instanceType, zone, capacityType), struct{}{})
atomic.AddUint64(&u.SeqNum, 1)
}
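Throughout the diff, Debugf calls become V(1).Info. Whether those messages actually appear depends on how the logr sink is configured; with a zap-backed logger (as the zapr usage later in this commit suggests), V(1) corresponds to zap's debug level. A minimal sketch of enabling it, not this repository's actual operator wiring:

```go
package example

import (
	"github.com/go-logr/zapr"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// setupLogging builds a zap logger at debug level and installs it as the
// global controller-runtime logger, so V(1).Info output is emitted.
// zapr translates logr's V(1) to zapcore.Level(-1), i.e. zap's DebugLevel.
func setupLogging() error {
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(zapcore.DebugLevel)
	zapLog, err := cfg.Build()
	if err != nil {
		return err
	}
	log.SetLogger(zapr.NewLogger(zapLog))
	return nil
}
```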
8 changes: 3 additions & 5 deletions pkg/cloudprovider/cloudprovider.go
@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime/schema"

+ "sigs.k8s.io/controller-runtime/pkg/log"
corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
"sigs.k8s.io/karpenter/pkg/events"
"sigs.k8s.io/karpenter/pkg/scheduling"
@@ -39,7 +39,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
- "knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"

cloudproviderevents "github.com/aws/karpenter-provider-aws/pkg/cloudprovider/events"
@@ -135,7 +135,7 @@ func (c *CloudProvider) Get(ctx context.Context, providerID string) (*corev1beta
if err != nil {
return nil, fmt.Errorf("getting instance ID, %w", err)
}
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("id", id))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("id", id))
instance, err := c.instanceProvider.Get(ctx, id)
if err != nil {
return nil, fmt.Errorf("getting instance, %w", err)
@@ -172,13 +172,11 @@ func (c *CloudProvider) GetInstanceTypes(ctx context.Context, nodePool *corev1be
}

func (c *CloudProvider) Delete(ctx context.Context, nodeClaim *corev1beta1.NodeClaim) error {
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("nodeclaim", nodeClaim.Name))
-
id, err := utils.ParseInstanceID(nodeClaim.Status.ProviderID)
if err != nil {
return fmt.Errorf("getting instance ID, %w", err)
}
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("id", id))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("id", id))
return c.instanceProvider.Delete(ctx, id)
}

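In Get and Delete the enrichment pattern keeps its shape: attach key/value pairs once, store the logger back into the context, and let downstream code retrieve it with FromContext. A minimal sketch of that flow under the new API (deleteInstance and parseInstanceID are illustrative stand-ins, not the provider's code):

```go
package example

import (
	"context"
	"fmt"
	"strings"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// parseInstanceID is a stand-in for the provider's utils.ParseInstanceID.
func parseInstanceID(providerID string) (string, error) {
	id := providerID[strings.LastIndex(providerID, "/")+1:]
	if id == "" {
		return "", fmt.Errorf("incorrectly formatted provider ID %q", providerID)
	}
	return id, nil
}

// deleteInstance mirrors the shape of CloudProvider.Delete above: the "id"
// key is attached once, and every later log call made through this ctx with
// log.FromContext carries it automatically.
func deleteInstance(ctx context.Context, providerID string) error {
	id, err := parseInstanceID(providerID)
	if err != nil {
		return fmt.Errorf("getting instance ID, %w", err)
	}
	ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("id", id))
	log.FromContext(ctx).V(1).Info("deleting instance")
	return nil
}
```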
2 changes: 1 addition & 1 deletion pkg/cloudprovider/suite_test.go
@@ -55,8 +55,8 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- . "knative.dev/pkg/logging/testing"
. "sigs.k8s.io/karpenter/pkg/test/expectations"
+ . "sigs.k8s.io/karpenter/pkg/utils/testing"
)

var ctx context.Context
16 changes: 8 additions & 8 deletions pkg/controllers/interruption/controller.go
@@ -26,8 +26,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/client-go/util/workqueue"
"k8s.io/utils/clock"
- "knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/karpenter/pkg/metrics"
@@ -81,9 +81,9 @@ func NewController(kubeClient client.Client, clk clock.Clock, recorder events.Re
}

func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconcile.Result, error) {
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("queue", c.sqsProvider.Name()))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("queue", c.sqsProvider.Name()))
if c.cm.HasChanged(c.sqsProvider.Name(), nil) {
- logging.FromContext(ctx).Debugf("watching interruption queue")
+ log.FromContext(ctx).V(1).Info("watching interruption queue")
}
sqsMessages, err := c.sqsProvider.GetSQSMessages(ctx)
if err != nil {
@@ -105,7 +105,7 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc
msg, e := c.parseMessage(sqsMessages[i])
if e != nil {
// If we fail to parse, then we should delete the message but still log the error
- logging.FromContext(ctx).Errorf("parsing message, %v", e)
+ log.FromContext(ctx).Error(err, "failed parsing interruption message")
errs[i] = c.deleteMessage(ctx, sqsMessages[i])
return
}
@@ -144,7 +144,7 @@ func (c *Controller) parseMessage(raw *sqsapi.Message) (messages.Message, error)
func (c *Controller) handleMessage(ctx context.Context, nodeClaimInstanceIDMap map[string]*v1beta1.NodeClaim,
nodeInstanceIDMap map[string]*v1.Node, msg messages.Message) (err error) {

- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("messageKind", msg.Kind()))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("messageKind", msg.Kind()))
receivedMessages.WithLabelValues(string(msg.Kind())).Inc()

if msg.Kind() == messages.NoOpKind {
@@ -179,9 +179,9 @@ func (c *Controller) deleteMessage(ctx context.Context, msg *sqsapi.Message) err
// handleNodeClaim retrieves the action for the message and then performs the appropriate action against the node
func (c *Controller) handleNodeClaim(ctx context.Context, msg messages.Message, nodeClaim *v1beta1.NodeClaim, node *v1.Node) error {
action := actionForMessage(msg)
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("nodeclaim", nodeClaim.Name, "action", string(action)))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("nodeclaim", nodeClaim.Name, "action", string(action)))
if node != nil {
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("node", node.Name))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("node", node.Name))
}

// Record metric and event for this action
@@ -215,7 +215,7 @@ func (c *Controller) deleteNodeClaim(ctx context.Context, nodeClaim *v1beta1.Nod
if err := c.kubeClient.Delete(ctx, nodeClaim); err != nil {
return client.IgnoreNotFound(fmt.Errorf("deleting the node on interruption message, %w", err))
}
- logging.FromContext(ctx).Infof("initiating delete from interruption message")
+ log.FromContext(ctx).Info("initiating delete from interruption message")
c.recorder.Publish(interruptionevents.TerminatingOnInterruption(node, nodeClaim)...)
metrics.NodeClaimsTerminatedCounter.With(prometheus.Labels{
metrics.ReasonLabel: terminationReasonLabel,
19 changes: 10 additions & 9 deletions pkg/controllers/interruption/interruption_benchmark_test.go
@@ -32,16 +32,17 @@ import (
"github.com/aws/aws-sdk-go/aws/session"
servicesqs "github.com/aws/aws-sdk-go/service/sqs"
"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
+ "github.com/go-logr/zapr"
"github.com/samber/lo"
"go.uber.org/multierr"
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/util/workqueue"
clock "k8s.io/utils/clock/testing"
- "knative.dev/pkg/logging"
controllerruntime "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/karpenter/pkg/apis/v1beta1"

"sigs.k8s.io/karpenter/pkg/operator/scheme"
@@ -78,7 +79,7 @@ func BenchmarkNotification100(b *testing.B) {

//nolint:gocyclo
func benchmarkNotificationController(b *testing.B, messageCount int) {
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("message-count", messageCount))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("message-count", messageCount))
fakeClock = &clock.FakeClock{}
ctx = coreoptions.ToContext(ctx, coretest.Options())
ctx = options.ToContext(ctx, test.Options(test.OptionsFields{
@@ -118,20 +119,20 @@ func benchmarkNotificationController(b *testing.B, messageCount int) {
interruptionController := interruption.NewController(env.Client, fakeClock, recorder, providers.sqsProvider, unavailableOfferingsCache)

messages, nodes := makeDiverseMessagesAndNodes(messageCount)
- logging.FromContext(ctx).Infof("provisioning nodes")
+ log.FromContext(ctx).Info("provisioning nodes")
if err := provisionNodes(ctx, env.Client, nodes); err != nil {
b.Fatalf("provisioning nodes, %v", err)
}
- logging.FromContext(ctx).Infof("completed provisioning nodes")
+ log.FromContext(ctx).Info("completed provisioning nodes")

- logging.FromContext(ctx).Infof("provisioning messages into the SQS Queue")
+ log.FromContext(ctx).Info("provisioning messages into the SQS Queue")
if err := providers.provisionMessages(ctx, messages...); err != nil {
b.Fatalf("provisioning messages, %v", err)
}
- logging.FromContext(ctx).Infof("completed provisioning messages into the SQS Queue")
+ log.FromContext(ctx).Info("completed provisioning messages into the SQS Queue")

m, err := controllerruntime.NewManager(env.Config, controllerruntime.Options{
- BaseContext: func() context.Context { return logging.WithLogger(ctx, zap.NewNop().Sugar()) },
+ BaseContext: func() context.Context { return log.IntoContext(ctx, zapr.NewLogger(zap.NewNop())) },
})
if err != nil {
b.Fatalf("creating manager, %v", err)
@@ -146,7 +147,7 @@ func benchmarkNotificationController(b *testing.B, messageCount int) {
start := time.Now()
managerErr := make(chan error)
go func() {
- logging.FromContext(ctx).Infof("starting controller manager")
+ log.FromContext(ctx).Info("starting controller manager")
managerErr <- m.Start(ctx)
}()

@@ -225,7 +226,7 @@ func (p *providerSet) monitorMessagesProcessed(ctx context.Context, eventRecorde
eventRecorder.Calls(events.Unhealthy(coretest.Node(), coretest.NodeClaim())[0].Reason) +
eventRecorder.Calls(events.RebalanceRecommendation(coretest.Node(), coretest.NodeClaim())[0].Reason) +
eventRecorder.Calls(events.SpotInterrupted(coretest.Node(), coretest.NodeClaim())[0].Reason)
- logging.FromContext(ctx).With("processed-message-count", totalProcessed).Infof("processed messages from the queue")
+ log.FromContext(ctx).WithValues("processed-message-count", totalProcessed).Info("processed messages from the queue")
time.Sleep(time.Second)
}
close(done)
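The benchmark previously silenced manager logging with a no-op SugaredLogger; the logr equivalent is to adapt zap.NewNop() through zapr, as the BaseContext change above does. A condensed, self-contained sketch (newQuietManager is an illustrative name; the real benchmark passes its test environment's rest config):

```go
package example

import (
	"context"

	"github.com/go-logr/zapr"
	"go.uber.org/zap"
	"k8s.io/client-go/rest"
	controllerruntime "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/log"
	"sigs.k8s.io/controller-runtime/pkg/manager"
)

// newQuietManager builds a manager whose base context carries a no-op
// logr.Logger, so controllers it starts log nothing during the benchmark.
func newQuietManager(cfg *rest.Config) (manager.Manager, error) {
	return controllerruntime.NewManager(cfg, controllerruntime.Options{
		BaseContext: func() context.Context {
			return log.IntoContext(context.Background(), zapr.NewLogger(zap.NewNop()))
		},
	})
}
```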
3 changes: 1 addition & 2 deletions pkg/controllers/interruption/suite_test.go
@@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/tools/record"
clock "k8s.io/utils/clock/testing"
- _ "knative.dev/pkg/system/testing"
"sigs.k8s.io/controller-runtime/pkg/client"

corev1beta1 "sigs.k8s.io/karpenter/pkg/apis/v1beta1"
@@ -53,8 +52,8 @@

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- . "knative.dev/pkg/logging/testing"
. "sigs.k8s.io/karpenter/pkg/test/expectations"
+ . "sigs.k8s.io/karpenter/pkg/utils/testing"
)

const (
8 changes: 4 additions & 4 deletions pkg/controllers/nodeclaim/garbagecollection/controller.go
@@ -24,8 +24,8 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/workqueue"
- "knative.dev/pkg/logging"
"sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/karpenter/pkg/cloudprovider"
@@ -85,11 +85,11 @@ func (c *Controller) Reconcile(ctx context.Context, _ reconcile.Request) (reconc
}

func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *v1beta1.NodeClaim, nodeList *v1.NodeList) error {
- ctx = logging.WithLogger(ctx, logging.FromContext(ctx).With("provider-id", nodeClaim.Status.ProviderID))
+ ctx = log.IntoContext(ctx, log.FromContext(ctx).WithValues("provider-id", nodeClaim.Status.ProviderID))
if err := c.cloudProvider.Delete(ctx, nodeClaim); err != nil {
return cloudprovider.IgnoreNodeClaimNotFoundError(err)
}
- logging.FromContext(ctx).Debugf("garbage collected cloudprovider instance")
+ log.FromContext(ctx).V(1).Info("garbage collected cloudprovider instance")

// Go ahead and cleanup the node if we know that it exists to make scheduling go quicker
if node, ok := lo.Find(nodeList.Items, func(n v1.Node) bool {
@@ -98,7 +98,7 @@ func (c *Controller) garbageCollect(ctx context.Context, nodeClaim *v1beta1.Node
if err := c.kubeClient.Delete(ctx, &node); err != nil {
return client.IgnoreNotFound(err)
}
- logging.FromContext(ctx).With("node", node.Name).Debugf("garbage collected node")
+ log.FromContext(ctx).WithValues("node", node.Name).V(1).Info("garbage collected node")
}
return nil
}
2 changes: 1 addition & 1 deletion pkg/controllers/nodeclaim/garbagecollection/suite_test.go
@@ -44,8 +44,8 @@ import (

. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
- . "knative.dev/pkg/logging/testing"
. "sigs.k8s.io/karpenter/pkg/test/expectations"
+ . "sigs.k8s.io/karpenter/pkg/utils/testing"
)

var ctx context.Context