
Replace import sirupsen/logrus by pkg/log #323

Merged · 10 commits · Oct 18, 2019
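
Every hunk in this PR applies the same transformation: printf-style logrus calls (Infof, Errorf, Debugf, Fatalf) become structured calls on Kanister's pkg/log, with errors attached via WithError and key-value context passed as a field.M map rather than interpolated into the message. A minimal before/after sketch of the pattern, lifted from the awsebs hunks below:

// Before: logrus, with the error and context folded into the format string.
log.Errorf("Failed to get volumes %v Error: %+v", aws.StringValueSlice(volIDs), err)

// After: pkg/log, with the error and context attached as structured fields.
log.WithError(err).Print("Failed to get volumes", field.M{"VolumeIds": volIDs})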
22 changes: 13 additions & 9 deletions cmd/controller/main.go
@@ -28,13 +28,13 @@ import (
"os/signal"
"syscall"

log "github.com/sirupsen/logrus"
"k8s.io/client-go/rest"

"github.com/kanisterio/kanister/pkg/controller"
_ "github.com/kanisterio/kanister/pkg/function"
"github.com/kanisterio/kanister/pkg/handler"
"github.com/kanisterio/kanister/pkg/kube"
"github.com/kanisterio/kanister/pkg/log"
"github.com/kanisterio/kanister/pkg/resource"
)

@@ -44,46 +44,50 @@ func main() {
s := handler.NewServer()
defer func() {
if err := s.Shutdown(ctx); err != nil {
log.Errorf("Failed to shutdown health check server: %+v", err)
log.WithError(err).Print("Failed to shutdown health check server")
}
}()
go func() {
if err := s.ListenAndServe(); err != nil {
log.Errorf("Failed to shutdown health check server: %+v", err)
log.WithError(err).Print("Failed to shutdown health check server")
}
}()

// Initialize the clients.
log.Infof("Getting kubernetes context")
log.Print("Getting kubernetes context")
config, err := rest.InClusterConfig()
if err != nil {
log.Fatalf("Failed to get k8s config. %+v", err)
log.WithError(err).Print("Failed to get k8s config")
return
}

// Make sure the CRD's exist.
if err := resource.CreateCustomResources(ctx, config); err != nil {
log.Fatalf("Failed to create CustomResources. %+v", err)
log.WithError(err).Print("Failed to create CustomResources.")
return
}

ns, err := kube.GetControllerNamespace()
if err != nil {
log.Fatalf("Failed to determine this pod's namespace %+v", err)
log.WithError(err).Print("Failed to determine this pod's namespace.")
return
}

// Create and start the watcher.
ctx, cancel := context.WithCancel(ctx)
c := controller.New(config)
err = c.StartWatch(ctx, ns)
if err != nil {
log.Fatalf("Failed to start controller. %+v", err)
log.WithError(err).Print("Failed to start controller.")
return
}

// create signals to stop watching the resources
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)
select {
case <-signalChan:
log.Infof("shutdown signal received, exiting...")
log.Print("shutdown signal received, exiting...")
cancel()
return
}
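One behavioral detail in the main.go hunks above: logrus's log.Fatalf calls os.Exit after logging, which skips deferred functions, so the deferred health-check server Shutdown never ran on a fatal error. The replacement logs the error and returns from main instead, unwinding normally so the defer fires. A simplified sketch of the difference (doSetup is a stand-in, not the PR's code):

func main() {
	ctx := context.Background()
	s := handler.NewServer()
	defer func() {
		if err := s.Shutdown(ctx); err != nil {
			log.WithError(err).Print("Failed to shutdown health check server")
		}
	}()

	if err := doSetup(); err != nil {
		// Before: log.Fatalf("Failed to get k8s config. %+v", err)
		//         -> os.Exit(1), the deferred Shutdown is skipped.
		// After: log and return, so deferred cleanup still runs.
		log.WithError(err).Print("Failed to get k8s config")
		return
	}
}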
30 changes: 15 additions & 15 deletions pkg/blockstorage/awsebs/awsebs.go
@@ -18,7 +18,6 @@ package awsebs

import (
"context"
"fmt"
"net/http"
"time"

@@ -30,12 +29,13 @@ import (
"github.com/jpillora/backoff"
"github.com/kanisterio/kanister/pkg/poll"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"

"github.com/kanisterio/kanister/pkg/blockstorage"
ktags "github.com/kanisterio/kanister/pkg/blockstorage/tags"
"github.com/kanisterio/kanister/pkg/blockstorage/zone"
awsconfig "github.com/kanisterio/kanister/pkg/config/aws"
"github.com/kanisterio/kanister/pkg/field"
"github.com/kanisterio/kanister/pkg/log"
)

var _ blockstorage.Provider = (*ebsStorage)(nil)
@@ -120,7 +120,7 @@ func (s *ebsStorage) VolumeGet(ctx context.Context, id string, zone string) (*bl
dvi := &ec2.DescribeVolumesInput{VolumeIds: volIDs}
dvo, err := s.ec2Cli.DescribeVolumesWithContext(ctx, dvi)
if err != nil {
log.Errorf("Failed to get volumes %v Error: %+v", aws.StringValueSlice(volIDs), err)
log.WithError(err).Print("Failed to get volumes", field.M{"VolumeIds": volIDs})
return nil, err
}
if len(dvo.Volumes) != len(volIDs) {
@@ -307,7 +307,7 @@ func (s *ebsStorage) SnapshotCreate(ctx context.Context, volume blockstorage.Vol
Tags: mapToEC2Tags(ktags.GetTags(tags)),
},
})
log.Infof("Snapshotting EBS volume: %s", *csi.VolumeId)
log.Print("Snapshotting EBS volume", field.M{"volume_id": *csi.VolumeId})
csi.SetDryRun(s.ec2Cli.DryRun)
snap, err := s.ec2Cli.CreateSnapshotWithContext(ctx, csi)
if err != nil && !isDryRunErr(err) {
@@ -339,14 +339,14 @@ func (s *ebsStorage) SnapshotCreateWaitForCompletion(ctx context.Context, snap *
}

func (s *ebsStorage) SnapshotDelete(ctx context.Context, snapshot *blockstorage.Snapshot) error {
log.Infof("EBS Snapshot ID %s", snapshot.ID)
log.Print("Deleting EBS Snapshot", field.M{"SnapshotID": snapshot.ID})
rmsi := &ec2.DeleteSnapshotInput{}
rmsi.SetSnapshotId(snapshot.ID)
rmsi.SetDryRun(s.ec2Cli.DryRun)
_, err := s.ec2Cli.DeleteSnapshotWithContext(ctx, rmsi)
if isSnapNotFoundErr(err) {
// If the snapshot is already deleted, we log, but don't return an error.
log.Debugf("Snapshot already deleted")
log.Debug().Print("Snapshot already deleted")
return nil
}
if err != nil && !isDryRunErr(err) {
@@ -376,7 +376,7 @@ func (s *ebsStorage) VolumeDelete(ctx context.Context, volume *blockstorage.Volu
_, err := s.ec2Cli.DeleteVolumeWithContext(ctx, rmvi)
if isVolNotFoundErr(err) {
// If the volume is already deleted, we log, but don't return an error.
log.Debugf("Volume already deleted")
log.Debug().Print("Volume already deleted")
return nil
}
if err != nil && !isDryRunErr(err) {
@@ -455,7 +455,7 @@ func createVolume(ctx context.Context, ec2Cli *EC2, cvi *ec2.CreateVolumeInput,
return "", nil
}
if err != nil {
log.Errorf("Failed to create volume for %v Error: %+v", *cvi, err)
log.WithError(err).Print("Failed to create volume", field.M{"input": cvi})
return "", err
}

Expand All @@ -475,7 +475,7 @@ func getSnapshots(ctx context.Context, ec2Cli *EC2, snapIDs []*string) ([]*ec2.S
}
// TODO: handle paging and continuation
if len(dso.Snapshots) != len(snapIDs) {
log.Errorf("Did not find all requested snapshots, snapshots_requested: %p, snapshots_found: %p", snapIDs, dso.Snapshots)
log.Error().Print("Did not find all requested snapshots", field.M{"snapshots_requested": snapIDs, "snapshots_found": dso.Snapshots})
// TODO: Move mapping to HTTP error to the caller
return nil, errors.New("Object not found")
}
@@ -522,7 +522,7 @@ func waitOnVolume(ctx context.Context, ec2Cli *EC2, vol *ec2.Volume) error {
for {
dvo, err := ec2Cli.DescribeVolumesWithContext(ctx, dvi)
if err != nil {
log.Errorf("Failed to describe volume %s Error: %+v", aws.StringValue(vol.VolumeId), err)
log.WithError(err).Print("Failed to describe volume", field.M{"VolumeID": aws.StringValue(vol.VolumeId)})
return err
}
if len(dvo.Volumes) != 1 {
@@ -533,10 +533,10 @@
return errors.New("Creating EBS volume failed")
}
if *s.State == ec2.VolumeStateAvailable {
log.Infof("Volume %s complete", *vol.VolumeId)
log.Print("Volume creation complete", field.M{"VolumeID": *vol.VolumeId})
return nil
}
log.Infof("Volume %s state: %s", *vol.VolumeId, *s.State)
log.Print("Update", field.M{"Volume": *vol.VolumeId, "State": *s.State})
time.Sleep(volWaitBackoff.Duration())
}
}
Expand Down Expand Up @@ -568,18 +568,18 @@ func waitOnSnapshotID(ctx context.Context, ec2Cli *EC2, snapID string) error {
return false, errors.New("Snapshot EBS volume failed")
}
if *s.State == ec2.SnapshotStateCompleted {
log.Infof("Snapshot with snapshot_id: %s completed", snapID)
log.Print("Snapshot completed", field.M{"SnapshotID": snapID})
return true, nil
}
log.Debugf("Snapshot progress: snapshot_id: %s, progress: %s", snapID, fmt.Sprintf("%+v", *s.Progress))
log.Debug().Print("Snapshot progress", field.M{"snapshot_id": snapID, "progress": *s.Progress})
return false, nil
})
}

// GetRegionFromEC2Metadata retrieves the region from the EC2 metadata service.
// Only works when the call is performed from inside AWS
func GetRegionFromEC2Metadata() (string, error) {
log.Debug("Retrieving region from metadata")
log.Debug().Print("Retrieving region from metadata")
conf := aws.Config{
HTTPClient: &http.Client{
Transport: http.DefaultTransport,
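The awsebs hunks above exercise all of the pkg/log call shapes this PR introduces; field.M is a map of key-value pairs attached to the message as structured context. A condensed summary, using the API exactly as it appears in the diff (the variable names are illustrative):

// A message with structured context.
log.Print("Snapshotting EBS volume", field.M{"volume_id": volID})

// Leveled variants chain a level selector before Print.
log.Debug().Print("Snapshot already deleted")
log.Error().Print("Did not find all requested snapshots",
	field.M{"snapshots_requested": snapIDs, "snapshots_found": found})

// Errors are attached explicitly instead of interpolated with %+v.
log.WithError(err).Print("Failed to describe volume",
	field.M{"VolumeID": aws.StringValue(vol.VolumeId)})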
5 changes: 3 additions & 2 deletions pkg/blockstorage/blockstorage_test.go
@@ -20,13 +20,14 @@ import (
"strings"
"testing"

log "github.com/sirupsen/logrus"
. "gopkg.in/check.v1"

"github.com/kanisterio/kanister/pkg/blockstorage"
"github.com/kanisterio/kanister/pkg/blockstorage/getter"
ktags "github.com/kanisterio/kanister/pkg/blockstorage/tags"
awsconfig "github.com/kanisterio/kanister/pkg/config/aws"
"github.com/kanisterio/kanister/pkg/field"
"github.com/kanisterio/kanister/pkg/log"
)

const (
@@ -150,7 +151,7 @@ func (s *BlockStorageProviderSuite) TestSnapshotCopy(c *C) {
snap, err := s.provider.SnapshotCopy(context.TODO(), *srcSnapshot, *dstSnapshot)
c.Assert(err, IsNil)

log.Infof("Copied snapshot %v to %v", srcSnapshot.ID, snap.ID)
log.Print("Snapshot copied", field.M{"FromSnapshotID": srcSnapshot.ID, "ToSnapshotID": snap.ID})

config := s.getConfig(c, dstSnapshot.Region)
provider, err := getter.New().Get(s.storageType, config)
19 changes: 10 additions & 9 deletions pkg/blockstorage/gcepd/gcepd.go
@@ -25,14 +25,15 @@ import (
"github.com/jpillora/backoff"
"github.com/pkg/errors"
uuid "github.com/satori/go.uuid"
log "github.com/sirupsen/logrus"
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
"k8s.io/apimachinery/pkg/util/sets"

"github.com/kanisterio/kanister/pkg/blockstorage"
ktags "github.com/kanisterio/kanister/pkg/blockstorage/tags"
"github.com/kanisterio/kanister/pkg/blockstorage/zone"
"github.com/kanisterio/kanister/pkg/field"
"github.com/kanisterio/kanister/pkg/log"
"github.com/kanisterio/kanister/pkg/poll"
)

@@ -145,7 +146,7 @@ func (s *gpdStorage) VolumeDelete(ctx context.Context, volume *blockstorage.Volu
op, err = s.service.Disks.Delete(s.project, volume.Az, volume.ID).Context(ctx).Do()
}
if isNotFoundError(err) {
log.Debugf("Cannot delete volume with id:%s Volume not found. ", volume.ID)
log.Debug().Print("Cannot delete volume.", field.M{"VolumeID": volume.ID, "reason": "Volume not found"})
return nil
}
if err != nil {
@@ -214,7 +215,7 @@ func (s *gpdStorage) SnapshotCreateWaitForCompletion(ctx context.Context, snap *
func (s *gpdStorage) SnapshotDelete(ctx context.Context, snapshot *blockstorage.Snapshot) error {
op, err := s.service.Snapshots.Delete(s.project, snapshot.ID).Context(ctx).Do()
if isNotFoundError(err) {
log.Debugf("Cannot delete snapshot with id:%s Snapshot not found. ", snapshot.ID)
log.Debug().Print("Cannot delete snapshot", field.M{"SnapshotID": snapshot.ID, "reason": "Snapshot not found"})
return nil
}
if err != nil {
@@ -236,7 +237,7 @@ func (s *gpdStorage) volumeParse(ctx context.Context, volume interface{}, zone s
vol := volume.(*compute.Disk)
volCreationTime, err := time.Parse(time.RFC3339, vol.CreationTimestamp)
if err != nil {
log.Errorf("Cannot parse GCP Disk timestamp")
log.Error().Print("Cannot parse GCP Disk timestamp")

}

Expand Down Expand Up @@ -266,7 +267,7 @@ func (s *gpdStorage) snapshotParse(ctx context.Context, snap *compute.Snapshot)
}
snapCreationTIme, err := time.Parse(time.RFC3339, snap.CreationTimestamp)
if err != nil {
log.Errorf("Cannot parse GCP Snapshot timestamp")
log.Error().Print("Cannot parse GCP Snapshot timestamp")
}
// TODO: fix getting region from zone
return &blockstorage.Snapshot{
@@ -493,10 +494,10 @@ func (s *gpdStorage) waitOnOperation(ctx context.Context, op *compute.Operation,
}
return false, errors.Errorf("%s", errJSON)
}
log.Infof("Operation %s done", op.OperationType)
log.Print("Operation done", field.M{"OperationType": op.OperationType})
return true, nil
case operationPending, operationRunning:
log.Debugf("Operation %s status: %s %s progress %d", op.OperationType, op.Status, op.StatusMessage, op.Progress)
log.Debug().Print("Operation status update", field.M{"Operation": op.OperationType, "Status": op.Status, "Status message": op.StatusMessage, "Progress": op.Progress})
return false, nil
default:
return false, errors.Errorf("Unknown operation status")
@@ -521,10 +522,10 @@ func (s *gpdStorage) waitOnSnapshotID(ctx context.Context, id string) error {
return false, errors.New("Snapshot GCP volume failed")
}
if snap.Status == "READY" {
log.Infof("Snapshot with snapshot_id: %s completed", id)
log.Print("Snapshot completed", field.M{"SnapshotID": id})
return true, nil
}
log.Debugf("Snapshot status: snapshot_id: %s, status: %s", id, snap.Status)
log.Debug().Print("Snapshot status", field.M{"snapshot_id": id, "status": snap.Status})
return false, nil
})
}
4 changes: 2 additions & 2 deletions pkg/blockstorage/ibm/client.go
@@ -23,11 +23,11 @@ import (
ibmprov "github.com/IBM/ibmcloud-storage-volume-lib/lib/provider"
ibmprovutils "github.com/IBM/ibmcloud-storage-volume-lib/provider/utils"
"github.com/pkg/errors"
log "github.com/sirupsen/logrus"
"go.uber.org/zap"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

"github.com/kanisterio/kanister/pkg/kube"
"github.com/kanisterio/kanister/pkg/log"
)

// IBM Cloud environment variable names
@@ -136,7 +136,7 @@ func findDefaultConfig(ctx context.Context, args map[string]string, zaplog *zap.
if err == nil {
return ibmCfg, nil
}
- log.WithError(err).Info("Could not get IBM default store secret")
+ log.WithError(err).Print("Could not get IBM default store secret")
// Checking if an api key is provided via args
// If it present will use api value and default Softlayer config
if apik, ok := args[APIKeyArgName]; ok {
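Across all five files the new API surface is small: Print, Debug().Print, Error().Print, and WithError(err).Print, each optionally taking a field.M of context. As a rough mental model only — a toy sketch of a logger with that shape, not Kanister's actual pkg/log implementation — the API can be reduced to:

package log

import (
	"fmt"
	"sort"
)

// M plays the role of field.M: ad-hoc structured context for one log line.
type M map[string]interface{}

type entry struct {
	level string
	err   error
}

// Debug and Error select a level; WithError attaches an error.
// (The level WithError implies here is a guess, not taken from the PR.)
func Debug() entry              { return entry{level: "debug"} }
func Error() entry              { return entry{level: "error"} }
func WithError(err error) entry { return entry{level: "error", err: err} }

// Print logs at a default level with optional structured fields.
func Print(msg string, fields ...M) { entry{level: "info"}.Print(msg, fields...) }

func (e entry) Print(msg string, fields ...M) {
	out := e.level + ": " + msg
	if e.err != nil {
		out += fmt.Sprintf(" error=%q", e.err.Error())
	}
	for _, m := range fields {
		keys := make([]string, 0, len(m))
		for k := range m {
			keys = append(keys, k)
		}
		sort.Strings(keys) // deterministic field order
		for _, k := range keys {
			out += fmt.Sprintf(" %s=%v", k, m[k])
		}
	}
	fmt.Println(out)
}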