Skip to content

Commit

Permalink
Merge pull request #389 from red-hat-storage/sync_us--devel
Browse files Browse the repository at this point in the history
Syncing latest changes from upstream devel for ceph-csi
  • Loading branch information
openshift-merge-bot[bot] authored Sep 27, 2024
2 parents 2650738 + 2d82ceb commit c2c2c9e
Show file tree
Hide file tree
Showing 10 changed files with 51 additions and 58 deletions.
1 change: 1 addition & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -101,6 +101,7 @@ for its support details.
| | Dynamically provision, de-provision File mode ROX volume | Alpha | >= v3.0.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.14.0 |
| | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.5.0 | >= v1.5.0 | Pacific (>=v16.2.0) | >= v1.22.0 |
| | Creating and deleting snapshot | GA | >= v3.1.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 |
| | Creating and deleting volume group snapshot | Alpha | >= v3.11.0 | >= v1.9.0 | Squid (>=v19.0.0) | >= v1.31.0 |
| | Provision volume from snapshot | GA | >= v3.1.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.17.0 |
| | Provision volume from another volume | GA | >= v3.1.0 | >= v1.0.0 | Pacific (>=v16.2.0) | >= v1.16.0 |
| | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Pacific (>=v16.2.0) | >= v1.15.0 |
Expand Down
8 changes: 7 additions & 1 deletion internal/controller/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ import (

"github.com/ceph/ceph-csi/internal/util/log"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection/resourcelock"
clientConfig "sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/manager"
Expand Down Expand Up @@ -69,7 +71,11 @@ func Start(config Config) error {
LeaderElectionResourceLock: resourcelock.LeasesResourceLock,
LeaderElectionID: electionID,
}
mgr, err := manager.New(clientConfig.GetConfigOrDie(), opts)

kubeConfig := clientConfig.GetConfigOrDie()
coreKubeConfig := rest.CopyConfig(kubeConfig)
coreKubeConfig.ContentType = runtime.ContentTypeProtobuf
mgr, err := manager.New(coreKubeConfig, opts)
if err != nil {
log.ErrorLogMsg("failed to create manager %s", err)

Expand Down
2 changes: 1 addition & 1 deletion internal/csi-addons/rbd/volumegroup.go
Original file line number Diff line number Diff line change
Expand Up @@ -222,7 +222,7 @@ func (vs *VolumeGroupServer) DeleteVolumeGroup(
}

// delete the volume group
err = mgr.DeleteVolumeGroup(ctx, vg)
err = vg.Delete(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal,
"failed to delete volume group %q: %s",
Expand Down
25 changes: 25 additions & 0 deletions internal/rbd/group/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,31 @@ func (cvg *commonVolumeGroup) GetIOContext(ctx context.Context) (*rados.IOContex
return ioctx, nil
}

// Delete removes the volume group from the journal.
func (cvg *commonVolumeGroup) Delete(ctx context.Context) error {
	// Collect the journal coordinates (name, CSI id and pool) that are
	// needed to release the reservation.
	vgName, err := cvg.GetName(ctx)
	if err != nil {
		return fmt.Errorf("failed to get name for volume group %q: %w", cvg, err)
	}

	id, err := cvg.GetID(ctx)
	if err != nil {
		return fmt.Errorf("failed to get id for volume group %q: %w", cvg, err)
	}

	poolName, err := cvg.GetPool(ctx)
	if err != nil {
		return fmt.Errorf("failed to get pool for volume group %q: %w", cvg, err)
	}

	// Drop the reservation from the journal; after this the volume group is
	// no longer tracked there.
	// NOTE(review): any failure, including not-found, is treated as fatal
	// here — confirm whether not-found should be tolerated so that deletion
	// stays idempotent.
	if err = cvg.journal.UndoReservation(ctx, poolName, vgName, id); err != nil {
		return fmt.Errorf("failed to undo the reservation for volume group %q: %w", cvg, err)
	}

	return nil
}

// GetCreationTime fetches the creation time of the volume group from the
// journal and returns it.
func (cvg *commonVolumeGroup) GetCreationTime(ctx context.Context) (*time.Time, error) {
Expand Down
2 changes: 1 addition & 1 deletion internal/rbd/group/volume_group.go
Original file line number Diff line number Diff line change
Expand Up @@ -194,7 +194,7 @@ func (vg *volumeGroup) Delete(ctx context.Context) error {

log.DebugLog(ctx, "volume group %q has been removed", vg)

return nil
return vg.commonVolumeGroup.Delete(ctx)
}

func (vg *volumeGroup) AddVolume(ctx context.Context, vol types.Volume) error {
Expand Down
41 changes: 0 additions & 41 deletions internal/rbd/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,6 @@ import (
"errors"
"fmt"

"github.com/ceph/go-ceph/rados"

"github.com/ceph/ceph-csi/internal/journal"
rbd_group "github.com/ceph/ceph-csi/internal/rbd/group"
"github.com/ceph/ceph-csi/internal/rbd/types"
Expand Down Expand Up @@ -255,42 +253,3 @@ func (mgr *rbdManager) CreateVolumeGroup(ctx context.Context, name string) (type

return vg, nil
}

// DeleteVolumeGroup removes the VolumeGroup from the backend storage and
// releases the matching reservation from the journal. A volume group that is
// already gone from the backend is not an error, so the call is idempotent.
func (mgr *rbdManager) DeleteVolumeGroup(ctx context.Context, vg types.VolumeGroup) error {
	err := vg.Delete(ctx)
	// FIX: errors.Is takes (err, target) — the arguments were swapped
	// (errors.Is(rados.ErrNotFound, err)), so a not-found error was never
	// matched and deletion of an absent group failed instead of succeeding.
	if err != nil && !errors.Is(err, rados.ErrNotFound) {
		return fmt.Errorf("failed to delete volume group %q: %w", vg, err)
	}

	clusterID, err := vg.GetClusterID(ctx)
	if err != nil {
		return fmt.Errorf("failed to get cluster id for volume group %q: %w", vg, err)
	}

	vgJournal, err := mgr.getVolumeGroupJournal(clusterID)
	if err != nil {
		return err
	}

	// Gather the journal coordinates needed to release the reservation.
	name, err := vg.GetName(ctx)
	if err != nil {
		return fmt.Errorf("failed to get name for volume group %q: %w", vg, err)
	}

	csiID, err := vg.GetID(ctx)
	if err != nil {
		return fmt.Errorf("failed to get id for volume group %q: %w", vg, err)
	}

	pool, err := vg.GetPool(ctx)
	if err != nil {
		return fmt.Errorf("failed to get pool for volume group %q: %w", vg, err)
	}

	// Release the reservation so the name/ID can be reused.
	err = vgJournal.UndoReservation(ctx, pool, name, csiID)
	if err != nil {
		return fmt.Errorf("failed to undo the reservation for volume group %q: %w", vg, err)
	}

	return nil
}
17 changes: 9 additions & 8 deletions internal/rbd/rbd_journal.go
Original file line number Diff line number Diff line change
Expand Up @@ -352,29 +352,30 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
return true, nil
}

// repairImageID checks if rv.ImageID is already available (if so, it was
// repairImageID checks if ri.ImageID is already available (if so, it was
// fetched from the journal), in case it is missing, the imageID is obtained
// and stored in the journal.
// if the force is set to true, the latest imageID will get added/updated in OMAP.
func (rv *rbdVolume) repairImageID(ctx context.Context, j *journal.Connection, force bool) error {
func (ri *rbdImage) repairImageID(ctx context.Context, j *journal.Connection, force bool) error {
if force {
// reset the imageID so that we can fetch latest imageID from ceph cluster.
rv.ImageID = ""
ri.ImageID = ""
}

if rv.ImageID != "" {
if ri.ImageID != "" {
return nil
}

err := rv.getImageID()
err := ri.getImageID()
if err != nil {
log.ErrorLog(ctx, "failed to get image id %s: %v", rv, err)
log.ErrorLog(ctx, "failed to get image id %s: %v", ri, err)

return err
}
err = j.StoreImageID(ctx, rv.JournalPool, rv.ReservedID, rv.ImageID)

err = j.StoreImageID(ctx, ri.JournalPool, ri.ReservedID, ri.ImageID)
if err != nil {
log.ErrorLog(ctx, "failed to store volume id %s: %v", rv, err)
log.ErrorLog(ctx, "failed to store volume id %s: %v", ri, err)

return err
}
Expand Down
7 changes: 5 additions & 2 deletions internal/rbd/rbd_util.go
Original file line number Diff line number Diff line change
Expand Up @@ -110,6 +110,7 @@ type rbdImage struct {
// This does not have a JSON tag as it is not stashed in JSON encoded
// config maps in v1.0.0
RequestName string
ReservedID string
NamePrefix string
// ParentName represents the parent image name of the image.
ParentName string
Expand Down Expand Up @@ -167,7 +168,6 @@ type rbdVolume struct {
AdminID string
UserID string
Mounter string
ReservedID string
MapOptions string
UnmapOptions string
LogDir string
Expand All @@ -190,7 +190,6 @@ type rbdSnapshot struct {
// SourceVolumeID is the volume ID of RbdImageName, that is exchanged with CSI drivers
// RbdSnapName is the name of the RBD snapshot backing this rbdSnapshot
SourceVolumeID string
ReservedID string
RbdSnapName string
}

Expand Down Expand Up @@ -390,15 +389,19 @@ func (ri *rbdImage) Connect(cr *util.Credentials) error {
// Destroy releases the rados IO-context, cluster connection and any
// encryption handles held by the rbdImage. Each handle is set to nil right
// after it is destroyed, so calling Destroy more than once does not free the
// same resource twice (the nil-guards below make the method idempotent for
// ioctx and conn). ctx is currently unused in this body.
func (ri *rbdImage) Destroy(ctx context.Context) {
	if ri.ioctx != nil {
		ri.ioctx.Destroy()
		// prevent a double-free on a repeated Destroy() call
		ri.ioctx = nil
	}
	if ri.conn != nil {
		ri.conn.Destroy()
		ri.conn = nil
	}
	// NOTE(review): isBlockEncrypted()/isFileEncrypted() presumably report
	// whether the respective encryption handle is set — confirm they return
	// false once the handle has been nilled, otherwise a second Destroy()
	// would call Destroy() on a nil handle.
	if ri.isBlockEncrypted() {
		ri.blockEncryption.Destroy()
		ri.blockEncryption = nil
	}
	if ri.isFileEncrypted() {
		ri.fileEncryption.Destroy()
		ri.fileEncryption = nil
	}
}

Expand Down
4 changes: 0 additions & 4 deletions internal/rbd/types/manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -43,8 +43,4 @@ type Manager interface {
// CreateVolumeGroup allocates a new VolumeGroup in the backend storage
// and records details about it in the journal.
CreateVolumeGroup(ctx context.Context, name string) (VolumeGroup, error)

// DeleteVolumeGroup removes VolumeGroup from the backend storage and
// any details from the journal.
DeleteVolumeGroup(ctx context.Context, vg VolumeGroup) error
}
2 changes: 2 additions & 0 deletions internal/util/k8s/client.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"fmt"
"os"

"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
Expand Down Expand Up @@ -47,6 +48,7 @@ func NewK8sClient() (*kubernetes.Clientset, error) {
return nil, fmt.Errorf("failed to get cluster config: %w", err)
}
}
cfg.ContentType = runtime.ContentTypeProtobuf
client, err := kubernetes.NewForConfig(cfg)
if err != nil {
return nil, fmt.Errorf("failed to create client: %w", err)
Expand Down

0 comments on commit c2c2c9e

Please sign in to comment.