Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(shared): adding shared mount support for ZFSPV volumes #164

Merged
merged 1 commit into from
Jul 1, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelogs/unreleased/164-pawanpraka1
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
adding shared mount support for ZFSPV volumes
1,014 changes: 684 additions & 330 deletions deploy/operators/centos7/zfs-operator.yaml

Large diffs are not rendered by default.

1,014 changes: 684 additions & 330 deletions deploy/operators/centos8/zfs-operator.yaml

Large diffs are not rendered by default.

498 changes: 336 additions & 162 deletions deploy/yamls/zfssnapshot-crd.yaml

Large diffs are not rendered by default.

516 changes: 348 additions & 168 deletions deploy/yamls/zfsvolume-crd.yaml

Large diffs are not rendered by default.

1,014 changes: 684 additions & 330 deletions deploy/zfs-operator.yaml

Large diffs are not rendered by default.

7 changes: 7 additions & 0 deletions pkg/apis/openebs.io/zfs/v1/zfsvolume.go
Original file line number Diff line number Diff line change
Expand Up @@ -193,6 +193,13 @@ type VolumeInfo struct {
// FsType can not be modified once volume has been provisioned.
// Default Value: ext4.
FsType string `json:"fsType,omitempty"`

// Shared specifies whether the volume can be shared among multiple pods.
// If it is not set to "yes", then the ZFS-LocalPV Driver will not allow
// the volume to be mounted by more than one pod.
// +kubebuilder:validation:Required
// +kubebuilder:validation:Enum=yes;no
Shared string `json:"shared,omitempty"`
}

type VolStatus struct {
Expand Down
6 changes: 6 additions & 0 deletions pkg/builder/volbuilder/build.go
Original file line number Diff line number Diff line change
Expand Up @@ -172,6 +172,12 @@ func (b *Builder) WithFsType(fstype string) *Builder {
return b
}

// WithShared sets whether the volume is shared, i.e. allowed to be
// mounted by multiple pods simultaneously. The value is stored verbatim
// in the ZFSVolume spec; per the field's documentation, only the string
// "yes" enables shared mounting — any other value leaves the volume
// exclusive to a single pod.
func (b *Builder) WithShared(shared string) *Builder {
	b.volume.Object.Spec.Shared = shared
	return b
}

// WithSnapshot sets Snapshot name for creating clone volume
func (b *Builder) WithSnapshot(snap string) *Builder {
b.volume.Object.Spec.SnapName = snap
Expand Down
2 changes: 2 additions & 0 deletions pkg/driver/controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,6 +98,7 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) {
tp := parameters["thinprovision"]
schld := parameters["scheduler"]
fstype := parameters["fstype"]
shared := parameters["shared"]

vtype := zfs.GetVolumeType(fstype)

Expand All @@ -124,6 +125,7 @@ func CreateZFSVolume(req *csi.CreateVolumeRequest) (string, error) {
WithVolumeType(vtype).
WithVolumeStatus(zfs.ZFSStatusPending).
WithFsType(fstype).
WithShared(shared).
WithCompression(compression).Build()

if err != nil {
Expand Down
2 changes: 1 addition & 1 deletion pkg/driver/grpc.go
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@ func logGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, h

log := isInfotrmativeLog(info.FullMethod)
if log == true {
klog.Infof("GRPC call: %s\n requests %s", info.FullMethod, protosanitizer.StripSecrets(req))
klog.Infof("GRPC call: %s requests %s", info.FullMethod, protosanitizer.StripSecrets(req))
}

resp, err := handler(ctx, req)
Expand Down
39 changes: 21 additions & 18 deletions pkg/zfs/mount.go
Original file line number Diff line number Diff line change
Expand Up @@ -147,24 +147,27 @@ func verifyMountRequest(vol *apis.ZFSVolume, mountpath string) error {
return status.Errorf(codes.Internal, "verifyMount: GetVolumePath failed %s", err.Error())
}

/*
* This check is the famous *Wall Of North*
* It will not let the volume to be mounted
* at more than two places. The volume should
* be unmounted before proceeding to the mount
* operation.
*/
currentMounts, err := GetMounts(devicePath)
if err != nil {
klog.Errorf("can not get mounts for volume:%s dev %s err: %v",
vol.Name, devicePath, err.Error())
return status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error())
} else if len(currentMounts) >= 1 {
klog.Errorf(
"can not mount, volume:%s already mounted dev %s mounts: %v",
vol.Name, devicePath, currentMounts,
)
return status.Errorf(codes.Internal, "verifyMount: device already mounted at %s", currentMounts)
// if it is not a shared volume, then make sure it is not mounted to more than one path
if vol.Spec.Shared != "yes" {
/*
* This check is the famous *Wall Of North*
* It will not let the volume to be mounted
* at more than two places. The volume should
* be unmounted before proceeding to the mount
* operation.
*/
currentMounts, err := GetMounts(devicePath)
if err != nil {
klog.Errorf("can not get mounts for volume:%s dev %s err: %v",
vol.Name, devicePath, err.Error())
return status.Errorf(codes.Internal, "verifyMount: Getmounts failed %s", err.Error())
} else if len(currentMounts) >= 1 {
klog.Errorf(
"can not mount, volume:%s already mounted dev %s mounts: %v",
vol.Name, devicePath, currentMounts,
)
return status.Errorf(codes.Internal, "verifyMount: device already mounted at %s", currentMounts)
}
}
return nil
}
Expand Down