Merge pull request #138 from ceph/devel
Sync the upstream changes from `ceph/ceph-csi:devel` into the `devel` branch
openshift-merge-robot authored Oct 14, 2022
2 parents b01e71d + b7703fa commit 1cce89f
Showing 13 changed files with 147 additions and 40 deletions.
8 changes: 0 additions & 8 deletions docs/ceph-csi-upgrade.md
@@ -33,7 +33,6 @@
- [6. Upgrade NFS Nodeplugin resources](#6-upgrade-nfs-nodeplugin-resources)
- [6.1 Update the NFS Nodeplugin RBAC](#61-update-the-nfs-nodeplugin-rbac)
- [6.2 Update the NFS Nodeplugin daemonset](#62-update-the-nfs-nodeplugin-daemonset)
- [6.3 Delete the old NFS Nodeplugin daemonset](#63-delete-the-old-nfs-nodeplugin-daemonset)
- [CSI Sidecar containers consideration](#csi-sidecar-containers-consideration)

## Pre-upgrade considerations
@@ -392,13 +391,6 @@ daemonset.apps/csi-nfsplugin configured
service/csi-metrics-nfsplugin configured
```

##### 6.3 Delete the old NFS Nodeplugin daemonset

```bash
$ kubectl delete daemonsets.apps csi-nfs-node
daemonset.apps "csi-nfs-node" deleted
```

We have successfully upgraded the NFS CSI driver from v3.6 to v3.7.

### CSI Sidecar containers consideration
9 changes: 9 additions & 0 deletions e2e/deployment.go
@@ -124,6 +124,9 @@ func waitForDeploymentInAvailableState(clientSet kubernetes.Interface, name, ns
if isRetryableAPIError(err) {
return false, nil
}
if apierrs.IsNotFound(err) {
return false, nil
}
e2elog.Logf("%q deployment to be Available (%d seconds elapsed)", name, int(time.Since(start).Seconds()))

return false, err
@@ -390,6 +393,12 @@ func waitForContainersArgsUpdate(
) error {
e2elog.Logf("waiting for deployment updates %s/%s", ns, deploymentName)

// wait for the deployment to be available
err := waitForDeploymentInAvailableState(c, deploymentName, ns, deployTimeout)
if err != nil {
return fmt.Errorf("deployment %s/%s did not become available yet: %w", ns, deploymentName, err)
}

// Scale down to 0.
scale, err := c.AppsV1().Deployments(ns).GetScale(context.TODO(), deploymentName, metav1.GetOptions{})
if err != nil {
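Both e2e/deployment.go hunks harden the e2e wait logic: the poll loop in waitForDeploymentInAvailableState now treats a NotFound error as retryable rather than fatal, and waitForContainersArgsUpdate first waits for the deployment to become available before scaling it down. A minimal standalone sketch of that poll pattern, assuming client-go; the waitForDeployment wrapper here is illustrative, not part of this change:

```go
package main

import (
	"context"
	"fmt"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForDeployment polls until the named Deployment reports an Available
// condition. As in the patched waitForDeploymentInAvailableState, a NotFound
// error keeps the poll going: during upgrade tests the Deployment may simply
// not have been (re)created yet.
func waitForDeployment(c kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			if apierrs.IsNotFound(err) {
				return false, nil // not created yet, retry
			}
			return false, fmt.Errorf("failed to get deployment %s/%s: %w", ns, name, err)
		}
		for _, cond := range d.Status.Conditions {
			if cond.Type == appsv1.DeploymentAvailable && cond.Status == corev1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil // exists but not Available yet, retry
	})
}

func main() {
	// wiring up a real clientset is omitted; see e2e/deployment.go for usage
}
```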
1 change: 1 addition & 0 deletions e2e/e2e_test.go
@@ -39,6 +39,7 @@ func init() {
flag.BoolVar(&deployNFS, "deploy-nfs", false, "deploy nfs csi driver")
flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
flag.BoolVar(&testNBD, "test-nbd", false, "test rbd csi driver with rbd-nbd mounter")
flag.BoolVar(&testNFS, "test-nfs", false, "test nfs csi driver")
flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")
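The new -test-nbd flag makes the rbd-nbd specs opt-in; previously they ran whenever the rbd tests did. A self-contained sketch of the flag-plus-guard pattern the suite now uses (the main function is illustrative only, not part of the change):

```go
package main

import (
	"flag"
	"fmt"
)

// mirrors the new flag registered in e2e/e2e_test.go
var testNBD = flag.Bool("test-nbd", false, "test rbd csi driver with rbd-nbd mounter")

func main() {
	flag.Parse()
	// each rbd-nbd spec now opens with this guard: when the flag is off,
	// the spec logs and returns early instead of exercising rbd-nbd
	if !*testNBD {
		fmt.Println("skipping NBD test")
		return
	}
	fmt.Println("running rbd-nbd specs")
}
```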
10 changes: 9 additions & 1 deletion e2e/pvc.go
@@ -284,21 +284,29 @@ func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim
int(time.Since(start).Seconds()))
pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
if err == nil {
e2elog.Logf("PVC %s (status: %s) has not been deleted yet, rechecking...", name, pvc.Status)

return false, nil
}
if isRetryableAPIError(err) {
e2elog.Logf("failed to verify deletion of PVC %s (status: %s): %v", name, pvc.Status, err)

return false, nil
}
if !apierrs.IsNotFound(err) {
return false, fmt.Errorf("get on deleted PVC %v failed with error other than \"not found\": %w", name, err)
}

// Examine the pv.ClaimRef and UID. Expect nil values.
_, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
oldPV, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
if err == nil {
e2elog.Logf("PV %s (status: %s) has not been deleted yet, rechecking...", pv.Name, oldPV.Status)

return false, nil
}
if isRetryableAPIError(err) {
e2elog.Logf("failed to verify deletion of PV %s (status: %s): %v", pv.Name, oldPV.Status, err)

return false, nil
}
if !apierrs.IsNotFound(err) {
54 changes: 54 additions & 0 deletions e2e/rbd.go
@@ -1047,6 +1047,12 @@ var _ = Describe("RBD", func() {
})

By("create a PVC and bind it to an app using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1083,6 +1089,12 @@ var _ = Describe("RBD", func() {
})

By("Resize rbd-nbd PVC and check application directory size", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

if util.CheckKernelSupport(kernelRelease, nbdResizeSupport) {
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
@@ -1290,6 +1302,12 @@ var _ = Describe("RBD", func() {

By("create PVC with journaling,fast-diff image-features and bind it to an app using rbd-nbd mounter",
func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

if util.CheckKernelSupport(kernelRelease, fastDiffSupport) {
err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
@@ -1330,6 +1348,12 @@ var _ = Describe("RBD", func() {
// NOTE: RWX is restricted for FileSystem VolumeMode at ceph-csi,
// see pull#261 for more details.
By("Create RWX+Block Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1415,6 +1439,12 @@ var _ = Describe("RBD", func() {
})

By("Create ROX+FS Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1540,6 +1570,12 @@ var _ = Describe("RBD", func() {
})

By("Create ROX+Block Mode PVC and bind to multiple pods via deployment using rbd-nbd mounter", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1666,6 +1702,12 @@ var _ = Describe("RBD", func() {
})

By("perform IO on rbd-nbd volume after nodeplugin restart", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -1830,6 +1872,12 @@ var _ = Describe("RBD", func() {
})

By("create a PVC and bind it to an app using rbd-nbd mounter with encryption", func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
@@ -2199,6 +2247,12 @@ var _ = Describe("RBD", func() {
By(
"create a PVC and Bind it to an app with journaling/exclusive-lock image-features and rbd-nbd mounter",
func() {
if !testNBD {
e2elog.Logf("skipping NBD test")

return
}

err := deleteResource(rbdExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass: %v", err)
1 change: 1 addition & 0 deletions e2e/utils.go
@@ -85,6 +85,7 @@ var (
deployNFS bool
testCephFS bool
testRBD bool
testNBD bool
testNFS bool
helmTest bool
upgradeTesting bool
2 changes: 1 addition & 1 deletion internal/cephfs/nodeserver.go
@@ -637,7 +637,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
}

if stat.Mode().IsDir() {
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
}

return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)
53 changes: 30 additions & 23 deletions internal/csi-common/utils.go
@@ -241,6 +241,7 @@ func FilesystemNodeGetVolumeStats(
ctx context.Context,
mounter mount.Interface,
targetPath string,
includeInodes bool,
) (*csi.NodeGetVolumeStatsResponse, error) {
isMnt, err := util.IsMountPoint(mounter, targetPath)
if err != nil {
@@ -274,38 +275,44 @@ if !ok {
if !ok {
log.ErrorLog(ctx, "failed to fetch used bytes")
}
inodes, ok := (*(volMetrics.Inodes)).AsInt64()
if !ok {
log.ErrorLog(ctx, "failed to fetch available inodes")

return nil, status.Error(codes.Unknown, "failed to fetch available inodes")
}
inodesFree, ok := (*(volMetrics.InodesFree)).AsInt64()
if !ok {
log.ErrorLog(ctx, "failed to fetch free inodes")
}

inodesUsed, ok := (*(volMetrics.InodesUsed)).AsInt64()
if !ok {
log.ErrorLog(ctx, "failed to fetch used inodes")
}

return &csi.NodeGetVolumeStatsResponse{
res := &csi.NodeGetVolumeStatsResponse{
Usage: []*csi.VolumeUsage{
{
Available: requirePositive(available),
Total: requirePositive(capacity),
Used: requirePositive(used),
Unit: csi.VolumeUsage_BYTES,
},
{
Available: requirePositive(inodesFree),
Total: requirePositive(inodes),
Used: requirePositive(inodesUsed),
Unit: csi.VolumeUsage_INODES,
},
},
}, nil
}

if includeInodes {
inodes, ok := (*(volMetrics.Inodes)).AsInt64()
if !ok {
log.ErrorLog(ctx, "failed to fetch available inodes")

return nil, status.Error(codes.Unknown, "failed to fetch available inodes")
}
inodesFree, ok := (*(volMetrics.InodesFree)).AsInt64()
if !ok {
log.ErrorLog(ctx, "failed to fetch free inodes")
}

inodesUsed, ok := (*(volMetrics.InodesUsed)).AsInt64()
if !ok {
log.ErrorLog(ctx, "failed to fetch used inodes")
}

res.Usage = append(res.Usage, &csi.VolumeUsage{
Available: requirePositive(inodesFree),
Total: requirePositive(inodes),
Used: requirePositive(inodesUsed),
Unit: csi.VolumeUsage_INODES,
})
}

return res, nil
}

// requirePositive returns the value for `x` when it is greater or equal to 0,
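This is the core change of the sync: FilesystemNodeGetVolumeStats gains an includeInodes parameter and appends the INODES usage entry only when it is true. The RBD node server (and the unit test) pass true, while the CephFS and NFS node servers pass false, presumably because the inode counts those filesystems report are not meaningful to consumers. A standalone sketch of the resulting response shape, using the CSI spec types (buildUsage is an illustrative helper, not upstream code):

```go
package main

import (
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi"
)

// buildUsage mirrors the new shape of FilesystemNodeGetVolumeStats: the
// BYTES entry is always reported, while the INODES entry is only appended
// when the caller opts in via includeInodes.
func buildUsage(available, total, used, inodesFree, inodes, inodesUsed int64,
	includeInodes bool,
) *csi.NodeGetVolumeStatsResponse {
	res := &csi.NodeGetVolumeStatsResponse{
		Usage: []*csi.VolumeUsage{{
			Available: available,
			Total:     total,
			Used:      used,
			Unit:      csi.VolumeUsage_BYTES,
		}},
	}
	if includeInodes {
		res.Usage = append(res.Usage, &csi.VolumeUsage{
			Available: inodesFree,
			Total:     inodes,
			Used:      inodesUsed,
			Unit:      csi.VolumeUsage_INODES,
		})
	}

	return res
}

func main() {
	// CephFS/NFS-style call: bytes only
	fmt.Println(buildUsage(1<<30, 2<<30, 1<<30, 0, 0, 0, false))
	// RBD-style call: bytes plus inodes
	fmt.Println(buildUsage(1<<30, 2<<30, 1<<30, 900, 1000, 100, true))
}
```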
2 changes: 1 addition & 1 deletion internal/csi-common/utils_test.go
@@ -88,7 +88,7 @@ func TestFilesystemNodeGetVolumeStats(t *testing.T) {

// retry until a mountpoint is found
for {
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd)
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd, true)
if err != nil && cwd != "/" && strings.HasSuffix(err.Error(), "is not mounted") {
// try again with the parent directory
cwd = filepath.Dir(cwd)
2 changes: 1 addition & 1 deletion internal/nfs/nodeserver/nodeserver.go
@@ -182,7 +182,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
}

if stat.Mode().IsDir() {
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, false)
}

return nil, status.Errorf(codes.InvalidArgument,
2 changes: 1 addition & 1 deletion internal/rbd/nodeserver.go
@@ -1240,7 +1240,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
}

if stat.Mode().IsDir() {
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath, true)
} else if (stat.Mode() & os.ModeDevice) == os.ModeDevice {
return blockNodeGetVolumeStats(ctx, targetPath)
}
2 changes: 1 addition & 1 deletion scripts/install-helm.sh
@@ -136,7 +136,7 @@ install() {
mkdir -p ${TEMP}
# shellcheck disable=SC2021
dist=$(echo "${dist}" | tr "[A-Z]" "[a-z]")
wget "https://get.helm.sh/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz"
wget "https://get.helm.sh/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz" || exit 1
tar -C "${TEMP}" -zxvf "${TEMP}/helm.tar.gz"
fi
echo "Helm install successful"