Optionally create entities for cephfs storage pool #12538

Merged (7 commits) on Nov 28, 2023
3 changes: 3 additions & 0 deletions doc/api-extensions.md
@@ -2321,3 +2321,6 @@ Calling `POST /1.0/storage-pools/<pool>/custom/<volume>?target=<target>` will mo

## `disk_io_bus`
This introduces a new `io.bus` property to disk devices which can be used to override the bus the disk is attached to.
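
A hedged CLI sketch of the new property (the VM name `v1` and the `nvme` bus value are assumptions for illustration, not taken from this page):

```sh
# Override the bus used for a VM's root disk (assumes a VM named v1 with a profile-inherited root disk).
lxc config device override v1 root io.bus=nvme

# Inspect the resulting device configuration.
lxc config device show v1
```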

## `storage_cephfs_create_missing`
This introduces the configuration keys `cephfs.create_missing`, `cephfs.osd_pg_num`, `cephfs.meta_pool` and `cephfs.data_pool`, which can be set when adding a `cephfs` storage pool to instruct LXD to create the entities required by the pool if they do not already exist.
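
A minimal usage sketch combining these keys (the pool name `pool1` and the `myfs*` entity names are placeholders, not part of this change):

```sh
# Ask LXD to create the CephFS file system and its OSD pools if they are missing.
lxc storage create pool1 cephfs source=myfs \
    cephfs.create_missing=true \
    cephfs.meta_pool=myfs_meta \
    cephfs.data_pool=myfs_data \
    cephfs.osd_pg_num=32
```

This mirrors the invocation exercised by the test suite further down in this diff.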
4 changes: 4 additions & 0 deletions doc/reference/storage_cephfs.md
@@ -71,6 +71,10 @@ Key | Type | Default
`cephfs.fscache` | bool | `false` | Enable use of kernel `fscache` and `cachefilesd`
`cephfs.path` | string | `/` | The base path for the CephFS mount
`cephfs.user.name` | string | `admin` | The Ceph user to use
`cephfs.create_missing` | bool | `false` | Create the file system and missing data and metadata OSD pools
`cephfs.osd_pg_num` | string | - | OSD pool `pg_num` to use when creating missing OSD pools
`cephfs.meta_pool` | string | - | Metadata OSD pool name to create for the file system
`cephfs.data_pool` | string | - | Data OSD pool name to create for the file system
`source` | string | - | Existing CephFS file system or file system path to use
`volatile.pool.pristine` | string | `true` | Whether the CephFS file system was empty at creation time
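
When `cephfs.create_missing` is enabled, the created entities can be checked afterwards with standard Ceph tooling; a minimal sketch, assuming placeholder pool names `myfs_meta` and `myfs_data`:

```sh
# Confirm the file system and its OSD pools were created.
ceph fs ls
ceph osd pool ls | grep -E 'myfs_(meta|data)'
```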

118 changes: 114 additions & 4 deletions lxd/storage/drivers/driver_cephfs.go
@@ -9,6 +9,7 @@ import (

"github.com/canonical/lxd/lxd/migration"
"github.com/canonical/lxd/lxd/operations"
"github.com/canonical/lxd/lxd/revert"
"github.com/canonical/lxd/lxd/storage/filesystem"
"github.com/canonical/lxd/shared"
"github.com/canonical/lxd/shared/api"
@@ -105,6 +106,9 @@ func (d *cephfs) FillConfig() error {
// Create is called during pool creation and is effectively using an empty driver struct.
// WARNING: The Create() function cannot rely on any of the struct attributes being set.
func (d *cephfs) Create() error {
revert := revert.New()
defer revert.Fail()

err := d.FillConfig()
if err != nil {
return err
@@ -129,9 +133,109 @@ func (d *cephfs) Create() error {
fsPath = fields[1]
}

// Check that the filesystem exists.
if !d.fsExists(d.config["cephfs.cluster_name"], d.config["cephfs.user.name"], fsName) {
return fmt.Errorf("The requested '%v' CEPHFS doesn't exist", fsName)
// If the filesystem already exists, disallow keys associated with creating the filesystem.
fsExists, err := d.fsExists(d.config["cephfs.cluster_name"], d.config["cephfs.user.name"], fsName)
if err != nil {
return fmt.Errorf("Failed to check if %q CephFS exists: %w", fsName, err)
}

if fsExists {
for _, key := range []string{"create_missing", "osd_pg_num", "meta_pool", "data_pool"} {
cephfsSourceKey := fmt.Sprintf("cephfs.%s", key)
if d.config[cephfsSourceKey] != "" {
return fmt.Errorf("Invalid config key %q: CephFS filesystem already exists", cephfsSourceKey)
}
}
} else {
createMissing := shared.IsTrue(d.config["cephfs.create_missing"])
if !createMissing {
return fmt.Errorf("The requested %q CephFS doesn't exist", fsName)
}

// Default the pg_num to 32: a value has to be specified, but Ceph will adjust it automatically if necessary.
pgNum := d.config["cephfs.osd_pg_num"]
if pgNum == "" {
d.config["cephfs.osd_pg_num"] = "32"
}

// Create the meta and data pools if necessary.
for _, key := range []string{"cephfs.meta_pool", "cephfs.data_pool"} {
pool := d.config[key]

if pool == "" {
return fmt.Errorf("Missing required key %q for creating cephfs osd pool", key)
}

osdPoolExists, err := d.osdPoolExists(d.config["cephfs.cluster_name"], d.config["cephfs.user.name"], pool)
if err != nil {
return fmt.Errorf("Failed to check if %q OSD Pool exists: %w", pool, err)
}

if !osdPoolExists {
// Create new osd pool.
_, err := shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["cephfs.user.name"]),
"--cluster", d.config["cephfs.cluster_name"],
"osd",
"pool",
"create",
pool,
d.config["cephfs.osd_pg_num"],
)
if err != nil {
return fmt.Errorf("Failed to create ceph OSD pool %q: %w", pool, err)
}

revert.Add(func() {
// Delete the OSD pool.
_, _ = shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["cephfs.user.name"]),
"--cluster", d.config["cephfs.cluster_name"],
"osd",
"pool",
"delete",
pool,
pool,
"--yes-i-really-really-mean-it",
)
})
}
}

// Create the filesystem.
_, err := shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["cephfs.user.name"]),
"--cluster", d.config["cephfs.cluster_name"],
"fs",
"new",
fsName,
d.config["cephfs.meta_pool"],
d.config["cephfs.data_pool"],
)
if err != nil {
return fmt.Errorf("Failed to create CephFS %q: %w", fsName, err)
}

revert.Add(func() {
// Set the FS to fail so that we can remove it.
_, _ = shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["cephfs.user.name"]),
"--cluster", d.config["cephfs.cluster_name"],
"fs",
"fail",
fsName,
)

// Delete the FS.
_, _ = shared.RunCommand("ceph",
"--name", fmt.Sprintf("client.%s", d.config["cephfs.user.name"]),
"--cluster", d.config["cephfs.cluster_name"],
"fs",
"rm",
fsName,
"--yes-i-really-mean-it",
)
})
}

// Create a temporary mountpoint.
@@ -178,9 +282,11 @@ func (d *cephfs) Create() error {
// Check that the existing path is empty.
ok, _ := shared.PathIsEmpty(filepath.Join(mountPoint, fsPath))
if !ok {
return fmt.Errorf("Only empty CEPHFS paths can be used as a LXD storage pool")
return fmt.Errorf("Only empty CephFS paths can be used as a LXD storage pool")
}

revert.Success()

return nil
}

@@ -261,6 +367,10 @@ func (d *cephfs) Validate(config map[string]string) error {
"cephfs.fscache": validate.Optional(validate.IsBool),
"cephfs.path": validate.IsAny,
"cephfs.user.name": validate.IsAny,
"cephfs.create_missing": validate.Optional(validate.IsBool),
"cephfs.osd_pg_num": validate.Optional(validate.IsInt64),
"cephfs.meta_pool": validate.IsAny,
"cephfs.data_pool": validate.IsAny,
"volatile.pool.pristine": validate.IsAny,
}

36 changes: 34 additions & 2 deletions lxd/storage/drivers/driver_cephfs_utils.go
@@ -7,9 +7,41 @@ import (
)

// fsExists checks that the Ceph FS instance indeed exists.
func (d *cephfs) fsExists(clusterName string, userName string, fsName string) bool {
func (d *cephfs) fsExists(clusterName string, userName string, fsName string) (bool, error) {
_, err := shared.RunCommand("ceph", "--name", fmt.Sprintf("client.%s", userName), "--cluster", clusterName, "fs", "get", fsName)
return err == nil
if err != nil {
status, _ := shared.ExitStatus(err)
// If the error status code is 2, the fs definitely doesn't exist.
if status == 2 {
return false, nil
}

// Otherwise the exit status is neither 0 nor 2, so we can't be sure
// whether the fs exists: the failure might be a network issue,
// an internal Ceph issue, etc.
return false, err
}

return true, nil
}

// osdPoolExists checks that the Ceph OSD Pool indeed exists.
func (d *cephfs) osdPoolExists(clusterName string, userName string, osdPoolName string) (bool, error) {
_, err := shared.RunCommand("ceph", "--name", fmt.Sprintf("client.%s", userName), "--cluster", clusterName, "osd", "pool", "get", osdPoolName, "size")
if err != nil {
status, _ := shared.ExitStatus(err)
// If the error status code is 2, the pool definitely doesn't exist.
if status == 2 {
return false, nil
}

// Otherwise the exit status is neither 0 nor 2, so we can't be sure
// whether the pool exists: the failure might be a network issue,
// an internal Ceph issue, etc.
return false, err
}

return true, nil
}

// getConfig parses the Ceph configuration file and returns the list of monitors and secret key.
1 change: 1 addition & 0 deletions shared/version/api.go
@@ -389,6 +389,7 @@ var APIExtensions = []string{
"operation_wait",
"cluster_internal_custom_volume_copy",
"disk_io_bus",
"storage_cephfs_create_missing",
}

// APIExtensionsCount returns the number of available API extensions.
84 changes: 54 additions & 30 deletions test/suites/storage_driver_cephfs.sh
@@ -11,34 +11,58 @@ test_storage_driver_cephfs() {
lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")"
lxc storage delete cephfs

# Second create (confirm got cleaned up properly)
lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")"
lxc storage info cephfs

# Creation, rename and deletion
lxc storage volume create cephfs vol1
lxc storage volume set cephfs vol1 size 100MiB
lxc storage volume rename cephfs vol1 vol2
lxc storage volume copy cephfs/vol2 cephfs/vol1
lxc storage volume delete cephfs vol1
lxc storage volume delete cephfs vol2

# Snapshots
lxc storage volume create cephfs vol1
lxc storage volume snapshot cephfs vol1
lxc storage volume snapshot cephfs vol1
lxc storage volume snapshot cephfs vol1 blah1
lxc storage volume rename cephfs vol1/blah1 vol1/blah2
lxc storage volume snapshot cephfs vol1 blah1
lxc storage volume delete cephfs vol1/snap0
lxc storage volume delete cephfs vol1/snap1
lxc storage volume restore cephfs vol1 blah1
lxc storage volume copy cephfs/vol1 cephfs/vol2 --volume-only
lxc storage volume copy cephfs/vol1 cephfs/vol3 --volume-only
lxc storage volume delete cephfs vol1
lxc storage volume delete cephfs vol2
lxc storage volume delete cephfs vol3

# Cleanup
lxc storage delete cephfs
# Test invalid key combinations for auto-creation of cephfs entities.
! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.osd_pg_num=32 || false
! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.meta_pool=xyz || false
! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.data_pool=xyz || false
! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta || false


# Test cephfs storage volumes.
for fs in "cephfs" "cephfs2" ; do
if [ "${fs}" = "cephfs" ]; then
# Create one cephfs storage pool from a pre-existing file system and OSD pools.
lxc storage create "${fs}" cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")"
else
# Create one cephfs storage pool by letting LXD create the OSD pools and the file system itself.
lxc storage create "${fs}" cephfs source=cephfs2 cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta
fi

# Confirm the pool was created properly
lxc storage info "${fs}"

# Creation, rename and deletion
lxc storage volume create "${fs}" vol1
lxc storage volume set "${fs}" vol1 size 100MiB
lxc storage volume rename "${fs}" vol1 vol2
lxc storage volume copy "${fs}"/vol2 "${fs}"/vol1
lxc storage volume delete "${fs}" vol1
lxc storage volume delete "${fs}" vol2

# Snapshots
lxc storage volume create "${fs}" vol1
lxc storage volume snapshot "${fs}" vol1
lxc storage volume snapshot "${fs}" vol1
lxc storage volume snapshot "${fs}" vol1 blah1
lxc storage volume rename "${fs}" vol1/blah1 vol1/blah2
lxc storage volume snapshot "${fs}" vol1 blah1
lxc storage volume delete "${fs}" vol1/snap0
lxc storage volume delete "${fs}" vol1/snap1
lxc storage volume restore "${fs}" vol1 blah1
lxc storage volume copy "${fs}"/vol1 "${fs}"/vol2 --volume-only
lxc storage volume copy "${fs}"/vol1 "${fs}"/vol3 --volume-only
lxc storage volume delete "${fs}" vol1
lxc storage volume delete "${fs}" vol2
lxc storage volume delete "${fs}" vol3

# Cleanup
lxc storage delete "${fs}"

# Remove the filesystem so we can create a new one.
ceph fs fail "${fs}"
ceph fs rm "${fs}" --yes-i-really-mean-it
done

# Recreate the fs for other tests.
ceph fs new cephfs cephfs_meta cephfs_data --force
}