From f80d9d8ef36d032e2e5e83179fa535ac3f336540 Mon Sep 17 00:00:00 2001
From: Max Asnaashari
Date: Tue, 28 Nov 2023 00:36:03 +0000
Subject: [PATCH] test/suites: Add cephfs create_missing test

Signed-off-by: Max Asnaashari
---
 test/suites/storage_driver_cephfs.sh | 82 ++++++++++++++++++----------
 1 file changed, 52 insertions(+), 30 deletions(-)

diff --git a/test/suites/storage_driver_cephfs.sh b/test/suites/storage_driver_cephfs.sh
index 233f415e1586..5bdfae3a35a4 100644
--- a/test/suites/storage_driver_cephfs.sh
+++ b/test/suites/storage_driver_cephfs.sh
@@ -11,34 +11,56 @@ test_storage_driver_cephfs() {
   lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")"
   lxc storage delete cephfs
 
-  # Second create (confirm got cleaned up properly)
-  lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")"
-  lxc storage info cephfs
-
-  # Creation, rename and deletion
-  lxc storage volume create cephfs vol1
-  lxc storage volume set cephfs vol1 size 100MiB
-  lxc storage volume rename cephfs vol1 vol2
-  lxc storage volume copy cephfs/vol2 cephfs/vol1
-  lxc storage volume delete cephfs vol1
-  lxc storage volume delete cephfs vol2
-
-  # Snapshots
-  lxc storage volume create cephfs vol1
-  lxc storage volume snapshot cephfs vol1
-  lxc storage volume snapshot cephfs vol1
-  lxc storage volume snapshot cephfs vol1 blah1
-  lxc storage volume rename cephfs vol1/blah1 vol1/blah2
-  lxc storage volume snapshot cephfs vol1 blah1
-  lxc storage volume delete cephfs vol1/snap0
-  lxc storage volume delete cephfs vol1/snap1
-  lxc storage volume restore cephfs vol1 blah1
-  lxc storage volume copy cephfs/vol1 cephfs/vol2 --volume-only
-  lxc storage volume copy cephfs/vol1 cephfs/vol3 --volume-only
-  lxc storage volume delete cephfs vol1
-  lxc storage volume delete cephfs vol2
-  lxc storage volume delete cephfs vol3
-
-  # Cleanup
-  lxc storage delete cephfs
+  # Test invalid key combinations for auto-creation of cephfs entities.
+  ! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.osd_pg_num=32 || false
+  ! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.meta_pool=xyz || false
+  ! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.data_pool=xyz || false
+  ! lxc storage create cephfs cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")" cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta || false
+
+
+  # Test cephfs storage volumes.
+  for fs in "cephfs" "cephfs2" ; do
+    if [ "${fs}" = "cephfs" ]; then
+      # Create one cephfs with pre-existing OSDs.
+      lxc storage create "${fs}" cephfs source="${LXD_CEPH_CEPHFS}/$(basename "${LXD_DIR}")"
+    else
+      # Create one cephfs by creating the OSDs and the cephfs itself.
+      lxc storage create "${fs}" cephfs source=cephfs2 cephfs.create_missing=true cephfs.data_pool=xyz_data cephfs.meta_pool=xyz_meta
+    fi
+
+    # Confirm got cleaned up properly
+    lxc storage info "${fs}"
+
+    # Creation, rename and deletion
+    lxc storage volume create "${fs}" vol1
+    lxc storage volume set "${fs}" vol1 size 100MiB
+    lxc storage volume rename "${fs}" vol1 vol2
+    lxc storage volume copy "${fs}"/vol2 "${fs}"/vol1
+    lxc storage volume delete "${fs}" vol1
+    lxc storage volume delete "${fs}" vol2
+
+    # Snapshots
+    lxc storage volume create "${fs}" vol1
+    lxc storage volume snapshot "${fs}" vol1
+    lxc storage volume snapshot "${fs}" vol1
+    lxc storage volume snapshot "${fs}" vol1 blah1
+    lxc storage volume rename "${fs}" vol1/blah1 vol1/blah2
+    lxc storage volume snapshot "${fs}" vol1 blah1
+    lxc storage volume delete "${fs}" vol1/snap0
+    lxc storage volume delete "${fs}" vol1/snap1
+    lxc storage volume restore "${fs}" vol1 blah1
+    lxc storage volume copy "${fs}"/vol1 "${fs}"/vol2 --volume-only
+    lxc storage volume copy "${fs}"/vol1 "${fs}"/vol3 --volume-only
+    lxc storage volume delete "${fs}" vol1
+    lxc storage volume delete "${fs}" vol2
+    lxc storage volume delete "${fs}" vol3
+
+    # Cleanup
+    lxc storage delete "${fs}"
+
+    # Remove the filesystem so we can create a new one.
+    ceph fs fail "${fs}"
+    ceph fs rm "${fs}" --yes-i-really-mean-it
+  done
+
 }