diff --git a/test/TEST-64-UDEV-STORAGE/test.sh b/test/TEST-64-UDEV-STORAGE/test.sh
index 2b9d0735e34..89de1ea6927 100755
--- a/test/TEST-64-UDEV-STORAGE/test.sh
+++ b/test/TEST-64-UDEV-STORAGE/test.sh
@@ -1,5 +1,13 @@
 #!/usr/bin/env bash
 # vi: ts=4 sw=4 tw=0 et:
+#
+# TODO:
+# * iSCSI
+# * LVM over iSCSI (?)
+# * SW raid (mdadm)
+# * LUKS -> MD (mdadm) -> LVM
+# * BTRFS
+# * MD BTRFS
 set -e

 TEST_DESCRIPTION="systemd-udev storage tests"
@@ -30,6 +38,11 @@ test_append_files() {
         install_multipath
     fi

+    # Configure LVM
+    if command -v lvm; then
+        install_lvm
+    fi
+
     for i in {0..127}; do
         dd if=/dev/zero of="${TESTDIR:?}/disk$i.img" bs=1M count=1
         echo "device$i" >"${TESTDIR:?}/disk$i.img"
@@ -246,6 +259,31 @@ testcase_simultaneous_events() {
     test_run_one "${1:?}"
 }

+testcase_lvm_basic() {
+    if ! command -v lvm; then
+        echo "Missing lvm tools, skipping the test..."
+        return 77
+    fi
+
+    local qemu_opts=("-device ahci,id=ahci0")
+    local diskpath
+
+    # Attach 4 SATA disks to the VM (and set their model and serial fields
+    # to something predictable, so we can refer to them later)
+    for i in {0..3}; do
+        diskpath="${TESTDIR:?}/lvmbasic${i}.img"
+        dd if=/dev/zero of="$diskpath" bs=1M count=32
+        qemu_opts+=(
+            "-device ide-hd,bus=ahci0.$i,drive=drive$i,model=foobar,serial=deadbeeflvm$i"
+            "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive$i"
+        )
+    done
+
+    KERNEL_APPEND="systemd.setenv=TEST_FUNCTION_NAME=${FUNCNAME[0]} ${USER_KERNEL_APPEND:-}"
+    QEMU_OPTIONS="${qemu_opts[*]} ${USER_QEMU_OPTIONS:-}"
+    test_run_one "${1:?}"
+}
+
 # Allow overriding which tests should be run from the "outside", useful for manual
 # testing (make -C test/... TESTCASES="testcase1 testcase2")
 if [[ -v "TESTCASES" && -n "$TESTCASES" ]]; then
diff --git a/test/units/testsuite-64.sh b/test/units/testsuite-64.sh
index 951a5f42914..1574d51401b 100755
--- a/test/units/testsuite-64.sh
+++ b/test/units/testsuite-64.sh
@@ -193,6 +193,92 @@ EOF
     rm -f "$partscript"
 }

+testcase_lvm_basic() {
+    local i part
+    local vgroup="MyTestGroup$RANDOM"
+    local devices=(
+        /dev/disk/by-id/ata-foobar_deadbeeflvm{0..3}
+    )
+
+    # Make sure all the necessary soon-to-be-LVM devices exist
+    ls -l "${devices[@]}"
+
+    # Add all test devices into a volume group, create two logical volumes,
+    # and check if necessary symlinks exist (and are valid)
+    lvm pvcreate -y "${devices[@]}"
+    lvm pvs
+    lvm vgcreate "$vgroup" -y "${devices[@]}"
+    lvm vgs
+    lvm vgchange -ay "$vgroup"
+    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
+    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
+    lvm lvs
+    udevadm settle
+    test -e "/dev/$vgroup/mypart1"
+    test -e "/dev/$vgroup/mypart2"
+    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
+    udevadm settle
+    test -e "/dev/disk/by-label/mylvpart1"
+    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
+
+    # Disable the VG and check symlinks...
+    lvm vgchange -an "$vgroup"
+    udevadm settle
+    test ! -e "/dev/$vgroup"
+    test ! -e "/dev/disk/by-label/mylvpart1"
+    helper_check_device_symlinks "/dev/disk"
+
+    # Reenable the VG and check the symlinks again if all LVs are properly activated
+    lvm vgchange -ay "$vgroup"
+    udevadm settle
+    test -e "/dev/$vgroup/mypart1"
+    test -e "/dev/$vgroup/mypart2"
+    test -e "/dev/disk/by-label/mylvpart1"
+    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
+
+    # Same as above, but now with more "stress"
+    for i in {1..100}; do
+        lvm vgchange -an "$vgroup"
+        lvm vgchange -ay "$vgroup"
+
+        if ((i % 10 == 0)); then
+            udevadm settle
+            test -e "/dev/$vgroup/mypart1"
+            test -e "/dev/$vgroup/mypart2"
+            test -e "/dev/disk/by-label/mylvpart1"
+            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
+        fi
+    done
+
+    # Remove the first LV
+    lvm lvremove -y "$vgroup/mypart1"
+    udevadm settle
+    test ! -e "/dev/$vgroup/mypart1"
+    test -e "/dev/$vgroup/mypart2"
+    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
+
+    # Create & remove LVs in a loop, i.e. with more "stress"
+    for i in {1..50}; do
+        # 1) Create 16 logical volumes
+        for part in {0..15}; do
+            lvm lvcreate -y -L 4M "$vgroup" -n "looppart$part"
+        done
+
+        # 2) Immediately remove them
+        lvm lvremove -y "$vgroup"/looppart{0..15}
+
+        # 3) On every 10th iteration settle udev and check if all partitions are
+        #    indeed gone, and if all symlinks are still valid
+        if ((i % 10 == 0)); then
+            udevadm settle
+            for part in {0..15}; do
+                test ! -e "/dev/$vgroup/looppart$part"
+            done
+            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
+        fi
+    done
+}
+
 : >/failed

 udevadm settle
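Usage note: the new testcase_lvm_basic can be exercised on its own through the TESTCASES override documented in test.sh above. A possible invocation (a sketch; the clean/setup/run targets are assumed from the usual systemd integration-test Makefiles):

    # assumes the standard per-test Makefile targets; TESTCASES comes from test.sh
    sudo make -C test/TEST-64-UDEV-STORAGE clean setup run TESTCASES=testcase_lvm_basic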