test: use udevadm wait and lock

Hopefully fixes #22911.
This commit is contained in:
Yu Watanabe 2022-04-05 03:31:58 +09:00
parent d90dbba7ec
commit 14f9c81fb4

View file

@@ -41,132 +41,6 @@ helper_check_device_symlinks() {(
done < <(find "${paths[@]}" -type l)
)}
# Dispatch to either `helper_wait_for_pvscan()` or `helper_wait_for_lvm_activate()`,
# covering the differences between pre and post lvm 2.03.14, which introduced
# a new way of vgroup autoactivation
# See: https://sourceware.org/git/?p=lvm2.git;a=commit;h=67722b312390cdab29c076c912e14bd739c5c0f6
# Arguments:
#   $1 - device path (for helper_wait_for_pvscan())
#   $2 - volume group name (for helper_wait_for_lvm_activate())
#   $3 - number of retries (default: 10)
helper_wait_for_vgroup() {
    local device="${1:?}"
    local vg_name="${2:?}"
    local retries="${3:-10}"

    if systemctl -q list-unit-files lvm2-pvscan@.service >/dev/null; then
        # Pre-2.03.14: autoactivation goes through the lvm2-pvscan@.service template
        helper_wait_for_pvscan "$device" "$retries"
    else
        # 2.03.14+: VGs are activated via transient lvm-activate-* units
        helper_wait_for_lvm_activate "$vg_name" "$retries"
    fi
}
# Wait for the lvm-activate-$vgroup.service of a specific $vgroup to finish
# Arguments:
#   $1 - volume group name
#   $2 - number of retries (default: 10)
helper_wait_for_lvm_activate() {
    local vg_name="${1:?}"
    local attempts="${2:-10}"
    local try unit

    unit="lvm-activate-$vg_name.service"

    for ((try = 0; try < attempts; try++)); do
        if systemctl -q is-active "$unit"; then
            # The unit is started via `systemd-run --no-block`, so we have to
            # wait until it actually finishes, otherwise we might continue
            # while `vgchange` is still running
            [[ "$(systemctl show -P SubState "$unit")" == exited ]] && return 0
        elif [[ "$(systemctl show -P LoadState "$unit")" == not-found ]]; then
            # Since lvm 2.03.15 the transient lvm-activate unit no longer
            # remains loaded after finishing, so a non-existent unit counts
            # as a success as well
            # See: https://sourceware.org/git/?p=lvm2.git;a=commit;h=fbd8b0cf43dc67f51f86f060dce748f446985855
            return 0
        fi

        sleep .5
    done

    return 1
}
# Wait for the lvm2-pvscan@.service of a specific device to finish
# Arguments:
#   $1 - device path
#   $2 - number of retries (default: 10)
helper_wait_for_pvscan() {
    local dev="${1:?}"
    local ntries="${2:-10}"
    local MAJOR MINOR i pvscan_svc real_dev

    # Sanity check we got a valid block device (or a symlink to it)
    real_dev="$(readlink -f "$dev")"
    if [[ ! -b "$real_dev" ]]; then
        # Quote only the device path, not the whole message
        echo >&2 "ERROR: '$dev' ($real_dev) is not a valid block device"
        return 1
    fi

    # Get major and minor numbers from the udev database
    # (udevadm returns MAJOR= and MINOR= expressions, so let's pull them into
    # the current environment via `source` for easier parsing)
    #
    # shellcheck source=/dev/null
    source <(udevadm info -q property "$real_dev" | grep -E "(MAJOR|MINOR)=")
    # Sanity check if we got correct major and minor numbers; fail explicitly
    # instead of relying on `set -e` being active in the caller
    if ! test -e "/sys/dev/block/$MAJOR:$MINOR/"; then
        echo >&2 "ERROR: invalid major:minor pair '$MAJOR:$MINOR' for device '$real_dev'"
        return 1
    fi

    # Wait n_tries*0.5 seconds until the respective lvm2-pvscan service becomes
    # active (i.e. it got executed and finished)
    pvscan_svc="lvm2-pvscan@$MAJOR:$MINOR.service"
    for ((i = 0; i < ntries; i++)); do
        ! systemctl -q is-active "$pvscan_svc" || return 0
        sleep .5
    done

    return 1
}
# Generate an `flock` command line for a device list
#
# This is useful mainly for mkfs.btrfs, which doesn't hold the lock on each
# device for the entire duration of mkfs.btrfs, causing weird races between udev
# and mkfs.btrfs. This function creates an array of chained flock calls to take
# the lock of all involved devices, which can be then used in combination with
# mkfs.btrfs to mitigate the issue.
#
# For example, calling:
#   helper_generate_flock_cmdline my_array /dev/loop1 /dev/loop2 /dev/loop3
#
# will result in "${my_array[@]}" containing:
#   flock -x /dev/loop1 flock -x /dev/loop2 flock -x /dev/loop3
#
# Note: the array will be CLEARED before the first assignment
#
# Arguments:
#   $1 - NAME of an array in which the commands/argument will be stored
#   $2-$n - path to devices
helper_generate_flock_cmdline() {
    # Create a name reference to the array passed as the first argument
    # (requires bash 4.3+)
    local -n cmd_array="${1:?}"
    # Declare the loop variable local so it doesn't leak into (or clobber
    # a variable of the same name in) the caller's scope
    local dev
    shift

    if [[ $# -eq 0 ]]; then
        echo >&2 "Missing argument(s): device path(s)"
        return 1
    fi

    cmd_array=()
    for dev in "$@"; do
        cmd_array+=("flock" "-x" "$dev")
    done
}
testcase_megasas2_basic() {
lsblk -S
[[ "$(lsblk --scsi --noheadings | wc -l)" -ge 128 ]]
@@ -235,9 +109,7 @@ EOF
"/dev/disk/by-label/failover_vol"
"/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
)
for link in "${part_links[@]}"; do
test -e "$link"
done
udevadm wait --settle --timeout=30 "${part_links[@]}"
# Choose a random symlink to the failover data partition each time, for
# a better coverage
@@ -268,9 +140,7 @@ EOF
echo -n "$expected" >"$mpoint/test"
# Make sure all symlinks are still valid
for link in "${part_links[@]}"; do
test -e "$link"
done
udevadm wait --settle --timeout=30 "${part_links[@]}"
done
multipath -l "$path"
@@ -311,7 +181,7 @@ EOF
sfdisk -q -X gpt "$blockdev" <"$partscript"
if ((i % 10 == 0)); then
udevadm settle
udevadm wait --settle --timeout=30 "$blockdev"
helper_check_device_symlinks
fi
done
@@ -339,27 +209,19 @@ testcase_lvm_basic() {
lvm lvcreate -y -L 4M "$vgroup" -n mypart1
lvm lvcreate -y -L 8M "$vgroup" -n mypart2
lvm lvs
udevadm settle
test -e "/dev/$vgroup/mypart1"
test -e "/dev/$vgroup/mypart2"
udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
udevadm settle
test -e "/dev/disk/by-label/mylvpart1"
udevadm wait --settle --timeout=30 "/dev/disk/by-label/mylvpart1"
helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
# Disable the VG and check symlinks...
lvm vgchange -an "$vgroup"
udevadm settle
test ! -e "/dev/$vgroup"
test ! -e "/dev/disk/by-label/mylvpart1"
udevadm wait --settle --timeout=30 --removed "/dev/$vgroup" "/dev/disk/by-label/mylvpart1"
helper_check_device_symlinks "/dev/disk"
# reenable the VG and check the symlinks again if all LVs are properly activated
lvm vgchange -ay "$vgroup"
udevadm settle
test -e "/dev/$vgroup/mypart1"
test -e "/dev/$vgroup/mypart2"
test -e "/dev/disk/by-label/mylvpart1"
udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
# Same as above, but now with more "stress"
@@ -368,19 +230,15 @@ testcase_lvm_basic() {
lvm vgchange -ay "$vgroup"
if ((i % 5 == 0)); then
udevadm settle
test -e "/dev/$vgroup/mypart1"
test -e "/dev/$vgroup/mypart2"
test -e "/dev/disk/by-label/mylvpart1"
udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
fi
done
# Remove the first LV
lvm lvremove -y "$vgroup/mypart1"
udevadm settle
test ! -e "/dev/$vgroup/mypart1"
test -e "/dev/$vgroup/mypart2"
udevadm wait --settle --timeout=30 --removed "/dev/$vgroup/mypart1"
udevadm wait --timeout=0 "/dev/$vgroup/mypart2"
helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
# Create & remove LVs in a loop, i.e. with more "stress"
@@ -396,9 +254,8 @@ testcase_lvm_basic() {
# 3) On every 4th iteration settle udev and check if all partitions are
# indeed gone, and if all symlinks are still valid
if ((i % 4 == 0)); then
udevadm settle
for part in {0..15}; do
test ! -e "/dev/$vgroup/looppart$part"
udevadm wait --settle --timeout=30 --removed "/dev/$vgroup/looppart$part"
done
helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
fi
@@ -407,7 +264,6 @@ testcase_lvm_basic() {
testcase_btrfs_basic() {
local dev_stub i label mpoint uuid
local flock_cmd=()
local devices=(
/dev/disk/by-id/ata-foobar_deadbeefbtrfs{0..3}
)
@@ -417,12 +273,9 @@ testcase_btrfs_basic() {
echo "Single device: default settings"
uuid="deadbeef-dead-dead-beef-000000000000"
label="btrfs_root"
helper_generate_flock_cmdline flock_cmd "${devices[0]}"
"${flock_cmd[@]}" mkfs.btrfs -L "$label" -U "$uuid" "${devices[0]}"
udevadm settle
udevadm lock --device="${devices[0]}" mkfs.btrfs -L "$label" -U "$uuid" "${devices[0]}"
udevadm wait --settle --timeout=30 "${devices[0]}" "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
btrfs filesystem show
test -e "/dev/disk/by-uuid/$uuid"
test -e "/dev/disk/by-label/$label"
helper_check_device_symlinks
echo "Multiple devices: using partitions, data: single, metadata: raid1"
@@ -436,26 +289,25 @@ name="diskpart2", size=85M
name="diskpart3", size=85M
name="diskpart4", size=85M
EOF
udevadm settle
# We need to flock only the device itself, not its partitions
helper_generate_flock_cmdline flock_cmd "${devices[0]}"
"${flock_cmd[@]}" mkfs.btrfs -d single -m raid1 -L "$label" -U "$uuid" /dev/disk/by-partlabel/diskpart{1..4}
udevadm settle
udevadm wait --settle --timeout=30 /dev/disk/by-partlabel/diskpart{1..4}
udevadm lock --device="${devices[0]}" mkfs.btrfs -d single -m raid1 -L "$label" -U "$uuid" /dev/disk/by-partlabel/diskpart{1..4}
udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
btrfs filesystem show
test -e "/dev/disk/by-uuid/$uuid"
test -e "/dev/disk/by-label/$label"
helper_check_device_symlinks
wipefs -a -f "${devices[0]}"
udevadm wait --settle --timeout=30 --removed /dev/disk/by-partlabel/diskpart{1..4}
echo "Multiple devices: using disks, data: raid10, metadata: raid10, mixed mode"
uuid="deadbeef-dead-dead-beef-000000000002"
label="btrfs_mdisk"
helper_generate_flock_cmdline flock_cmd "${devices[@]}"
"${flock_cmd[@]}" mkfs.btrfs -M -d raid10 -m raid10 -L "$label" -U "$uuid" "${devices[@]}"
udevadm settle
udevadm lock \
--device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs0 \
--device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs1 \
--device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs2 \
--device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs3 \
mkfs.btrfs -M -d raid10 -m raid10 -L "$label" -U "$uuid" "${devices[@]}"
udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
btrfs filesystem show
test -e "/dev/disk/by-uuid/$uuid"
test -e "/dev/disk/by-label/$label"
helper_check_device_symlinks
echo "Multiple devices: using LUKS encrypted disks, data: raid1, metadata: raid1, mixed mode"
@@ -475,9 +327,7 @@ EOF
cryptsetup luksFormat -q \
--use-urandom --pbkdf pbkdf2 --pbkdf-force-iterations 1000 \
--uuid "deadbeef-dead-dead-beef-11111111111$i" --label "encdisk$i" "${devices[$i]}" /etc/btrfs_keyfile
udevadm settle
test -e "/dev/disk/by-uuid/deadbeef-dead-dead-beef-11111111111$i"
test -e "/dev/disk/by-label/encdisk$i"
udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/deadbeef-dead-dead-beef-11111111111$i" "/dev/disk/by-label/encdisk$i"
# Add the device into /etc/crypttab, reload systemd, and then activate
# the device so we can create a filesystem on it later
echo "encbtrfs$i UUID=deadbeef-dead-dead-beef-11111111111$i /etc/btrfs_keyfile luks,noearly" >>/etc/crypttab
@@ -488,12 +338,14 @@ EOF
# Check if we have all necessary DM devices
ls -l /dev/mapper/encbtrfs{0..3}
# Create a multi-device btrfs filesystem on the LUKS devices
helper_generate_flock_cmdline flock_cmd /dev/mapper/encbtrfs{0..3}
"${flock_cmd[@]}" mkfs.btrfs -M -d raid1 -m raid1 -L "$label" -U "$uuid" /dev/mapper/encbtrfs{0..3}
udevadm settle
udevadm lock \
--device=/dev/mapper/encbtrfs0 \
--device=/dev/mapper/encbtrfs1 \
--device=/dev/mapper/encbtrfs2 \
--device=/dev/mapper/encbtrfs3 \
mkfs.btrfs -M -d raid1 -m raid1 -L "$label" -U "$uuid" /dev/mapper/encbtrfs{0..3}
udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
btrfs filesystem show
test -e "/dev/disk/by-uuid/$uuid"
test -e "/dev/disk/by-label/$label"
helper_check_device_symlinks
# Mount it and write some data to it we can compare later
mount -t btrfs /dev/mapper/encbtrfs0 "$mpoint"
@@ -501,7 +353,7 @@ EOF
# "Deconstruct" the btrfs device and check if we're in a sane state (symlink-wise)
umount "$mpoint"
systemctl stop systemd-cryptsetup@encbtrfs{0..3}
test ! -e "/dev/disk/by-uuid/$uuid"
udevadm wait --settle --timeout=30 --removed "/dev/disk/by-uuid/$uuid"
helper_check_device_symlinks
# Add the mount point to /etc/fstab and check if the device can be put together
# automagically. The source device is the DM name of the first LUKS device
@@ -516,9 +368,8 @@ EOF
# Start the corresponding mount unit and check if the btrfs device was reconstructed
# correctly
systemctl start "${mpoint##*/}.mount"
udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
btrfs filesystem show
test -e "/dev/disk/by-uuid/$uuid"
test -e "/dev/disk/by-label/$label"
helper_check_device_symlinks
grep "hello there" "$mpoint/test"
# Cleanup
@@ -581,7 +432,7 @@ testcase_iscsi_lvm() {
expected_symlinks=()
# Use the first device as it's configured with larger capacity
mkfs.ext4 -L iscsi_store "${devices[0]}"
udevadm settle
udevadm wait --settle --timeout=30 "${devices[0]}"
mount "${devices[0]}" "$mpoint"
for i in {1..4}; do
dd if=/dev/zero of="$mpoint/lun$i.img" bs=1M count=32
@@ -613,12 +464,9 @@ testcase_iscsi_lvm() {
lvm lvcreate -y -L 4M "$vgroup" -n mypart1
lvm lvcreate -y -L 8M "$vgroup" -n mypart2
lvm lvs
udevadm settle
test -e "/dev/$vgroup/mypart1"
test -e "/dev/$vgroup/mypart2"
udevadm wait --settle --timeout=30 "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2"
mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
udevadm settle
test -e "/dev/disk/by-label/mylvpart1"
udevadm wait --settle --timeout=30 "/dev/disk/by-label/mylvpart1"
helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
# Disconnect the iSCSI devices and check all the symlinks
iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
@@ -631,13 +479,7 @@ testcase_iscsi_lvm() {
# Reconnect the iSCSI devices and check if everything get detected correctly
iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
for link in "${expected_symlinks[@]}"; do
helper_wait_for_vgroup "$link" "$vgroup"
done
test -e "/dev/$vgroup/mypart1"
test -e "/dev/$vgroup/mypart2"
test -e "/dev/disk/by-label/mylvpart1"
udevadm wait --settle --timeout=30 "${expected_symlinks[@]}" "/dev/$vgroup/mypart1" "/dev/$vgroup/mypart2" "/dev/disk/by-label/mylvpart1"
helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
# Cleanup
iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
@@ -662,9 +504,7 @@ testcase_long_sysfs_path() {
stat /sys/block/vda
readlink -f /sys/block/vda/dev
for link in "${expected_symlinks[@]}"; do
test -e "$link"
done
udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
# Try to mount the data partition manually (using its label)
mpoint="$(mktemp -d /logsysfsXXX)"