TEST-64-UDEV-STORAGE: Use virtio-scsi-pci instead of ahci

The Debian cloud kernel does not support ahci, so let's use
virtio-scsi-pci everywhere instead.
commit b37ed039f0
parent f41b6394d5
Author: Daan De Meyer
Date:   2024-05-13 15:04:16 +02:00

2 changed files with 23 additions and 23 deletions
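
For context (not part of the commit itself), here is a minimal sketch of how the attachment of a
single test disk changes, and how the /dev/disk/by-id symlink the test script looks up changes
with it. The by-id names are taken verbatim from the diff below; how udev composes them from the
device properties is assumed from the usual persistent-storage rules shipped in the image:

    # Before: SATA disk behind an AHCI controller
    qemu_opts=(
        "-device ahci,id=ahci0"
        "-device ide-hd,bus=ahci0.0,drive=drive0,model=foobar,serial=deadbeeflvm0"
        "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive0"
    )
    # udev exposes it by ATA model/serial:
    #   /dev/disk/by-id/ata-foobar_deadbeeflvm0

    # After: SCSI disk behind a virtio-scsi controller
    qemu_opts=(
        "-device virtio-scsi-pci,id=scsi0"
        "-device scsi-hd,drive=drive0,vendor=systemd,product=foobar,serial=deadbeeflvm0"
        "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive0"
    )
    # udev exposes it by SCSI vendor/product/serial:
    #   /dev/disk/by-id/scsi-0systemd_foobar_deadbeeflvm0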


@@ -369,7 +369,7 @@ testcase_lvm_basic() {
         return 77
     fi
 
-    local qemu_opts=("-device ahci,id=ahci0")
+    local qemu_opts=("-device virtio-scsi-pci,id=scsi0")
     local diskpath i
 
     # Attach 4 SATA disks to the VM (and set their model and serial fields
@@ -378,7 +378,7 @@ testcase_lvm_basic() {
         diskpath="${TESTDIR:?}/lvmbasic${i}.img"
         dd if=/dev/zero of="$diskpath" bs=1M count=32
         qemu_opts+=(
-            "-device ide-hd,bus=ahci0.$i,drive=drive$i,model=foobar,serial=deadbeeflvm$i"
+            "-device scsi-hd,drive=drive$i,vendor=systemd,product=foobar,serial=deadbeeflvm$i"
             "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive$i"
         )
     done
@@ -396,7 +396,7 @@ testcase_btrfs_basic() {
         return 77
     fi
 
-    local qemu_opts=("-device ahci,id=ahci0")
+    local qemu_opts=("-device virtio-scsi-pci,id=scsi0")
     local diskpath i size
 
     for i in {0..3}; do
@@ -406,7 +406,7 @@ testcase_btrfs_basic() {
         dd if=/dev/zero of="$diskpath" bs=1M count="$size"
         qemu_opts+=(
-            "-device ide-hd,bus=ahci0.$i,drive=drive$i,model=foobar,serial=deadbeefbtrfs$i"
+            "-device scsi-hd,drive=drive$i,vendor=systemd,product=foobar,serial=deadbeefbtrfs$i"
             "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive$i"
         )
     done
@@ -424,7 +424,7 @@ testcase_iscsi_lvm() {
         return 77
     fi
 
-    local qemu_opts=("-device ahci,id=ahci0")
+    local qemu_opts=("-device virtio-scsi-pci,id=scsi0")
     local diskpath i size
 
     for i in {0..3}; do
@@ -435,7 +435,7 @@ testcase_iscsi_lvm() {
         dd if=/dev/zero of="$diskpath" bs=1M count="$size"
         qemu_opts+=(
-            "-device ide-hd,bus=ahci0.$i,drive=drive$i,model=foobar,serial=deadbeefiscsi$i"
+            "-device scsi-hd,drive=drive$i,vendor=systemd,product=foobar,serial=deadbeefiscsi$i"
             "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive$i"
         )
     done
@@ -479,7 +479,7 @@ testcase_mdadm_basic() {
         return 77
     fi
 
-    local qemu_opts=("-device ahci,id=ahci0")
+    local qemu_opts=("-device virtio-scsi-pci,id=scsi0")
     local diskpath i size
 
     for i in {0..4}; do
@@ -487,7 +487,7 @@ testcase_mdadm_basic() {
         dd if=/dev/zero of="$diskpath" bs=1M count=64
         qemu_opts+=(
-            "-device ide-hd,bus=ahci0.$i,drive=drive$i,model=foobar,serial=deadbeefmdadm$i"
+            "-device scsi-hd,drive=drive$i,vendor=systemd,product=foobar,serial=deadbeefmdadm$i"
             "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive$i"
         )
     done
@@ -505,7 +505,7 @@ testcase_mdadm_lvm() {
         return 77
     fi
 
-    local qemu_opts=("-device ahci,id=ahci0")
+    local qemu_opts=("-device virtio-scsi-pci,id=scsi0")
     local diskpath i size
 
     for i in {0..4}; do
@@ -513,7 +513,7 @@ testcase_mdadm_lvm() {
         dd if=/dev/zero of="$diskpath" bs=1M count=64
         qemu_opts+=(
-            "-device ide-hd,bus=ahci0.$i,drive=drive$i,model=foobar,serial=deadbeefmdadmlvm$i"
+            "-device scsi-hd,drive=drive$i,vendor=systemd,product=foobar,serial=deadbeefmdadmlvm$i"
             "-drive format=raw,cache=unsafe,file=$diskpath,if=none,id=drive$i"
         )
     done


@@ -541,7 +541,7 @@ testcase_lvm_basic() {
     local i iterations partitions part timeout
     local vgroup="MyTestGroup$RANDOM"
     local devices=(
-        /dev/disk/by-id/ata-foobar_deadbeeflvm{0..3}
+        /dev/disk/by-id/scsi-0systemd_foobar_deadbeeflvm{0..3}
     )
 
     if [[ -v ASAN_OPTIONS || "$(systemd-detect-virt -v)" == "qemu" ]]; then
@@ -713,7 +713,7 @@ testcase_lvm_basic() {
 testcase_btrfs_basic() {
     local dev_stub i label mpoint uuid
     local devices=(
-        /dev/disk/by-id/ata-foobar_deadbeefbtrfs{0..3}
+        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs{0..3}
     )
 
     ls -l "${devices[@]}"
@@ -751,10 +751,10 @@ EOF
     uuid="deadbeef-dead-dead-beef-000000000002"
     label="btrfs_mdisk"
     udevadm lock \
-        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs0 \
-        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs1 \
-        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs2 \
-        --device=/dev/disk/by-id/ata-foobar_deadbeefbtrfs3 \
+        --device=/dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs0 \
+        --device=/dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs1 \
+        --device=/dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs2 \
+        --device=/dev/disk/by-id/scsi-0systemd_foobar_deadbeefbtrfs3 \
         mkfs.btrfs -f -M -d raid10 -m raid10 -L "$label" -U "$uuid" "${devices[@]}"
     udevadm wait --settle --timeout=30 "/dev/disk/by-uuid/$uuid" "/dev/disk/by-label/$label"
     btrfs filesystem show
@@ -844,7 +844,7 @@ testcase_iscsi_lvm() {
     local vgroup="iscsi_lvm$RANDOM"
     local expected_symlinks=()
     local devices=(
-        /dev/disk/by-id/ata-foobar_deadbeefiscsi{0..3}
+        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefiscsi{0..3}
     )
 
     ls -l "${devices[@]}"
@@ -1022,7 +1022,7 @@ testcase_mdadm_basic() {
     local i part_name raid_name raid_dev uuid
     local expected_symlinks=()
     local devices=(
-        /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..4}
+        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..4}
     )
 
     ls -l "${devices[@]}"
@@ -1039,7 +1039,7 @@ testcase_mdadm_basic() {
         "/dev/disk/by-label/$part_name" # ext4 partition
     )
     # Create a simple RAID 1 with an ext4 filesystem
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
+    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
     udevadm wait --settle --timeout=30 "$raid_dev"
     mkfs.ext4 -L "$part_name" "$raid_dev"
     udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
@@ -1068,7 +1068,7 @@ testcase_mdadm_basic() {
         "/dev/disk/by-label/$part_name" # ext4 partition
     )
     # Create a simple RAID 5 with an ext4 filesystem
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..2} -v -f --level=5 --raid-devices=3
+    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..2} -v -f --level=5 --raid-devices=3
     udevadm wait --settle --timeout=30 "$raid_dev"
     mkfs.ext4 -L "$part_name" "$raid_dev"
     udevadm wait --settle --timeout=30 "${expected_symlinks[@]}"
@@ -1108,7 +1108,7 @@ testcase_mdadm_basic() {
         "/dev/disk/by-id/md-uuid-$uuid-part3"
     )
     # Create a simple RAID 10 with an ext4 filesystem
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
+    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
     udevadm wait --settle --timeout=30 "$raid_dev"
     # Partition the raid device
     # Here, 'udevadm lock' is meaningless, as udevd does not lock MD devices.
@@ -1142,7 +1142,7 @@ testcase_mdadm_lvm() {
     local part_name raid_name raid_dev uuid vgroup
     local expected_symlinks=()
     local devices=(
-        /dev/disk/by-id/ata-foobar_deadbeefmdadmlvm{0..4}
+        /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..4}
     )
 
     ls -l "${devices[@]}"
@@ -1161,7 +1161,7 @@ testcase_mdadm_lvm() {
         "/dev/disk/by-label/$part_name" # ext4 partition
     )
     # Create a RAID 10 with LVM + ext4
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/ata-foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
+    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
     udevadm wait --settle --timeout=30 "$raid_dev"
     # Create an LVM on the MD
     lvm pvcreate -y "$raid_dev"
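
A quick way to sanity-check the new names inside the VM (a usage sketch, not part of the change;
it assumes the image ships the usual udev persistent-storage rules and that the first virtio-scsi
disk shows up as /dev/sda):

    # List the SCSI by-id links the test cases now rely on
    ls -l /dev/disk/by-id/scsi-0systemd_foobar_*

    # Show the udev properties those links are derived from
    udevadm info --query=property --name=/dev/sda | grep -E '^ID_(VENDOR|MODEL|SERIAL)='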