# vi: ts=4 sw=4 tw=0 et:

# Check if all symlinks under /dev/disk/ are valid
# Check if all symlinks under the given paths are valid.
# Arguments: $@ - paths to check (default: /dev/disk /dev/mapper)
# Returns:   0 if every symlink resolves and matches udev's idea of the
#            device name, non-zero otherwise
# Runs in a subshell so 'return' unwinds cleanly without leaking state.
# shellcheck disable=SC2120
helper_check_device_symlinks() {(
    local dev link path paths target

    # Default to the standard symlink directories unless explicit paths are given
    [[ $# -gt 0 ]] && paths=("$@") || paths=("/dev/disk" "/dev/mapper")

    # Check if all given paths are valid
    for path in "${paths[@]}"; do
        if ! test -e "$path"; then
            echo >&2 "Path '$path' doesn't exist"
            return 1
        fi
    done

    while read -r link; do
        target="$(readlink -f "$link")"
        echo "$link -> $target"
        # Both checks should do virtually the same thing, but check both to be
        # on the safe side
        if [[ ! -e "$link" || ! -e "$target" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' which doesn't exist"
            return 1
        fi

        # Check if the symlink points to the correct device in /dev
        dev="/dev/$(udevadm info -q name "$link")"
        if [[ "$target" != "$dev" ]]; then
            echo >&2 "ERROR: symlink '$link' points to '$target' but '$dev' was expected"
            return 1
        fi
    done < <(find "${paths[@]}" -type l)
)}
# Wait for a specific device link to appear
# $1 - device path
# $2 - number of retries (default: 10)
# Returns 0 as soon as the path exists, 1 after ntries polls (0.5s apart)
helper_wait_for_dev() {
    local dev="${1:?}"
    local ntries="${2:-10}"
    local i

    for ((i = 0; i < ntries; i++)); do
        # Double negation: succeed (return 0) the moment the path exists
        test ! -e "$dev" || return 0
        sleep .5
    done

    return 1
}
# Wait for the lvm2-pvscan@.service of a specific device to finish
# $1 - block device path (or a symlink to it)
# $2 - number of retries (default: 10)
# Returns 0 once the pvscan unit is active, 1 on invalid device or timeout
helper_wait_for_pvscan() {
    local dev="${1:?}"
    local ntries="${2:-10}"
    local i
    local MAJOR MINOR pvscan_svc real_dev

    # Sanity check we got a valid block device (or a symlink to it)
    real_dev="$(readlink -f "$dev")"
    if [[ ! -b "$real_dev" ]]; then
        echo >&2 "ERROR: '$dev ($real_dev) is not a valid block device'"
        return 1
    fi

    # Get major and minor numbers from the udev database
    # (udevadm returns MAJOR= and MINOR= expressions, so let's pull them into
    # the current environment via `source` for easier parsing)
    # shellcheck source=/dev/null
    source <(udevadm info -q property "$real_dev" | grep -E "(MAJOR|MINOR)=")
    # Sanity check if we got correct major and minor numbers
    test -e "/sys/dev/block/$MAJOR:$MINOR/"

    # Wait n_tries*0.5 seconds until the respective lvm2-pvscan service becomes
    # active (i.e. it got executed and finished)
    pvscan_svc="lvm2-pvscan@$MAJOR:$MINOR.service"
    for ((i = 0; i < ntries; i++)); do
        ! systemctl -q is-active "$pvscan_svc" || return 0
        sleep .5
    done

    return 1
}
# Verify the megasas2 (SCSI) test setup exposes the expected number of disks.
testcase_megasas2_basic() {
    # The test environment is expected to attach at least 128 SCSI disks
    [[ "$(lsblk --scsi --noheadings | wc -l)" -ge 128 ]]
}
# Verify the NVMe test setup exposes the expected number of namespaces.
testcase_nvme_basic() {
    lsblk --noheadings | grep "^nvme"
    # The test environment is expected to attach at least 28 NVMe devices
    [[ "$(lsblk --noheadings | grep -c "^nvme")" -ge 28 ]]
}
# Verify that identically-labeled partitions on multiple virtio-scsi disks
# all show up (16 disks * 8 partitions, each labeled "Hello world").
testcase_virtio_scsi_identically_named_partitions() {
    lsblk --noheadings -a -o NAME,PARTLABEL
    [[ "$(lsblk --noheadings -a -o NAME,PARTLABEL | grep -c "Hello world")" -eq $((16 * 8)) ]]
}
# Basic multipath test: check that every multipath device has 4 active paths,
# then take paths offline one by one and verify the mount stays usable and
# all partition symlinks stay valid.
testcase_multipath_basic_failover() {
    local dmpath i path wwid

    # Configure multipath
    # NOTE(review): the stanzas around the visible fragments were reconstructed
    # — confirm the exact defaults/blacklist sections against the original
    cat >/etc/multipath.conf <<\EOF
defaults {
    # Use /dev/mapper/$WWN paths instead of /dev/mapper/mpathX
    user_friendly_names no
}

blacklist_exceptions {
    property "(SCSI_IDENT_|ID_WWN)"
}

blacklist {
}
EOF
    modprobe -v dm_multipath
    systemctl start multipathd.service
    systemctl status multipathd.service
    ls -l /dev/disk/by-id/

    # Check that each emulated multipath device is healthy
    # TODO(review): loop bound reconstructed — confirm the number of test WWIDs
    for i in {0..63}; do
        wwid="deaddeadbeef$(printf "%.4d" "$i")"
        path="/dev/disk/by-id/wwn-0x$wwid"
        dmpath="$(readlink -f "$path")"

        multipath -C "$dmpath"
        # We should have 4 active paths for each multipath device
        [[ "$(multipath -l "$path" | grep -c running)" -eq 4 ]]
    done

    # Test failover (with the first multipath device that has a partitioned disk)
    echo "${FUNCNAME[0]}: test failover"
    local device expected link mpoint part
    local -a devices
    mpoint="$(mktemp -d /mnt/mpathXXX)"
    wwid="deaddeadbeef0000"
    path="/dev/disk/by-id/wwn-0x$wwid"

    # All following symlinks should exists and should be valid
    local -a part_links=(
        "/dev/disk/by-id/wwn-0x$wwid-part2"
        "/dev/disk/by-partlabel/failover_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-label/failover_vol"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
    )
    for link in "${part_links[@]}"; do
        test -e "$link"
    done

    # Choose a random symlink to the failover data partition each time, for
    # better coverage
    part="${part_links[$RANDOM % ${#part_links[@]}]}"

    # Get all devices attached to a specific multipath device (in H:C:T:L format)
    # and sort them in a random order, so we cut off different paths each time
    mapfile -t devices < <(multipath -l "$path" | grep -Eo '[0-9]+:[0-9]+:[0-9]+:[0-9]+' | sort -R)
    if [[ "${#devices[@]}" -ne 4 ]]; then
        echo "Expected 4 devices attached to WWID=$wwid, got ${#devices[@]} instead"
        return 1
    fi
    # Drop the last path from the array, since we want to leave at least one path active
    unset 'devices[3]'

    # Mount the first multipath partition, write some data we can check later,
    # and then disconnect the remaining paths one by one while checking if we
    # can still read/write from the mount
    mount -t ext4 "$part" "$mpoint"
    expected=0
    echo -n "$expected" >"$mpoint/test"
    # Sanity check we actually wrote what we wanted
    [[ "$(<"$mpoint/test")" == "$expected" ]]

    for device in "${devices[@]}"; do
        echo offline >"/sys/class/scsi_device/$device/device/state"
        [[ "$(<"$mpoint/test")" == "$expected" ]]
        expected="$((expected + 1))"
        echo -n "$expected" >"$mpoint/test"

        # Make sure all symlinks are still valid
        for link in "${part_links[@]}"; do
            test -e "$link"
        done
    done

    # Three paths should be now marked as 'offline' and one as 'running'
    [[ "$(multipath -l "$path" | grep -c offline)" -eq 3 ]]
    [[ "$(multipath -l "$path" | grep -c running)" -eq 1 ]]

    umount "$mpoint"
    rm -fr "$mpoint"
}
# Stress-test udev's handling of rapid partition delete/recreate cycles and
# verify no dangling symlinks are left behind in /dev/disk/.
testcase_simultaneous_events() {
    local blockdev i part partscript

    blockdev="$(readlink -f /dev/disk/by-id/scsi-*_deadbeeftest)"
    partscript="$(mktemp)"

    if [[ ! -b "$blockdev" ]]; then
        echo "ERROR: failed to find the test SCSI block device"
        return 1
    fi

    # 50 partitions of 2M each, named test1..test50
    cat >"$partscript" <<EOF
$(printf 'name="test%d", size=2M\n' {1..50})
EOF

    # Initial partition table
    sfdisk -q -X gpt "$blockdev" <"$partscript"

    # Delete the partitions, immediately recreate them, wait for udev to settle
    # down, and then check if we have any dangling symlinks in /dev/disk/. Rinse
    # and repeat.
    # On unpatched udev versions the delete-recreate cycle may trigger a race
    # leading to dead symlinks in /dev/disk/
    for i in {1..100}; do
        sfdisk -q --delete "$blockdev"
        sfdisk -q -X gpt "$blockdev" <"$partscript"

        # Only settle + verify every 10th iteration to keep the stress high
        if ((i % 10 == 0)); then
            udevadm settle
            helper_check_device_symlinks
        fi
    done

    rm -f "$partscript"
}
# Basic LVM test: create a VG + LVs on the dedicated test disks and verify the
# /dev/<vg>/ and /dev/disk/ symlinks stay correct through activation cycles,
# removal, and create/remove stress loops.
testcase_lvm_basic() {
    local i part
    local vgroup="MyTestGroup$RANDOM"
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeeflvm{0..3}
    )

    # Make sure all the necessary soon-to-be-LVM devices exist
    ls -l "${devices[@]}"

    # Add all test devices into a volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${devices[@]}"
    lvm vgcreate "$vgroup" -y "${devices[@]}"
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm settle
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Disable the VG and check symlinks...
    lvm vgchange -an "$vgroup"
    udevadm settle
    test ! -e "/dev/$vgroup"
    test ! -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk"

    # reenable the VG and check the symlinks again if all LVs are properly activated
    lvm vgchange -ay "$vgroup"
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Same as above, but now with more "stress"
    # TODO(review): loop bound reconstructed — confirm iteration count
    for i in {1..50}; do
        lvm vgchange -an "$vgroup"
        lvm vgchange -ay "$vgroup"

        if ((i % 5 == 0)); then
            udevadm settle
            test -e "/dev/$vgroup/mypart1"
            test -e "/dev/$vgroup/mypart2"
            test -e "/dev/disk/by-label/mylvpart1"
            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done

    # Remove the first LV
    lvm lvremove -y "$vgroup/mypart1"
    udevadm settle
    test ! -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"

    # Create & remove LVs in a loop, i.e. with more "stress"
    # TODO(review): loop bound reconstructed — confirm iteration count
    for i in {1..16}; do
        # 1) Create 16 logical volumes
        for part in {0..15}; do
            lvm lvcreate -y -L 4M "$vgroup" -n "looppart$part"
        done

        # 2) Immediately remove them
        lvm lvremove -y "$vgroup"/looppart{0..15}

        # 3) On every 4th iteration settle udev and check if all partitions are
        #    indeed gone, and if all symlinks are still valid
        if ((i % 4 == 0)); then
            udevadm settle

            for part in {0..15}; do
                test ! -e "/dev/$vgroup/looppart$part"
            done

            helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
        fi
    done
}
# Btrfs test: single-device, multi-partition, multi-disk RAID, and
# LUKS-encrypted multi-device setups, verifying /dev/disk/ symlinks at each
# step, plus fstab/crypttab-driven reassembly of the encrypted filesystem.
testcase_btrfs_basic() {
    local dev_stub i label mpoint uuid
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefbtrfs{0..3}
    )

    ls -l "${devices[@]}"

    echo "Single device: default settings"
    uuid="deadbeef-dead-dead-beef-000000000000"
    # TODO(review): label value reconstructed — confirm against the original
    label="btrfs_root"
    mkfs.btrfs -L "$label" -U "$uuid" "${devices[0]}"
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks

    echo "Multiple devices: using partitions, data: single, metadata: raid1"
    uuid="deadbeef-dead-dead-beef-000000000001"
    # TODO(review): label value reconstructed — confirm against the original
    label="btrfs_mpart"
    sfdisk --wipe=always "${devices[0]}" <<EOF
label: gpt

name="diskpart1", size=85M
name="diskpart2", size=85M
name="diskpart3", size=85M
name="diskpart4", size=85M
EOF
    udevadm settle
    mkfs.btrfs -d single -m raid1 -L "$label" -U "$uuid" /dev/disk/by-partlabel/diskpart{1..4}
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks
    wipefs -a -f "${devices[0]}"

    echo "Multiple devices: using disks, data: raid10, metadata: raid10, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000002"
    # TODO(review): label value reconstructed — confirm against the original
    label="btrfs_mdisk"
    mkfs.btrfs -M -d raid10 -m raid10 -L "$label" -U "$uuid" "${devices[@]}"
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks

    echo "Multiple devices: using LUKS encrypted disks, data: raid1, metadata: raid1, mixed mode"
    uuid="deadbeef-dead-dead-beef-000000000003"
    label="btrfs_mencdisk"
    mpoint="/btrfs_enc$RANDOM"
    mkdir "$mpoint"
    # Create a throwaway keyfile for the LUKS devices
    dd if=/dev/urandom of=/etc/btrfs_keyfile bs=64 count=1 iflag=fullblock
    chmod 0600 /etc/btrfs_keyfile
    # Encrypt each device and add it to /etc/crypttab, so it can be mounted
    # automagically later
    : >/etc/crypttab
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # Intentionally use weaker cipher-related settings, since we don't care
        # about security here as it's a throwaway LUKS partition
        cryptsetup luksFormat -q \
            --use-urandom --pbkdf pbkdf2 --pbkdf-force-iterations 1000 \
            --uuid "deadbeef-dead-dead-beef-11111111111$i" --label "encdisk$i" "${devices[$i]}" /etc/btrfs_keyfile
        udevadm settle
        test -e "/dev/disk/by-uuid/deadbeef-dead-dead-beef-11111111111$i"
        test -e "/dev/disk/by-label/encdisk$i"
        # Add the device into /etc/crypttab, reload systemd, and then activate
        # the device so we can create a filesystem on it later
        echo "encbtrfs$i UUID=deadbeef-dead-dead-beef-11111111111$i /etc/btrfs_keyfile luks,noearly" >>/etc/crypttab
        systemctl daemon-reload
        systemctl start "systemd-cryptsetup@encbtrfs$i"
    done
    helper_check_device_symlinks
    # Check if we have all necessary DM devices
    ls -l /dev/mapper/encbtrfs{0..3}
    # Create a multi-device btrfs filesystem on the LUKS devices
    mkfs.btrfs -M -d raid1 -m raid1 -L "$label" -U "$uuid" /dev/mapper/encbtrfs{0..3}
    udevadm settle
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks
    # Mount it and write some data to it we can compare later
    mount -t btrfs /dev/mapper/encbtrfs0 "$mpoint"
    echo "hello there" >"$mpoint/test"
    # "Deconstruct" the btrfs device and check if we're in a sane state (symlink-wise)
    umount "$mpoint"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    test ! -e "/dev/disk/by-uuid/$uuid"
    helper_check_device_symlinks
    # Add the mount point to /etc/fstab and check if the device can be put together
    # automagically. The source device is the DM name of the first LUKS device
    # (from /etc/crypttab). We have to specify all LUKS devices manually, as
    # registering the necessary devices is usually initrd's job (via btrfs device scan)
    dev_stub="/dev/mapper/encbtrfs"
    echo "/dev/mapper/encbtrfs0 $mpoint btrfs device=${dev_stub}0,device=${dev_stub}1,device=${dev_stub}2,device=${dev_stub}3 0 2" >>/etc/fstab
    # Tell systemd about the new mount
    systemctl daemon-reload
    # Restart cryptsetup.target to trigger autounlock of partitions in /etc/crypttab
    systemctl restart cryptsetup.target
    # Start the corresponding mount unit and check if the btrfs device was reconstructed
    systemctl start "${mpoint##*/}.mount"
    btrfs filesystem show
    test -e "/dev/disk/by-uuid/$uuid"
    test -e "/dev/disk/by-label/$label"
    helper_check_device_symlinks
    grep "hello there" "$mpoint/test"
    # Cleanup
    systemctl stop "${mpoint##*/}.mount"
    systemctl stop systemd-cryptsetup@encbtrfs{0..3}
    sed -i "/${mpoint##*/}/d" /etc/fstab
    : >/etc/crypttab
    rm -fr "$mpoint"
    systemctl daemon-reload
    udevadm settle
}
# iSCSI + LVM test: export devices (and later file-backed LUNs) via tgtd,
# log in with iscsiadm, build a VG/LVs on top, then disconnect/reconnect the
# iSCSI devices and verify the device symlinks and LVs recover correctly.
testcase_iscsi_lvm() {
    local dev i label link lun_id mpoint target_name uuid
    local target_ip="127.0.0.1"
    local target_port="3260"
    local vgroup="iscsi_lvm$RANDOM"
    local expected_symlinks=()
    local devices=(
        /dev/disk/by-id/ata-foobar_deadbeefiscsi{0..3}
    )

    ls -l "${devices[@]}"

    # Start the target daemon
    systemctl start tgtd
    systemctl status tgtd

    echo "iSCSI LUNs backed by devices"
    # See RFC3721 and RFC7143
    target_name="iqn.2021-09.com.example:iscsi.test"
    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by one of the test devices
    tgtadm --lld iscsi --op new --mode target --tid=1 --targetname "$target_name"
    for ((i = 0; i < ${#devices[@]}; i++)); do
        # lun-0 is reserved by iSCSI
        lun_id="$((i + 1))"
        tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun "$lun_id" -b "${devices[$i]}"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 1 --lun "$lun_id"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$lun_id"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm settle
    # Check if all device symlinks are valid and if all expected device symlinks exist
    for link in "${expected_symlinks[@]}"; do
        # We need to do some active waiting anyway, as it may take kernel a bit
        # to attach the newly connected SCSI devices
        helper_wait_for_dev "$link"
        test -e "$link"
    done
    udevadm settle
    helper_check_device_symlinks
    # Cleanup the device-backed target
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=1

    echo "iSCSI LUNs backed by files + LVM"
    # Note: we use files here to "trick" LVM the disks are indeed on a different
    # host, so it doesn't automagically detect another path to the backing
    # device once we disconnect the iSCSI devices
    target_name="iqn.2021-09.com.example:iscsi.lvm.test"
    mpoint="$(mktemp -d /iscsi_storeXXX)"
    expected_symlinks=()
    # Use the first device as it's configured with larger capacity
    mkfs.ext4 -L iscsi_store "${devices[0]}"
    udevadm settle
    mount "${devices[0]}" "$mpoint"
    # TODO(review): loop over LUN image files reconstructed — confirm count
    for i in {1..4}; do
        dd if=/dev/zero of="$mpoint/lun$i.img" bs=1M count=32
    done

    # Initialize a new iSCSI target <$target_name> consisting of 4 LUNs, each
    # backed by a file
    tgtadm --lld iscsi --op new --mode target --tid=2 --targetname "$target_name"
    # lun-0 is reserved by iSCSI
    for i in {1..4}; do
        tgtadm --lld iscsi --op new --mode logicalunit --tid 2 --lun "$i" -b "$mpoint/lun$i.img"
        tgtadm --lld iscsi --op update --mode logicalunit --tid 2 --lun "$i"
        expected_symlinks+=(
            "/dev/disk/by-path/ip-$target_ip:$target_port-iscsi-$target_name-lun-$i"
        )
    done
    tgtadm --lld iscsi --op bind --mode target --tid 2 -I ALL
    # Configure the iSCSI initiator
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm settle
    # Check if all device symlinks are valid and if all expected device symlinks exist
    for link in "${expected_symlinks[@]}"; do
        # We need to do some active waiting anyway, as it may take kernel a bit
        # to attach the newly connected SCSI devices
        helper_wait_for_dev "$link"
        test -e "$link"
    done
    udevadm settle
    helper_check_device_symlinks
    # Add all iSCSI devices into a LVM volume group, create two logical volumes,
    # and check if necessary symlinks exist (and are valid)
    lvm pvcreate -y "${expected_symlinks[@]}"
    lvm vgcreate "$vgroup" -y "${expected_symlinks[@]}"
    lvm vgchange -ay "$vgroup"
    lvm lvcreate -y -L 4M "$vgroup" -n mypart1
    lvm lvcreate -y -L 8M "$vgroup" -n mypart2
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    mkfs.ext4 -L mylvpart1 "/dev/$vgroup/mypart1"
    udevadm settle
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Disconnect the iSCSI devices and check all the symlinks
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    # "Reset" the DM state, since we yanked the backing storage from under the LVM,
    # so the currently active VGs/LVs are invalid
    dmsetup remove_all --deferred
    udevadm settle
    # The LVM and iSCSI related symlinks should be gone
    test ! -e "/dev/$vgroup"
    test ! -e "/dev/disk/by-label/mylvpart1"
    for link in "${expected_symlinks[@]}"; do
        test ! -e "$link"
    done
    helper_check_device_symlinks "/dev/disk"
    # Reconnect the iSCSI devices and check if everything get detected correctly
    iscsiadm --mode discoverydb --type sendtargets --portal "$target_ip" --discover
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --login
    udevadm settle
    for link in "${expected_symlinks[@]}"; do
        helper_wait_for_dev "$link"
        helper_wait_for_pvscan "$link"
        test -e "$link"
    done
    udevadm settle
    test -e "/dev/$vgroup/mypart1"
    test -e "/dev/$vgroup/mypart2"
    test -e "/dev/disk/by-label/mylvpart1"
    helper_check_device_symlinks "/dev/disk" "/dev/$vgroup"
    # Cleanup
    iscsiadm --mode node --targetname "$target_name" --portal "$target_ip:$target_port" --logout
    tgtadm --lld iscsi --op delete --mode target --tid=2
    umount "$mpoint"
    rm -fr "$mpoint"
}
# Check that devices with very long sysfs paths are handled gracefully:
# symlinks exist, mounting/swapping works, and udev doesn't spam the journal.
testcase_long_sysfs_path() {
    local link logfile mpoint
    local expected_symlinks=(
        "/dev/disk/by-label/data_vol"
        "/dev/disk/by-label/swap_vol"
        "/dev/disk/by-partlabel/test_swap"
        "/dev/disk/by-partlabel/test_part"
        "/dev/disk/by-partuuid/deadbeef-dead-dead-beef-000000000000"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-111111111111"
        "/dev/disk/by-uuid/deadbeef-dead-dead-beef-222222222222"
    )

    # Make sure the test device is connected and show its "wonderful" path
    readlink -f /sys/block/vda/dev

    for link in "${expected_symlinks[@]}"; do
        test -e "$link"
    done

    # Try to mount the data partition manually (using its label)
    mpoint="$(mktemp -d /logsysfsXXX)"
    mount LABEL=data_vol "$mpoint"
    touch "$mpoint/test"
    umount "$mpoint"
    # Do the same, but with UUID and using fstab
    echo "UUID=deadbeef-dead-dead-beef-222222222222 $mpoint ext4 defaults 0 0" >>/etc/fstab
    systemctl daemon-reload
    mount "$mpoint"
    test -e "$mpoint/test"
    umount "$mpoint"

    # Test out the swap partition
    swapon -v -L swap_vol
    swapoff -v -L swap_vol

    udevadm settle

    logfile="$(mktemp)"
    # The respective log message should be present in the journal
    journalctl -b -q --no-pager -o short-monotonic -p info --grep "Device path.*vda.?' too long to fit into unit name"
    # Make sure we don't unnecessarily spam the log
    journalctl -b -q --no-pager -o short-monotonic -p info --grep "/sys/devices/.+/vda[0-9]?" _PID=1 + UNIT=systemd-udevd.service | tee "$logfile"
    [[ "$(wc -l <"$logfile")" -lt 10 ]]

    # Cleanup: drop the fstab entry and temporary files
    sed -i "\|$mpoint|d" /etc/fstab
    rm -fr "${logfile:?}" "${mpoint:?}"
}
# --- Test driver -------------------------------------------------------------
echo "Check if all symlinks under /dev/disk/ are valid (pre-test)"
helper_check_device_symlinks

# TEST_FUNCTION_NAME is passed on the kernel command line via systemd.setenv=
# in the respective test.sh file
if ! command -v "${TEST_FUNCTION_NAME:?}"; then
    echo >&2 "Missing verification handler for test case '$TEST_FUNCTION_NAME'"
    exit 1
fi

echo "TEST_FUNCTION_NAME=$TEST_FUNCTION_NAME"
"$TEST_FUNCTION_NAME"

echo "Check if all symlinks under /dev/disk/ are valid (post-test)"
helper_check_device_symlinks

systemctl status systemd-udevd