test: answer 2nd mdadm --create question for compat with new version
author Luca Boccassi <luca.boccassi@gmail.com>
Sun, 22 Dec 2024 13:31:36 +0000 (13:31 +0000)
committer Luca Boccassi <luca.boccassi@gmail.com>
Sun, 22 Dec 2024 15:43:06 +0000 (15:43 +0000)
A new version of mdadm now asks a second question, so send 'y' to it
twice in the test scripts:

[    5.253483] TEST-64-UDEV-STORAGE.sh[684]: + echo y
[    5.254412] TEST-64-UDEV-STORAGE.sh[685]: + mdadm --create /dev/md/mdmirror --name mdmirror --uuid aaaaaaaa:bbbbbbbb:cccccccc:00000001 /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm0 /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm1 -v -f --level=1 --raid-devices=2
[    5.254759] TEST-64-UDEV-STORAGE.sh[685]: To optimalize recovery speed, it is recommended to enable write-indent bitmap, do you want to enable it now? [y/N]? mdadm: Note: this array has metadata at the start and
[    5.255085] TEST-64-UDEV-STORAGE.sh[685]:     may not be suitable as a boot device.  If you plan to
[    5.255418] TEST-64-UDEV-STORAGE.sh[685]:     store '/boot' on this device please ensure that
[    5.255745] TEST-64-UDEV-STORAGE.sh[685]:     your boot-loader understands md/v1.x metadata, or use
[    5.256285] TEST-64-UDEV-STORAGE.sh[685]:     --metadata=0.90
[    5.256672] TEST-64-UDEV-STORAGE.sh[685]: mdadm: size set to 64512K
[    5.257063] TEST-64-UDEV-STORAGE.sh[685]: Continue creating array [y/N]? mdadm: create aborted.

This is backward compatible with the older version, which asks just one
question: an mdadm that only asks once never reads the second 'y' line
from the pipe, so the extra input is simply discarded.
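
For illustration, a minimal sketch of the pattern the fix applies (the
device paths are hypothetical; the bitmap question is only asked by the
newer mdadm):

    # Old: answers only the "Continue creating array [y/N]?" prompt
    echo y | mdadm --create /dev/md/test -v -f --level=1 --raid-devices=2 /dev/sdX /dev/sdY

    # New: one 'y' line per prompt; an older mdadm that asks a single
    # question never reads the second line, so it is harmless
    printf 'y\ny\n' | mdadm --create /dev/md/test -v -f --level=1 --raid-devices=2 /dev/sdX /dev/sdY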

test/units/TEST-64-UDEV-STORAGE.sh
test/units/TEST-74-AUX-UTILS.bootctl.sh

index 24c1adaeda16e5b437df012e29696615bedc8ed5..01c3dd68ead0dee1901208116954135f83514fc2 100755 (executable)
@@ -1080,7 +1080,7 @@ testcase_mdadm_basic() {
         "/dev/disk/by-label/$part_name" # ext4 partition
     )
     # Create a simple RAID 1 with an ext4 filesystem
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
+    printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..1} -v -f --level=1 --raid-devices=2
     udevadm wait --settle --timeout=30 "$raid_dev"
     # udevd does not lock md devices, hence we need to trigger uevent after creating filesystem.
     mkfs.ext4 -L "$part_name" "$raid_dev"
@@ -1111,7 +1111,7 @@ testcase_mdadm_basic() {
         "/dev/disk/by-label/$part_name" # ext4 partition
     )
     # Create a simple RAID 5 with an ext4 filesystem
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..2} -v -f --level=5 --raid-devices=3
+    printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..2} -v -f --level=5 --raid-devices=3
     udevadm wait --settle --timeout=30 "$raid_dev"
     mkfs.ext4 -L "$part_name" "$raid_dev"
     udevadm trigger --settle "$raid_dev"
@@ -1152,7 +1152,7 @@ testcase_mdadm_basic() {
         "/dev/disk/by-id/md-uuid-$uuid-part3"
     )
     # Create a simple RAID 10 with an ext4 filesystem
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
+    printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadm{0..3} -v -f --level=10 --raid-devices=4
     udevadm wait --settle --timeout=30 "$raid_dev"
     # Partition the raid device
     # Here, 'udevadm lock' is meaningless, as udevd does not lock MD devices.
@@ -1208,7 +1208,7 @@ testcase_mdadm_lvm() {
         "/dev/disk/by-label/$part_name" # ext4 partition
     )
     # Create a RAID 10 with LVM + ext4
-    echo y | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
+    printf 'y\ny\n' | mdadm --create "$raid_dev" --name "$raid_name" --uuid "$uuid" /dev/disk/by-id/scsi-0systemd_foobar_deadbeefmdadmlvm{0..3} -v -f --level=10 --raid-devices=4
     udevadm wait --settle --timeout=30 "$raid_dev"
     # Create an LVM on the MD
     lvm pvcreate -y "$raid_dev"
index 46fd5d1f2d58d1ff3c3ce664aa96b94a8e10b2e7..650c289aca6461783624c63db8f5e5a50e0cdd37 100755 (executable)
@@ -215,9 +215,9 @@ EOF
 
     udevadm settle
 
-    echo y | mdadm --create /dev/md/raid-esp --name "raid-esp" "${LOOPDEV1}p1" "${LOOPDEV2}p1" -v -f --level=1 --raid-devices=2
+    printf 'y\ny\n' | mdadm --create /dev/md/raid-esp --name "raid-esp" "${LOOPDEV1}p1" "${LOOPDEV2}p1" -v -f --level=1 --raid-devices=2
     mkfs.vfat /dev/md/raid-esp
-    echo y | mdadm --create /dev/md/raid-root --name "raid-root" "${LOOPDEV1}p2" "${LOOPDEV2}p2" -v -f --level=1 --raid-devices=2
+    printf 'y\ny\n' | mdadm --create /dev/md/raid-root --name "raid-root" "${LOOPDEV1}p2" "${LOOPDEV2}p2" -v -f --level=1 --raid-devices=2
     mkfs.ext4 /dev/md/raid-root
     mkfs.btrfs -f -M -d raid1 -m raid1 -L "raid-boot" "${LOOPDEV1}p3" "${LOOPDEV2}p3"
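
As a design note, an unbounded pipe would also answer however many
questions a future mdadm asks; a hedged alternative sketch, again with
hypothetical devices, not what the commit uses:

    # 'yes' repeats 'y' until mdadm closes its stdin, then exits on SIGPIPE
    yes | mdadm --create /dev/md/test -v -f --level=1 --raid-devices=2 /dev/sdX /dev/sdY

The finite printf keeps the expected number of prompts visible in the
test itself, so a further change in mdadm's prompting would fail loudly
rather than be papered over.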