From: Christoph Hellwig Date: Fri, 27 Mar 2026 06:17:00 +0000 (+0100) Subject: xor: add a kunit test case X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=af53e85ef797d45b364edf330eb008639b5c98c2;p=thirdparty%2Flinux.git xor: add a kunit test case Add a test case for the XOR routines loosely based on the CRC kunit test. Link: https://lkml.kernel.org/r/20260327061704.3707577-29-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Eric Biggers Tested-by: Eric Biggers Cc: Albert Ou Cc: Alexander Gordeev Cc: Alexandre Ghiti Cc: Andreas Larsson Cc: Anton Ivanov Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: "Borislav Petkov (AMD)" Cc: Catalin Marinas Cc: Chris Mason Cc: Christian Borntraeger Cc: Dan Williams Cc: David S. Miller Cc: David Sterba Cc: Heiko Carstens Cc: Herbert Xu Cc: "H. Peter Anvin" Cc: Huacai Chen Cc: Ingo Molnar Cc: Jason A. Donenfeld Cc: Johannes Berg Cc: Li Nan Cc: Madhavan Srinivasan Cc: Magnus Lindholm Cc: Matt Turner Cc: Michael Ellerman Cc: Nicholas Piggin Cc: Palmer Dabbelt Cc: Richard Henderson Cc: Richard Weinberger Cc: Russell King Cc: Song Liu Cc: Sven Schnelle Cc: Ted Ts'o Cc: Vasily Gorbik Cc: WANG Xuerui Cc: Will Deacon Signed-off-by: Andrew Morton --- diff --git a/lib/raid/.kunitconfig b/lib/raid/.kunitconfig new file mode 100644 index 0000000000000..351d22ed19540 --- /dev/null +++ b/lib/raid/.kunitconfig @@ -0,0 +1,3 @@ +CONFIG_KUNIT=y +CONFIG_BTRFS_FS=y +CONFIG_XOR_KUNIT_TEST=y diff --git a/lib/raid/Kconfig b/lib/raid/Kconfig index 81cb3f9c0a7bb..5ab2b0a7be4c6 100644 --- a/lib/raid/Kconfig +++ b/lib/raid/Kconfig @@ -17,3 +17,14 @@ config XOR_BLOCKS_ARCH default y if X86_32 default y if X86_64 bool + +config XOR_KUNIT_TEST + tristate "KUnit tests for xor_gen" if !KUNIT_ALL_TESTS + depends on KUNIT + depends on XOR_BLOCKS + default KUNIT_ALL_TESTS + help + Unit tests for the XOR library functions. + + This is intended to help people writing architecture-specific + optimized versions. If unsure, say N. 
diff --git a/lib/raid/xor/Makefile b/lib/raid/xor/Makefile index df55823c4d827..4d633dfd5b90c 100644 --- a/lib/raid/xor/Makefile +++ b/lib/raid/xor/Makefile @@ -29,7 +29,7 @@ xor-$(CONFIG_SPARC64) += sparc/xor-sparc64.o sparc/xor-sparc64-glue.o xor-$(CONFIG_S390) += s390/xor.o xor-$(CONFIG_X86_32) += x86/xor-avx.o x86/xor-sse.o x86/xor-mmx.o xor-$(CONFIG_X86_64) += x86/xor-avx.o x86/xor-sse.o - +obj-y += tests/ CFLAGS_arm/xor-neon.o += $(CC_FLAGS_FPU) CFLAGS_REMOVE_arm/xor-neon.o += $(CC_FLAGS_NO_FPU) diff --git a/lib/raid/xor/tests/Makefile b/lib/raid/xor/tests/Makefile new file mode 100644 index 0000000000000..661e8f6ffd1f3 --- /dev/null +++ b/lib/raid/xor/tests/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only + +obj-$(CONFIG_XOR_KUNIT_TEST) += xor_kunit.o diff --git a/lib/raid/xor/tests/xor_kunit.c b/lib/raid/xor/tests/xor_kunit.c new file mode 100644 index 0000000000000..0c2a3a420bf94 --- /dev/null +++ b/lib/raid/xor/tests/xor_kunit.c @@ -0,0 +1,187 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Unit test the XOR library functions. + * + * Copyright 2024 Google LLC + * Copyright 2026 Christoph Hellwig + * + * Based on the CRC tests by Eric Biggers . 
+ */ +#include <kunit/test.h> +#include <linux/prandom.h> +#include <linux/raid/xor.h> +#include <linux/string_choices.h> +#include <linux/vmalloc.h> + +#define XOR_KUNIT_SEED 42 +#define XOR_KUNIT_MAX_BYTES 16384 +#define XOR_KUNIT_MAX_BUFFERS 64 +#define XOR_KUNIT_NUM_TEST_ITERS 1000 + +static struct rnd_state rng; +static void *test_buffers[XOR_KUNIT_MAX_BUFFERS]; +static void *test_dest; +static void *test_ref; +static size_t test_buflen; + +static u32 rand32(void) +{ + return prandom_u32_state(&rng); +} + +/* Reference implementation using dumb byte-wise XOR */ +static void xor_ref(void *dest, void **srcs, unsigned int src_cnt, + unsigned int bytes) +{ + unsigned int off, idx; + u8 *d = dest; + + for (off = 0; off < bytes; off++) { + for (idx = 0; idx < src_cnt; idx++) { + u8 *src = srcs[idx]; + + d[off] ^= src[off]; + } + } +} + +/* Generate a random length that is a multiple of 512. */ +static unsigned int random_length(unsigned int max_length) +{ + return round_up((rand32() % max_length) + 1, 512); +} + +/* Generate a random alignment that is a multiple of 64. */ +static unsigned int random_alignment(unsigned int max_alignment) +{ + return ((rand32() % max_alignment) + 1) & ~63; +} + +static void xor_generate_random_data(void) +{ + int i; + + prandom_bytes_state(&rng, test_dest, test_buflen); + memcpy(test_ref, test_dest, test_buflen); + for (i = 0; i < XOR_KUNIT_MAX_BUFFERS; i++) + prandom_bytes_state(&rng, test_buffers[i], test_buflen); +} + +/* Test that xor_gen gives the same result as a reference implementation. */ +static void xor_test(struct kunit *test) +{ + void *aligned_buffers[XOR_KUNIT_MAX_BUFFERS]; + size_t i; + + for (i = 0; i < XOR_KUNIT_NUM_TEST_ITERS; i++) { + unsigned int nr_buffers = + (rand32() % XOR_KUNIT_MAX_BUFFERS) + 1; + unsigned int len = random_length(XOR_KUNIT_MAX_BYTES); + unsigned int max_alignment, align = 0; + void *buffers; + + if (rand32() % 8 == 0) + /* Refresh the data occasionally. */ + xor_generate_random_data(); + + /* + * If we're not using the entire buffer size, inject randomized + * alignment into the buffer. 
+ */ + max_alignment = XOR_KUNIT_MAX_BYTES - len; + if (max_alignment == 0) { + buffers = test_buffers; + } else if (rand32() % 2 == 0) { + /* Use random alignments mod 64 */ + int j; + + for (j = 0; j < nr_buffers; j++) + aligned_buffers[j] = test_buffers[j] + + random_alignment(max_alignment); + buffers = aligned_buffers; + align = random_alignment(max_alignment); + } else { + /* Go up to the guard page, to catch buffer overreads */ + int j; + + align = test_buflen - len; + for (j = 0; j < nr_buffers; j++) + aligned_buffers[j] = test_buffers[j] + align; + buffers = aligned_buffers; + } + + /* + * Compute the XOR, and verify that it equals the XOR computed + * by a simple byte-at-a-time reference implementation. + */ + xor_ref(test_ref + align, buffers, nr_buffers, len); + xor_gen(test_dest + align, buffers, nr_buffers, len); + KUNIT_EXPECT_MEMEQ_MSG(test, test_ref + align, + test_dest + align, len, + "Wrong result with buffers=%u, len=%u, unaligned=%s, at_end=%s", + nr_buffers, len, + str_yes_no(max_alignment), + str_yes_no(align + len == test_buflen)); + } +} + +static struct kunit_case xor_test_cases[] = { + KUNIT_CASE(xor_test), + {}, +}; + +static int xor_suite_init(struct kunit_suite *suite) +{ + int i; + + /* + * Allocate the test buffer using vmalloc() with a page-aligned length + * so that it is immediately followed by a guard page. This allows + * buffer overreads to be detected, even in assembly code. 
+ */ + test_buflen = round_up(XOR_KUNIT_MAX_BYTES, PAGE_SIZE); + test_ref = vmalloc(test_buflen); + if (!test_ref) + return -ENOMEM; + test_dest = vmalloc(test_buflen); + if (!test_dest) + goto out_free_ref; + for (i = 0; i < XOR_KUNIT_MAX_BUFFERS; i++) { + test_buffers[i] = vmalloc(test_buflen); + if (!test_buffers[i]) + goto out_free_buffers; + } + + prandom_seed_state(&rng, XOR_KUNIT_SEED); + xor_generate_random_data(); + return 0; + +out_free_buffers: + while (--i >= 0) + vfree(test_buffers[i]); + vfree(test_dest); +out_free_ref: + vfree(test_ref); + return -ENOMEM; +} + +static void xor_suite_exit(struct kunit_suite *suite) +{ + int i; + + vfree(test_ref); + vfree(test_dest); + for (i = 0; i < XOR_KUNIT_MAX_BUFFERS; i++) + vfree(test_buffers[i]); +} + +static struct kunit_suite xor_test_suite = { + .name = "xor", + .test_cases = xor_test_cases, + .suite_init = xor_suite_init, + .suite_exit = xor_suite_exit, +}; +kunit_test_suite(xor_test_suite); + +MODULE_DESCRIPTION("Unit test for the XOR library functions"); +MODULE_LICENSE("GPL");