git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
xor: remove macro abuse for XOR implementation registrations
author: Christoph Hellwig <hch@lst.de>
Fri, 27 Mar 2026 06:16:41 +0000 (07:16 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 3 Apr 2026 06:36:17 +0000 (23:36 -0700)
Drop the pretty confusing historic XOR_TRY_TEMPLATES and
XOR_SELECT_TEMPLATE, and instead let the architectures provide an
arch_xor_init that calls either xor_register to register candidates or
xor_force to force a specific implementation.

Link: https://lkml.kernel.org/r/20260327061704.3707577-10-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Eric Biggers <ebiggers@kernel.org>
Tested-by: Eric Biggers <ebiggers@kernel.org>
Cc: Albert Ou <aou@eecs.berkeley.edu>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Alexandre Ghiti <alex@ghiti.fr>
Cc: Andreas Larsson <andreas@gaisler.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Ard Biesheuvel <ardb@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "Borislav Petkov (AMD)" <bp@alien8.de>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Chris Mason <clm@fb.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: David Sterba <dsterba@suse.com>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason A. Donenfeld <jason@zx2c4.com>
Cc: Johannes Berg <johannes@sipsolutions.net>
Cc: Li Nan <linan122@huawei.com>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Magnus Lindholm <linmag7@gmail.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Richard Weinberger <richard@nod.at>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Song Liu <song@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Ted Ts'o <tytso@mit.edu>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
16 files changed:
arch/alpha/include/asm/xor.h
arch/arm/include/asm/xor.h
arch/arm64/include/asm/xor.h
arch/loongarch/include/asm/xor.h
arch/powerpc/include/asm/xor.h
arch/riscv/include/asm/xor.h
arch/s390/include/asm/xor.h
arch/sparc/include/asm/xor_32.h
arch/sparc/include/asm/xor_64.h
arch/x86/include/asm/xor.h
arch/x86/include/asm/xor_32.h
arch/x86/include/asm/xor_64.h
arch/x86/include/asm/xor_avx.h
include/asm-generic/xor.h
include/linux/raid/xor_impl.h
lib/raid/xor/xor-core.c

index e0de0c233ab923f477ae03302015199bf9935050..4c8085711df1826ca1dc9ae27525f989156a37b6 100644 (file)
@@ -851,16 +851,19 @@ static struct xor_block_template xor_block_alpha_prefetch = {
 /* For grins, also test the generic routines.  */
 #include <asm-generic/xor.h>
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-       do {                                            \
-               xor_speed(&xor_block_8regs);            \
-               xor_speed(&xor_block_32regs);           \
-               xor_speed(&xor_block_alpha);            \
-               xor_speed(&xor_block_alpha_prefetch);   \
-       } while (0)
-
-/* Force the use of alpha_prefetch if EV6, as it is significantly
-   faster in the cold cache case.  */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
-       (implver() == IMPLVER_EV6 ? &xor_block_alpha_prefetch : FASTEST)
+/*
+ * Force the use of alpha_prefetch if EV6, as it is significantly faster in the
+ * cold cache case.
+ */
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       if (implver() == IMPLVER_EV6) {
+               xor_force(&xor_block_alpha_prefetch);
+       } else {
+               xor_register(&xor_block_8regs);
+               xor_register(&xor_block_32regs);
+               xor_register(&xor_block_alpha);
+               xor_register(&xor_block_alpha_prefetch);
+       }
+}
index bca2a6514746e150f9c2c6c764c9c5a8fc8473d2..b2dcd49186e2b9cc97101ed3ce2bd204a6263468 100644 (file)
@@ -138,15 +138,6 @@ static struct xor_block_template xor_block_arm4regs = {
        .do_5   = xor_arm4regs_5,
 };
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                      \
-       do {                                    \
-               xor_speed(&xor_block_arm4regs); \
-               xor_speed(&xor_block_8regs);    \
-               xor_speed(&xor_block_32regs);   \
-               NEON_TEMPLATES;                 \
-       } while (0)
-
 #ifdef CONFIG_KERNEL_MODE_NEON
 
 extern struct xor_block_template const xor_block_neon_inner;
@@ -201,8 +192,16 @@ static struct xor_block_template xor_block_neon = {
        .do_5   = xor_neon_5
 };
 
-#define NEON_TEMPLATES \
-       do { if (cpu_has_neon()) xor_speed(&xor_block_neon); } while (0)
-#else
-#define NEON_TEMPLATES
+#endif /* CONFIG_KERNEL_MODE_NEON */
+
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       xor_register(&xor_block_arm4regs);
+       xor_register(&xor_block_8regs);
+       xor_register(&xor_block_32regs);
+#ifdef CONFIG_KERNEL_MODE_NEON
+       if (cpu_has_neon())
+               xor_register(&xor_block_neon);
 #endif
+}
index bb7428d4ebc65e8cc51165728b0a21102bf2ec25..3cee1eb86371b23030244941dcf0eabbb214cc43 100644 (file)
@@ -60,14 +60,14 @@ static struct xor_block_template xor_block_arm64 = {
        .do_4   = xor_neon_4,
        .do_5   = xor_neon_5
 };
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES           \
-       do {        \
-               xor_speed(&xor_block_8regs);    \
-               xor_speed(&xor_block_32regs);    \
-               if (cpu_has_neon()) { \
-                       xor_speed(&xor_block_arm64);\
-               } \
-       } while (0)
+
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       xor_register(&xor_block_8regs);
+       xor_register(&xor_block_32regs);
+       if (cpu_has_neon())
+               xor_register(&xor_block_arm64);
+}
 
 #endif /* ! CONFIG_KERNEL_MODE_NEON */
index 12467fffee46875b444b500532f98e001b59eb8b..d17c0e3b047f13b1f53447e72cbcad0f9e49064f 100644 (file)
@@ -16,14 +16,6 @@ static struct xor_block_template xor_block_lsx = {
        .do_4 = xor_lsx_4,
        .do_5 = xor_lsx_5,
 };
-
-#define XOR_SPEED_LSX()                                        \
-       do {                                            \
-               if (cpu_has_lsx)                        \
-                       xor_speed(&xor_block_lsx);      \
-       } while (0)
-#else /* CONFIG_CPU_HAS_LSX */
-#define XOR_SPEED_LSX()
 #endif /* CONFIG_CPU_HAS_LSX */
 
 #ifdef CONFIG_CPU_HAS_LASX
@@ -34,14 +26,6 @@ static struct xor_block_template xor_block_lasx = {
        .do_4 = xor_lasx_4,
        .do_5 = xor_lasx_5,
 };
-
-#define XOR_SPEED_LASX()                                       \
-       do {                                                    \
-               if (cpu_has_lasx)                               \
-                       xor_speed(&xor_block_lasx);             \
-       } while (0)
-#else /* CONFIG_CPU_HAS_LASX */
-#define XOR_SPEED_LASX()
 #endif /* CONFIG_CPU_HAS_LASX */
 
 /*
@@ -54,15 +38,21 @@ static struct xor_block_template xor_block_lasx = {
  */
 #include <asm-generic/xor.h>
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-do {                                                   \
-       xor_speed(&xor_block_8regs);                    \
-       xor_speed(&xor_block_8regs_p);                  \
-       xor_speed(&xor_block_32regs);                   \
-       xor_speed(&xor_block_32regs_p);                 \
-       XOR_SPEED_LSX();                                \
-       XOR_SPEED_LASX();                               \
-} while (0)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       xor_register(&xor_block_8regs);
+       xor_register(&xor_block_8regs_p);
+       xor_register(&xor_block_32regs);
+       xor_register(&xor_block_32regs_p);
+#ifdef CONFIG_CPU_HAS_LSX
+       if (cpu_has_lsx)
+               xor_register(&xor_block_lsx);
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
+       if (cpu_has_lasx)
+               xor_register(&xor_block_lasx);
+#endif
+}
 
 #endif /* _ASM_LOONGARCH_XOR_H */
index 37d05c11d09cda74709e7936d27f86208bfcec9a..30224c5279c4bf5c40c98ec6ae7644f6f1eb257e 100644 (file)
@@ -21,27 +21,22 @@ static struct xor_block_template xor_block_altivec = {
        .do_4 = xor_altivec_4,
        .do_5 = xor_altivec_5,
 };
-
-#define XOR_SPEED_ALTIVEC()                            \
-       do {                                            \
-               if (cpu_has_feature(CPU_FTR_ALTIVEC))   \
-                       xor_speed(&xor_block_altivec);  \
-       } while (0)
-#else
-#define XOR_SPEED_ALTIVEC()
-#endif
+#endif /* CONFIG_ALTIVEC */
 
 /* Also try the generic routines. */
 #include <asm-generic/xor.h>
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-do {                                                   \
-       xor_speed(&xor_block_8regs);                    \
-       xor_speed(&xor_block_8regs_p);                  \
-       xor_speed(&xor_block_32regs);                   \
-       xor_speed(&xor_block_32regs_p);                 \
-       XOR_SPEED_ALTIVEC();                            \
-} while (0)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       xor_register(&xor_block_8regs);
+       xor_register(&xor_block_8regs_p);
+       xor_register(&xor_block_32regs);
+       xor_register(&xor_block_32regs_p);
+#ifdef CONFIG_ALTIVEC
+       if (cpu_has_feature(CPU_FTR_ALTIVEC))
+               xor_register(&xor_block_altivec);
+#endif
+}
 
 #endif /* _ASM_POWERPC_XOR_H */
index 96011861e46b4df24cb973a9066324283ad17b60..ed5f27903efc41b7c2a3289e1c991cde029d7729 100644 (file)
@@ -55,14 +55,15 @@ static struct xor_block_template xor_block_rvv = {
        .do_4 = xor_vector_4,
        .do_5 = xor_vector_5
 };
+#endif /* CONFIG_RISCV_ISA_V */
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES           \
-       do {        \
-               xor_speed(&xor_block_8regs);    \
-               xor_speed(&xor_block_32regs);    \
-               if (has_vector()) { \
-                       xor_speed(&xor_block_rvv);\
-               } \
-       } while (0)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       xor_register(&xor_block_8regs);
+       xor_register(&xor_block_32regs);
+#ifdef CONFIG_RISCV_ISA_V
+       if (has_vector())
+               xor_register(&xor_block_rvv);
 #endif
+}
index 857d6759b67f0df42cd7f713db22e2297875b517..4e2233f64da98df4a8303e6dc9455b5175ae8793 100644 (file)
 
 extern struct xor_block_template xor_block_xc;
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-do {                                                   \
-       xor_speed(&xor_block_xc);                       \
-} while (0)
-
-#define XOR_SELECT_TEMPLATE(FASTEST)   (&xor_block_xc)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       xor_force(&xor_block_xc);
+}
 
 #endif /* _ASM_S390_XOR_H */
index 0351813cf3af5af8d3dec9a599e7b0f3ae31a8ba..8fbf0c07ec2897f635e9891005122e10c037a1a3 100644 (file)
@@ -259,10 +259,10 @@ static struct xor_block_template xor_block_SPARC = {
 /* For grins, also test the generic routines.  */
 #include <asm-generic/xor.h>
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-       do {                                            \
-               xor_speed(&xor_block_8regs);            \
-               xor_speed(&xor_block_32regs);           \
-               xor_speed(&xor_block_SPARC);            \
-       } while (0)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       xor_register(&xor_block_8regs);
+       xor_register(&xor_block_32regs);
+       xor_register(&xor_block_SPARC);
+}
index caaddea8ad79dd577876f3837582f189d572eb74..e0482ecc0a68bd51ffac0b558037c6042f3254a7 100644 (file)
@@ -60,20 +60,17 @@ static struct xor_block_template xor_block_niagara = {
         .do_5  = xor_niagara_5,
 };
 
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-       do {                                            \
-               xor_speed(&xor_block_VIS);              \
-               xor_speed(&xor_block_niagara);          \
-       } while (0)
-
-/* For VIS for everything except Niagara.  */
-#define XOR_SELECT_TEMPLATE(FASTEST) \
-       ((tlb_type == hypervisor && \
-         (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \
-          sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \
-          sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \
-          sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \
-          sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \
-        &xor_block_niagara : \
-        &xor_block_VIS)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       /* Force VIS for everything except Niagara.  */
+       if (tlb_type == hypervisor &&
+           (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
+            sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
+            sun4v_chip_type == SUN4V_CHIP_NIAGARA3 ||
+            sun4v_chip_type == SUN4V_CHIP_NIAGARA4 ||
+            sun4v_chip_type == SUN4V_CHIP_NIAGARA5))
+               xor_force(&xor_block_niagara);
+       else
+               xor_force(&xor_block_VIS);
+}
index 7b0307acc4103c897736fa0fcf5a003e4d1165bd..33f5620d8d691ea7313b61d3da952aad2b8c5bf2 100644 (file)
@@ -496,7 +496,4 @@ static struct xor_block_template xor_block_sse_pf64 = {
 # include <asm/xor_64.h>
 #endif
 
-#define XOR_SELECT_TEMPLATE(FASTEST) \
-       AVX_SELECT(FASTEST)
-
 #endif /* _ASM_X86_XOR_H */
index 7a6b9474591e75cf57a3a2fb5cbe6396312f769c..ee32d08c27bc59ab85b814cfb9fde48cd53ba280 100644 (file)
@@ -552,22 +552,24 @@ static struct xor_block_template xor_block_pIII_sse = {
 /* We force the use of the SSE xor block because it can write around L2.
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                              \
-do {                                                   \
-       AVX_XOR_SPEED;                                  \
-       if (boot_cpu_has(X86_FEATURE_XMM)) {                            \
-               xor_speed(&xor_block_pIII_sse);         \
-               xor_speed(&xor_block_sse_pf64);         \
-       } else if (boot_cpu_has(X86_FEATURE_MMX)) {     \
-               xor_speed(&xor_block_pII_mmx);          \
-               xor_speed(&xor_block_p5_mmx);           \
-       } else {                                        \
-               xor_speed(&xor_block_8regs);            \
-               xor_speed(&xor_block_8regs_p);          \
-               xor_speed(&xor_block_32regs);           \
-               xor_speed(&xor_block_32regs_p);         \
-       }                                               \
-} while (0)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       if (boot_cpu_has(X86_FEATURE_AVX) &&
+           boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               xor_force(&xor_block_avx);
+       } else if (boot_cpu_has(X86_FEATURE_XMM)) {
+               xor_register(&xor_block_pIII_sse);
+               xor_register(&xor_block_sse_pf64);
+       } else if (boot_cpu_has(X86_FEATURE_MMX)) {
+               xor_register(&xor_block_pII_mmx);
+               xor_register(&xor_block_p5_mmx);
+       } else {
+               xor_register(&xor_block_8regs);
+               xor_register(&xor_block_8regs_p);
+               xor_register(&xor_block_32regs);
+               xor_register(&xor_block_32regs_p);
+       }
+}
 
 #endif /* _ASM_X86_XOR_32_H */
index 0307e4ec50440571399cc37137cd14423fc45a07..2d2ceb2418665e287af80300bf23002bb6436821 100644 (file)
@@ -17,12 +17,16 @@ static struct xor_block_template xor_block_sse = {
 /* We force the use of the SSE xor block because it can write around L2.
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched.  */
-#undef XOR_TRY_TEMPLATES
-#define XOR_TRY_TEMPLATES                      \
-do {                                           \
-       AVX_XOR_SPEED;                          \
-       xor_speed(&xor_block_sse_pf64);         \
-       xor_speed(&xor_block_sse);              \
-} while (0)
+#define arch_xor_init arch_xor_init
+static __always_inline void __init arch_xor_init(void)
+{
+       if (boot_cpu_has(X86_FEATURE_AVX) &&
+           boot_cpu_has(X86_FEATURE_OSXSAVE)) {
+               xor_force(&xor_block_avx);
+       } else {
+               xor_register(&xor_block_sse_pf64);
+               xor_register(&xor_block_sse);
+       }
+}
 
 #endif /* _ASM_X86_XOR_64_H */
index 7f81dd5897f417866d9554443029cf899f43df2a..c600888436bb906ad5ae9391f9326fed2802f07a 100644 (file)
@@ -166,13 +166,4 @@ static struct xor_block_template xor_block_avx = {
        .do_5 = xor_avx_5,
 };
 
-#define AVX_XOR_SPEED \
-do { \
-       if (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE)) \
-               xor_speed(&xor_block_avx); \
-} while (0)
-
-#define AVX_SELECT(FASTEST) \
-       (boot_cpu_has(X86_FEATURE_AVX) && boot_cpu_has(X86_FEATURE_OSXSAVE) ? &xor_block_avx : FASTEST)
-
 #endif
index 44509d48fca21ec835a695a8cfdc6d7161d64863..79c0096aa9d957b6f278a1050dd685fb66847cc2 100644 (file)
@@ -728,11 +728,3 @@ static struct xor_block_template xor_block_32regs_p __maybe_unused = {
        .do_4 = xor_32regs_p_4,
        .do_5 = xor_32regs_p_5,
 };
-
-#define XOR_TRY_TEMPLATES                      \
-       do {                                    \
-               xor_speed(&xor_block_8regs);    \
-               xor_speed(&xor_block_8regs_p);  \
-               xor_speed(&xor_block_32regs);   \
-               xor_speed(&xor_block_32regs_p); \
-       } while (0)
index a1890cd668124318cd0acf69b9864261297be47c..6ed4c445ab24ce65f7db56c0f2ddc88347b3731b 100644 (file)
@@ -2,6 +2,8 @@
 #ifndef _XOR_IMPL_H
 #define _XOR_IMPL_H
 
+#include <linux/init.h>
+
 struct xor_block_template {
        struct xor_block_template *next;
        const char *name;
@@ -22,4 +24,7 @@ struct xor_block_template {
                     const unsigned long * __restrict);
 };
 
+void __init xor_register(struct xor_block_template *tmpl);
+void __init xor_force(struct xor_block_template *tmpl);
+
 #endif /* _XOR_IMPL_H */
index db1824011a12a83cdfad4aa44e890e2f4428a482..93608b5fece9e3f9e959c619889af36ffd536ec2 100644 (file)
 #include <linux/preempt.h>
 #include <asm/xor.h>
 
-#ifndef XOR_SELECT_TEMPLATE
-#define XOR_SELECT_TEMPLATE(x) (x)
-#endif
-
 /* The xor routines to use.  */
 static struct xor_block_template *active_template;
 
@@ -55,12 +51,33 @@ EXPORT_SYMBOL(xor_blocks);
 static struct xor_block_template *__initdata template_list;
 static bool __initdata xor_forced = false;
 
-static void __init do_xor_register(struct xor_block_template *tmpl)
+/**
+ * xor_register - register a XOR template
+ * @tmpl:      template to register
+ *
+ * Register a XOR implementation with the core.  Registered implementations
+ * will be measured by a trivial benchmark, and the fastest one is chosen
+ * unless an implementation is forced using xor_force().
+ */
+void __init xor_register(struct xor_block_template *tmpl)
 {
        tmpl->next = template_list;
        template_list = tmpl;
 }
 
+/**
+ * xor_force - force use of a XOR template
+ * @tmpl:      template to register
+ *
+ * Register a XOR implementation with the core and force using it.  Forcing
+ * an implementation will make the core ignore any template registered using
+ * xor_register(), or any previous implementation forced using xor_force().
+ */
+void __init xor_force(struct xor_block_template *tmpl)
+{
+       active_template = tmpl;
+}
+
 #define BENCH_SIZE     4096
 #define REPS           800U
 
@@ -126,11 +143,19 @@ static int __init calibrate_xor_blocks(void)
 
 static int __init xor_init(void)
 {
+#ifdef arch_xor_init
+       arch_xor_init();
+#else
+       xor_register(&xor_block_8regs);
+       xor_register(&xor_block_8regs_p);
+       xor_register(&xor_block_32regs);
+       xor_register(&xor_block_32regs_p);
+#endif
+
        /*
         * If this arch/cpu has a short-circuited selection, don't loop through
         * all the possible functions, just use the best one.
         */
-       active_template = XOR_SELECT_TEMPLATE(NULL);
        if (active_template) {
                pr_info("xor: automatically using best checksumming function   %-10s\n",
                        active_template->name);
@@ -138,10 +163,6 @@ static int __init xor_init(void)
                return 0;
        }
 
-#define xor_speed      do_xor_register
-       XOR_TRY_TEMPLATES;
-#undef xor_speed
-
 #ifdef MODULE
        return calibrate_xor_blocks();
 #else