+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_UM_XOR_H
-#define _ASM_UM_XOR_H
-
-#include <asm/cpufeature.h>
-#include <../../x86/include/asm/xor.h>
-
-#endif
mandatory-y += vga.h
mandatory-y += video.h
mandatory-y += word-at-a-time.h
-mandatory-y += xor.h
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * include/asm-generic/xor.h
- *
- * Generic optimized RAID-5 checksumming functions.
- */
-
-extern struct xor_block_template xor_block_8regs;
-extern struct xor_block_template xor_block_32regs;
-extern struct xor_block_template xor_block_8regs_p;
-extern struct xor_block_template xor_block_32regs_p;
config XOR_BLOCKS
tristate
+
+# enabled for architectures that provide an optimized XOR implementation
+config XOR_BLOCKS_ARCH
+	bool
+	depends on XOR_BLOCKS
+	default y if ALPHA
+	default y if ARM
+	default y if ARM64
+	default y if CPU_HAS_LSX # loongarch
+	default y if ALTIVEC # powerpc
+	default y if RISCV_ISA_V
+	default y if S390
+	default y if SPARC
+	default y if X86_32
+	default y if X86_64
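+
+# Each architecture listed above must also provide $(SRCARCH)/xor_arch.h
+# defining arch_xor_init(); see the Makefile and xor-core.c.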
# SPDX-License-Identifier: GPL-2.0
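+# "xor_impl.h" sits at the top level; make it visible to the sources in the
+# per-architecture subdirectories as well.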
+ccflags-y += -I$(src)
+
obj-$(CONFIG_XOR_BLOCKS) += xor.o
xor-y += xor-core.o
xor-y += xor-8regs-prefetch.o
xor-y += xor-32regs-prefetch.o
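+
+# xor-core.c pulls "xor_arch.h" from the implementing architecture's
+# subdirectory (see XOR_BLOCKS_ARCH in Kconfig).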
+ifeq ($(CONFIG_XOR_BLOCKS_ARCH),y)
+CFLAGS_xor-core.o += -I$(src)/$(SRCARCH)
+endif
+
xor-$(CONFIG_ALPHA) += alpha/xor.o
xor-$(CONFIG_ARM) += arm/xor.o
ifeq ($(CONFIG_ARM),y)
/*
* Optimized XOR parity functions for alpha EV5 and EV6
*/
-#include <linux/raid/xor_impl.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
extern void
xor_alpha_2(unsigned long bytes, unsigned long * __restrict p1,
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <asm/special_insns.h>
-#include <asm-generic/xor.h>
extern struct xor_block_template xor_block_alpha;
extern struct xor_block_template xor_block_alpha_prefetch;
* Force the use of alpha_prefetch if EV6, as it is significantly faster in the
* cold cache case.
*/
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
if (implver() == IMPLVER_EV6) {
/*
* Copyright (C) 2001 Russell King
*/
-#include <linux/raid/xor_impl.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
extern struct xor_block_template const xor_block_neon_inner;
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
*/
-#include <linux/raid/xor_impl.h>
+#include "xor_impl.h"
#ifndef __ARM_NEON__
#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
/*
* Copyright (C) 2001 Russell King
*/
-#include <linux/raid/xor_impl.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#define __XOR(a1, a2) a1 ^= a2
/*
* Copyright (C) 2001 Russell King
*/
-#include <asm-generic/xor.h>
#include <asm/neon.h>
extern struct xor_block_template xor_block_arm4regs;
extern struct xor_block_template xor_block_neon;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_arm4regs);
* Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
*/
-#include <linux/raid/xor_impl.h>
#include <asm/simd.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#include "xor-neon.h"
#define XOR_TEMPLATE(_name) \
* Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
*/
-#include <linux/raid/xor_impl.h>
#include <linux/cache.h>
#include <asm/neon-intrinsics.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#include "xor-neon.h"
void __xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
* Authors: Jackie Liu <liuyun01@kylinos.cn>
* Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
*/
-
-#include <asm-generic/xor.h>
#include <asm/simd.h>
extern struct xor_block_template xor_block_neon;
extern struct xor_block_template xor_block_eor3;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_8regs);
/*
* Copyright (C) 2023 WANG Xuerui <git@xen0n.name>
*/
-#ifndef _ASM_LOONGARCH_XOR_H
-#define _ASM_LOONGARCH_XOR_H
-
#include <asm/cpu-features.h>
/*
* the scalar ones, maybe for errata or micro-op reasons. It may be
* appropriate to revisit this after one or two more uarch generations.
*/
-#include <asm-generic/xor.h>
extern struct xor_block_template xor_block_lsx;
extern struct xor_block_template xor_block_lasx;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_8regs);
xor_register(&xor_block_lasx);
#endif
}
-
-#endif /* _ASM_LOONGARCH_XOR_H */
*/
#include <linux/sched.h>
-#include <linux/raid/xor_impl.h>
#include <asm/fpu.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#include "xor_simd.h"
#define MAKE_XOR_GLUE_2(flavor) \
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
-#ifndef _ASM_POWERPC_XOR_H
-#define _ASM_POWERPC_XOR_H
-
#include <asm/cpu_has_feature.h>
-#include <asm-generic/xor.h>
extern struct xor_block_template xor_block_altivec;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_8regs);
xor_register(&xor_block_altivec);
#endif
}
-
-#endif /* _ASM_POWERPC_XOR_H */
#include <linux/preempt.h>
#include <linux/sched.h>
-#include <linux/raid/xor_impl.h>
#include <asm/switch_to.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#include "xor_vmx.h"
static void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
* Copyright (C) 2021 SiFive
*/
-#include <linux/raid/xor_impl.h>
#include <asm/vector.h>
#include <asm/switch_to.h>
#include <asm/asm-prototypes.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1,
const unsigned long *__restrict p2)
* Copyright (C) 2021 SiFive
*/
#include <asm/vector.h>
-#include <asm-generic/xor.h>
extern struct xor_block_template xor_block_rvv;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_8regs);
*/
#include <linux/types.h>
-#include <linux/raid/xor_impl.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
static void xor_xc_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2)
* Copyright IBM Corp. 2016
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
-#ifndef _ASM_S390_XOR_H
-#define _ASM_S390_XOR_H
-
extern struct xor_block_template xor_block_xc;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_force(&xor_block_xc);
}
-
-#endif /* _ASM_S390_XOR_H */
*
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
-#include <linux/raid/xor_impl.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
static void
sparc_2(unsigned long bytes, unsigned long * __restrict p1,
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
-#include <linux/raid/xor_impl.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
void xor_vis_2(unsigned long bytes, unsigned long * __restrict p1,
const unsigned long * __restrict p2);
* Copyright (C) 1997, 1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 2006 David S. Miller <davem@davemloft.net>
*/
-#ifndef ___ASM_SPARC_XOR_H
-#define ___ASM_SPARC_XOR_H
-
#if defined(__sparc__) && defined(__arch64__)
#include <asm/spitfire.h>
extern struct xor_block_template xor_block_VIS;
extern struct xor_block_template xor_block_niagara;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
/* Force VIS for everything except Niagara. */
}
#else /* sparc64 */
-/* For grins, also test the generic routines. */
-#include <asm-generic/xor.h>
-
extern struct xor_block_template xor_block_SPARC;
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
xor_register(&xor_block_8regs);
xor_register(&xor_block_SPARC);
}
#endif /* !sparc64 */
-#endif /* ___ASM_SPARC_XOR_H */
--- /dev/null
+/* SPDX-License-Identifier: GPL-2.0 */
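+/* UML reuses the x86 implementation, as the old asm/xor.h did. */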
+#include <../x86/xor_arch.h>
* Based on Ingo Molnar and Zach Brown's respective MMX and SSE routines
*/
#include <linux/compiler.h>
-#include <linux/raid/xor_impl.h>
#include <asm/fpu/api.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#define BLOCK4(i) \
BLOCK(32 * i, 0) \
*
* Copyright (C) 1998 Ingo Molnar.
*/
-#include <linux/raid/xor_impl.h>
#include <asm/fpu/api.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#define LD(x, y) " movq 8*("#x")(%1), %%mm"#y" ;\n"
#define ST(x, y) " movq %%mm"#y", 8*("#x")(%1) ;\n"
* x86-64 changes / gcc fixes from Andi Kleen.
* Copyright 2002 Andi Kleen, SuSE Labs.
*/
-#include <linux/raid/xor_impl.h>
#include <asm/fpu/api.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
+#include "xor_arch.h"
#ifdef CONFIG_X86_32
/* reduce register pressure */
/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifndef _ASM_X86_XOR_H
-#define _ASM_X86_XOR_H
-
#include <asm/cpufeature.h>
-#include <asm-generic/xor.h>
extern struct xor_block_template xor_block_pII_mmx;
extern struct xor_block_template xor_block_p5_mmx;
*
* 32-bit without MMX can fall back to the generic routines.
*/
-#define arch_xor_init arch_xor_init
static __always_inline void __init arch_xor_init(void)
{
if (boot_cpu_has(X86_FEATURE_AVX) &&
xor_register(&xor_block_32regs_p);
}
}
-
-#endif /* _ASM_X86_XOR_H */
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/prefetch.h>
-#include <linux/raid/xor_impl.h>
-#include <asm-generic/xor.h>
+#include "xor_impl.h"
static void
xor_32regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <linux/raid/xor_impl.h>
-#include <asm-generic/xor.h>
+#include "xor_impl.h"
static void
xor_32regs_2(unsigned long bytes, unsigned long * __restrict p1,
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/prefetch.h>
-#include <linux/raid/xor_impl.h>
-#include <asm-generic/xor.h>
+#include "xor_impl.h"
static void
xor_8regs_p_2(unsigned long bytes, unsigned long * __restrict p1,
// SPDX-License-Identifier: GPL-2.0-or-later
-#include <linux/raid/xor_impl.h>
-#include <asm-generic/xor.h>
+#include "xor_impl.h"
static void
xor_8regs_2(unsigned long bytes, unsigned long * __restrict p1,
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/raid/xor.h>
-#include <linux/raid/xor_impl.h>
#include <linux/jiffies.h>
#include <linux/preempt.h>
-#include <asm/xor.h>
+#include "xor_impl.h"
/* The xor routines to use. */
static struct xor_block_template *active_template;
return 0;
}
-static int __init xor_init(void)
-{
-#ifdef arch_xor_init
- arch_xor_init();
+#ifdef CONFIG_XOR_BLOCKS_ARCH
+#include "xor_arch.h" /* $SRCARCH/xor_arch.h */
#else
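+/* No arch-specific templates; register the generic ones for the speed test. */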
+static void __init arch_xor_init(void)
+{
xor_register(&xor_block_8regs);
xor_register(&xor_block_8regs_p);
xor_register(&xor_block_32regs);
xor_register(&xor_block_32regs_p);
-#endif
+}
+#endif /* CONFIG_XOR_BLOCKS_ARCH */
+
+static int __init xor_init(void)
+{
+ arch_xor_init();
/*
* If this arch/cpu has a short-circuited selection, don't loop through
const unsigned long * __restrict);
};
+/* generic implementations */
+extern struct xor_block_template xor_block_8regs;
+extern struct xor_block_template xor_block_32regs;
+extern struct xor_block_template xor_block_8regs_p;
+extern struct xor_block_template xor_block_32regs_p;
+
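+/*
+ * xor_register() adds a template as a candidate for the boot-time speed
+ * test; xor_force() bypasses the test and selects its template directly.
+ */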
void __init xor_register(struct xor_block_template *tmpl);
void __init xor_force(struct xor_block_template *tmpl);