From 63432fd625372a0e79fb00a4009af204f4edc013 Mon Sep 17 00:00:00 2001
From: Demian Shulhan <demyansh@gmail.com>
Date: Sun, 29 Mar 2026 07:43:38 +0000
Subject: [PATCH] lib/crc: arm64: add NEON accelerated CRC64-NVMe
 implementation

Implement an optimized CRC64 (NVMe) algorithm for ARM64 using NEON
Polynomial Multiply Long (PMULL) instructions. The generic
shift-and-XOR software implementation is slow, which creates a
bottleneck in NVMe and other storage subsystems. The acceleration is
implemented using C intrinsics (<arm_neon.h>) rather than raw assembly
for better readability and maintainability.

Key highlights of this implementation:

- Uses 4KB chunking inside scoped_ksimd() to avoid preemption latency
  spikes on large buffers.
- Pre-calculates and loads the fold constants via vld1q_u64() to
  minimize register spilling.
- Benchmarks show the break-even point against the generic
  implementation is around 128 bytes. The PMULL path is therefore
  enabled only for len >= 128.

Performance results (kunit crc_benchmark on Cortex-A72):

- Generic (len=4096): ~268 MB/s
- PMULL (len=4096): ~1556 MB/s (nearly 6x improvement)

Signed-off-by: Demian Shulhan <demyansh@gmail.com>
Link: https://lore.kernel.org/r/20260329074338.1053550-1-demyansh@gmail.com
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
---
 lib/crc/Kconfig                  |  1 +
 lib/crc/Makefile                 |  8 +++-
 lib/crc/arm64/crc64-neon-inner.c | 78 ++++++++++++++++++++++++++++++++
 lib/crc/arm64/crc64.h            | 30 ++++++++++++
 4 files changed, 116 insertions(+), 1 deletion(-)
 create mode 100644 lib/crc/arm64/crc64-neon-inner.c
 create mode 100644 lib/crc/arm64/crc64.h

diff --git a/lib/crc/Kconfig b/lib/crc/Kconfig
index 52e216f397468..31038c8d111ad 100644
--- a/lib/crc/Kconfig
+++ b/lib/crc/Kconfig
@@ -82,6 +82,7 @@ config CRC64
 config CRC64_ARCH
 	bool
 	depends on CRC64 && CRC_OPTIMIZATIONS
+	default y if ARM64
 	default y if RISCV && RISCV_ISA_ZBC && 64BIT
 	default y if X86_64
 
diff --git a/lib/crc/Makefile b/lib/crc/Makefile
index 7543ad295ab6f..c9c35419b39c7 100644
--- a/lib/crc/Makefile
+++ b/lib/crc/Makefile
@@ -38,9 +38,15 @@ obj-$(CONFIG_CRC64) += crc64.o
 crc64-y := crc64-main.o
 ifeq ($(CONFIG_CRC64_ARCH),y)
 CFLAGS_crc64-main.o += -I$(src)/$(SRCARCH)
+
+CFLAGS_REMOVE_arm64/crc64-neon-inner.o += -mgeneral-regs-only
+CFLAGS_arm64/crc64-neon-inner.o += -ffreestanding -march=armv8-a+crypto
+CFLAGS_arm64/crc64-neon-inner.o += -isystem $(shell $(CC) -print-file-name=include)
+crc64-$(CONFIG_ARM64) += arm64/crc64-neon-inner.o
+
 crc64-$(CONFIG_RISCV) += riscv/crc64_lsb.o riscv/crc64_msb.o
 crc64-$(CONFIG_X86) += x86/crc64-pclmul.o
-endif
+endif # CONFIG_CRC64_ARCH
 
 obj-y += tests/
 
diff --git a/lib/crc/arm64/crc64-neon-inner.c b/lib/crc/arm64/crc64-neon-inner.c
new file mode 100644
index 0000000000000..881cdafadb37a
--- /dev/null
+++ b/lib/crc/arm64/crc64-neon-inner.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Accelerated CRC64 (NVMe) using ARM NEON C intrinsics
+ */
+
+#include <arm_neon.h>
+#include <linux/types.h>
+
+u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);
+
+#define GET_P64_0(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 0))
+#define GET_P64_1(v) ((poly64_t)vgetq_lane_u64(vreinterpretq_u64_p64(v), 1))
+
+/* x^191 mod G, x^127 mod G */
+static const u64 fold_consts_val[2] = { 0xeadc41fd2ba3d420ULL,
+					0x21e9761e252621acULL };
+/* floor(x^127 / G), (G - x^64) / x */
+static const u64 bconsts_val[2] = { 0x27ecfa329aef9f77ULL,
+				    0x34d926535897936aULL };
+
+u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len)
+{
+	uint64x2_t v0_u64 = { crc, 0 };
+	poly64x2_t v0 = vreinterpretq_p64_u64(v0_u64);
+	poly64x2_t fold_consts =
+		vreinterpretq_p64_u64(vld1q_u64(fold_consts_val));
+	poly64x2_t v1 = vreinterpretq_p64_u8(vld1q_u8(p));
+
+	v0 = vreinterpretq_p64_u8(veorq_u8(vreinterpretq_u8_p64(v0),
+					   vreinterpretq_u8_p64(v1)));
+	p += 16;
+	len -= 16;
+
+	do {
+		v1 = vreinterpretq_p64_u8(vld1q_u8(p));
+
+		poly128_t v2 = vmull_high_p64(fold_consts, v0);
+		poly128_t v0_128 =
+			vmull_p64(GET_P64_0(fold_consts), GET_P64_0(v0));
+
+		uint8x16_t x0 = veorq_u8(vreinterpretq_u8_p128(v0_128),
+					 vreinterpretq_u8_p128(v2));
+
+		x0 = veorq_u8(x0, vreinterpretq_u8_p64(v1));
+		v0 = vreinterpretq_p64_u8(x0);
+
+		p += 16;
+		len -= 16;
+	} while (len >= 16);
+
+	/* Multiply the 128-bit value by x^64 and reduce it back to 128 bits. */
+	poly64x2_t v7 = vreinterpretq_p64_u64((uint64x2_t){ 0, 0 });
+	poly128_t v1_128 = vmull_p64(GET_P64_1(fold_consts), GET_P64_0(v0));
+
+	uint8x16_t ext_v0 =
+		vextq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p64(v7), 8);
+	uint8x16_t x0 = veorq_u8(ext_v0, vreinterpretq_u8_p128(v1_128));
+
+	v0 = vreinterpretq_p64_u8(x0);
+
+	/* Final Barrett reduction */
+	poly64x2_t bconsts = vreinterpretq_p64_u64(vld1q_u64(bconsts_val));
+
+	v1_128 = vmull_p64(GET_P64_0(bconsts), GET_P64_0(v0));
+
+	poly64x2_t v1_64 = vreinterpretq_p64_u8(vreinterpretq_u8_p128(v1_128));
+	poly128_t v3_128 = vmull_p64(GET_P64_1(bconsts), GET_P64_0(v1_64));
+
+	x0 = veorq_u8(vreinterpretq_u8_p64(v0), vreinterpretq_u8_p128(v3_128));
+
+	uint8x16_t ext_v2 = vextq_u8(vreinterpretq_u8_p64(v7),
+				     vreinterpretq_u8_p128(v1_128), 8);
+
+	x0 = veorq_u8(x0, ext_v2);
+
+	v0 = vreinterpretq_p64_u8(x0);
+	return vgetq_lane_u64(vreinterpretq_u64_p64(v0), 1);
+}
diff --git a/lib/crc/arm64/crc64.h b/lib/crc/arm64/crc64.h
new file mode 100644
index 0000000000000..cc65abeee24ce
--- /dev/null
+++ b/lib/crc/arm64/crc64.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CRC64 using ARM64 PMULL instructions
+ */
+
+#include <asm/cpufeature.h>
+#include <asm/neon.h>
+#include <asm/simd.h>
+#include <linux/sizes.h>
+
+u64 crc64_nvme_arm64_c(u64 crc, const u8 *p, size_t len);
+
+#define crc64_be_arch crc64_be_generic
+
+static inline u64 crc64_nvme_arch(u64 crc, const u8 *p, size_t len)
+{
+	if (len >= 128 && cpu_have_named_feature(PMULL) &&
+	    likely(may_use_simd())) {
+		do {
+			size_t chunk = min_t(size_t, len & ~15, SZ_4K);
+
+			scoped_ksimd()
+				crc = crc64_nvme_arm64_c(crc, p, chunk);
+
+			p += chunk;
+			len -= chunk;
+		} while (len >= 128);
+	}
+	return crc64_nvme_generic(crc, p, len);
+}
-- 
2.47.3
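
For reviewers who want to cross-check the folded and Barrett-reduced
result against a plain shift-and-XOR reference (the baseline the commit
message compares against), the sketch below may help. It is illustrative
only and not part of the patch: crc64_nvme_ref() is a hypothetical
user-space helper, and it assumes the reflected CRC64-NVMe polynomial
0x9a6c9329ac4bc9b5 with the usual ~0 pre-/post-inversion left to the
caller, matching how the in-kernel generic update function is invoked
above.

/*
 * Illustrative bit-at-a-time reference for the CRC64-NVMe update step.
 * Assumption: reflected polynomial 0x9a6c9329ac4bc9b5; the caller
 * applies the ~0 pre-/post-inversion, as with crc64_nvme_generic().
 */
#include <stddef.h>
#include <stdint.h>

static uint64_t crc64_nvme_ref(uint64_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= *p++;		/* fold in the next message byte */
		for (int i = 0; i < 8; i++)	/* one step per message bit */
			crc = (crc >> 1) ^
			      ((crc & 1) ? 0x9a6c9329ac4bc9b5ULL : 0);
	}
	return crc;
}

Feeding identical buffers through crc64_nvme_ref() and the PMULL path
(for lengths >= 128, in 16-byte multiples) should produce identical
values if the fold and Barrett constants are correct.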