lib/crc: Use underlying functions instead of crypto_simd_usable()
author Eric Biggers <ebiggers@kernel.org>
Mon, 11 Aug 2025 18:26:31 +0000 (11:26 -0700)
committer Eric Biggers <ebiggers@kernel.org>
Mon, 11 Aug 2025 18:28:00 +0000 (11:28 -0700)
Since crc_kunit now tests the fallback code paths without using
crypto_simd_disabled_for_test, make the CRC code just use the underlying
may_use_simd() and irq_fpu_usable() functions directly instead of
crypto_simd_usable().  This eliminates an unnecessary layer.
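For reference, the wrapper being removed looks approximately like this; a
simplified sketch, not a verbatim copy of include/crypto/internal/simd.h.
It delegates to the arch's may_use_simd(), except that the crypto
self-tests can force the no-SIMD fallback path on the current CPU:

#include <asm/simd.h>		/* may_use_simd() */
#include <linux/percpu.h>

#ifdef CONFIG_CRYPTO_MANAGER_EXTRA_TESTS
DECLARE_PER_CPU(bool, crypto_simd_disabled_for_test);

static inline bool crypto_simd_usable(void)
{
	/* Self-tests may disable SIMD on this CPU to cover fallback code. */
	return may_use_simd() &&
	       !this_cpu_read(crypto_simd_disabled_for_test);
}
#else
static inline bool crypto_simd_usable(void)
{
	return may_use_simd();
}
#endif

With the test override no longer needed, the call sites can query
may_use_simd() themselves; the x86 hunks call irq_fpu_usable() directly,
which is what may_use_simd() resolves to on x86.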

Take the opportunity to add likely() and unlikely() annotations as well.
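These are the standard branch-prediction hints from <linux/compiler.h>;
ignoring the branch-profiling variant, they reduce to:

#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

so the compiler lays out the SIMD path as the fall-through and the scalar
fallback out of line, matching how rarely may_use_simd() fails here.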

Link: https://lore.kernel.org/r/20250811182631.376302-4-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
lib/crc/arm/crc-t10dif.h
lib/crc/arm/crc32.h
lib/crc/arm64/crc-t10dif.h
lib/crc/arm64/crc32.h
lib/crc/powerpc/crc-t10dif.h
lib/crc/powerpc/crc32.h
lib/crc/x86/crc-pclmul-template.h
lib/crc/x86/crc32.h

diff --git a/lib/crc/arm/crc-t10dif.h b/lib/crc/arm/crc-t10dif.h
index 2edf7e9681d05a7dba846df903caa943db4b3b91..1a969f4024d479ddbd39c6fae14d23c4839267ff 100644
--- a/lib/crc/arm/crc-t10dif.h
+++ b/lib/crc/arm/crc-t10dif.h
@@ -5,8 +5,6 @@
  * Copyright (C) 2016 Linaro Ltd <ard.biesheuvel@linaro.org>
  */
 
-#include <crypto/internal/simd.h>
-
 #include <asm/neon.h>
 #include <asm/simd.h>
 
@@ -23,7 +21,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
        if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
                if (static_branch_likely(&have_pmull)) {
-                       if (crypto_simd_usable()) {
+                       if (likely(may_use_simd())) {
                                kernel_neon_begin();
                                crc = crc_t10dif_pmull64(crc, data, length);
                                kernel_neon_end();
@@ -31,7 +29,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
                        }
                } else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
                           static_branch_likely(&have_neon) &&
-                          crypto_simd_usable()) {
+                          likely(may_use_simd())) {
                        u8 buf[16] __aligned(16);
 
                        kernel_neon_begin();
diff --git a/lib/crc/arm/crc32.h b/lib/crc/arm/crc32.h
index 018007e162a2b61345b768893d91cbeb52ef6ddb..ae71aa60b7a74edf0fa4034fe417431349da3fa1 100644
--- a/lib/crc/arm/crc32.h
+++ b/lib/crc/arm/crc32.h
@@ -7,8 +7,6 @@
 
 #include <linux/cpufeature.h>
 
-#include <crypto/internal/simd.h>
-
 #include <asm/hwcap.h>
 #include <asm/neon.h>
 #include <asm/simd.h>
@@ -34,7 +32,7 @@ static inline u32 crc32_le_scalar(u32 crc, const u8 *p, size_t len)
 static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
 {
        if (len >= PMULL_MIN_LEN + 15 &&
-           static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+           static_branch_likely(&have_pmull) && likely(may_use_simd())) {
                size_t n = -(uintptr_t)p & 15;
 
                /* align p to 16-byte boundary */
@@ -63,7 +61,7 @@ static inline u32 crc32c_scalar(u32 crc, const u8 *p, size_t len)
 static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 {
        if (len >= PMULL_MIN_LEN + 15 &&
-           static_branch_likely(&have_pmull) && crypto_simd_usable()) {
+           static_branch_likely(&have_pmull) && likely(may_use_simd())) {
                size_t n = -(uintptr_t)p & 15;
 
                /* align p to 16-byte boundary */
diff --git a/lib/crc/arm64/crc-t10dif.h b/lib/crc/arm64/crc-t10dif.h
index c4521a7f1ee9bf150bc02a4181dfd88c12047fac..435a990ec43c276df1a150a389baa15a8a36f5ac 100644
--- a/lib/crc/arm64/crc-t10dif.h
+++ b/lib/crc/arm64/crc-t10dif.h
@@ -7,8 +7,6 @@
 
 #include <linux/cpufeature.h>
 
-#include <crypto/internal/simd.h>
-
 #include <asm/neon.h>
 #include <asm/simd.h>
 
@@ -25,7 +23,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
 {
        if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE) {
                if (static_branch_likely(&have_pmull)) {
-                       if (crypto_simd_usable()) {
+                       if (likely(may_use_simd())) {
                                kernel_neon_begin();
                                crc = crc_t10dif_pmull_p64(crc, data, length);
                                kernel_neon_end();
@@ -33,7 +31,7 @@ static inline u16 crc_t10dif_arch(u16 crc, const u8 *data, size_t length)
                        }
                } else if (length > CRC_T10DIF_PMULL_CHUNK_SIZE &&
                           static_branch_likely(&have_asimd) &&
-                          crypto_simd_usable()) {
+                          likely(may_use_simd())) {
                        u8 buf[16];
 
                        kernel_neon_begin();
diff --git a/lib/crc/arm64/crc32.h b/lib/crc/arm64/crc32.h
index 6e5dec45f05d21723f7a57e88f2aa376a62e402c..31e649cd40a2fdee6dae2de5532f5929d54fd540 100644
--- a/lib/crc/arm64/crc32.h
+++ b/lib/crc/arm64/crc32.h
@@ -5,8 +5,6 @@
 #include <asm/neon.h>
 #include <asm/simd.h>
 
-#include <crypto/internal/simd.h>
-
 // The minimum input length to consider the 4-way interleaved code path
 static const size_t min_len = 1024;
 
@@ -23,7 +21,8 @@ static inline u32 crc32_le_arch(u32 crc, const u8 *p, size_t len)
        if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
                return crc32_le_base(crc, p, len);
 
-       if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+       if (len >= min_len && cpu_have_named_feature(PMULL) &&
+           likely(may_use_simd())) {
                kernel_neon_begin();
                crc = crc32_le_arm64_4way(crc, p, len);
                kernel_neon_end();
@@ -43,7 +42,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
        if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
                return crc32c_base(crc, p, len);
 
-       if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+       if (len >= min_len && cpu_have_named_feature(PMULL) &&
+           likely(may_use_simd())) {
                kernel_neon_begin();
                crc = crc32c_le_arm64_4way(crc, p, len);
                kernel_neon_end();
@@ -63,7 +63,8 @@ static inline u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
        if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
                return crc32_be_base(crc, p, len);
 
-       if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
+       if (len >= min_len && cpu_have_named_feature(PMULL) &&
+           likely(may_use_simd())) {
                kernel_neon_begin();
                crc = crc32_be_arm64_4way(crc, p, len);
                kernel_neon_end();
diff --git a/lib/crc/powerpc/crc-t10dif.h b/lib/crc/powerpc/crc-t10dif.h
index 59e16804a6eae95fe22a18a5b15901ce73217e33..e033d5f57bae22f0a628ee9b3f37216d3a28979e 100644
--- a/lib/crc/powerpc/crc-t10dif.h
+++ b/lib/crc/powerpc/crc-t10dif.h
@@ -6,8 +6,8 @@
  * [based on crc32c-vpmsum_glue.c]
  */
 
+#include <asm/simd.h>
 #include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
 #include <linux/preempt.h>
@@ -29,7 +29,8 @@ static inline u16 crc_t10dif_arch(u16 crci, const u8 *p, size_t len)
        u32 crc = crci;
 
        if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-           !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+           !static_branch_likely(&have_vec_crypto) ||
+           unlikely(!may_use_simd()))
                return crc_t10dif_generic(crc, p, len);
 
        if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/lib/crc/powerpc/crc32.h b/lib/crc/powerpc/crc32.h
index 811cc2e6ed24d4a504421f291af08706a6a6336d..cc8fa3913d4e01ee59df21fe69205a3198e48ad5 100644
--- a/lib/crc/powerpc/crc32.h
+++ b/lib/crc/powerpc/crc32.h
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
+#include <asm/simd.h>
 #include <asm/switch_to.h>
-#include <crypto/internal/simd.h>
 #include <linux/cpufeature.h>
 #include <linux/jump_label.h>
 #include <linux/preempt.h>
@@ -24,7 +24,8 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
        unsigned int tail;
 
        if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
-           !static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
+           !static_branch_likely(&have_vec_crypto) ||
+           unlikely(!may_use_simd()))
                return crc32c_base(crc, p, len);
 
        if ((unsigned long)p & VMX_ALIGN_MASK) {
diff --git a/lib/crc/x86/crc-pclmul-template.h b/lib/crc/x86/crc-pclmul-template.h
index 35c950d7010c2819a452a065fb05c58a89cb73ac..02744831c6fac089b735c8fccecb77ac9c9fe083 100644
--- a/lib/crc/x86/crc-pclmul-template.h
+++ b/lib/crc/x86/crc-pclmul-template.h
@@ -12,7 +12,6 @@
 
 #include <asm/cpufeatures.h>
 #include <asm/simd.h>
-#include <crypto/internal/simd.h>
 #include <linux/static_call.h>
 #include "crc-pclmul-consts.h"
 
@@ -57,7 +56,7 @@ static inline bool have_avx512(void)
 #define CRC_PCLMUL(crc, p, len, prefix, consts, have_pclmulqdq)                \
 do {                                                                   \
        if ((len) >= 16 && static_branch_likely(&(have_pclmulqdq)) &&   \
-           crypto_simd_usable()) {                                     \
+           likely(irq_fpu_usable())) {                                 \
                const void *consts_ptr;                                 \
                                                                        \
                consts_ptr = (consts).fold_across_128_bits_consts;      \
diff --git a/lib/crc/x86/crc32.h b/lib/crc/x86/crc32.h
index cea2c96d08d09ec0374239df188fd888265bb4aa..2c4a5976654ad7106e23c03c80c070110585690a 100644
--- a/lib/crc/x86/crc32.h
+++ b/lib/crc/x86/crc32.h
@@ -44,7 +44,7 @@ static inline u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
                return crc32c_base(crc, p, len);
 
        if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
-           static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
+           static_branch_likely(&have_pclmulqdq) && likely(irq_fpu_usable())) {
                /*
                 * Long length, the vector registers are usable, and the CPU is
                 * 64-bit and supports both CRC32 and PCLMULQDQ instructions.