}
EXPORT_SYMBOL(crc32_le_arch);
-static u32 crc32c_le_scalar(u32 crc, const u8 *p, size_t len)
+static u32 crc32c_scalar(u32 crc, const u8 *p, size_t len)
{
if (static_branch_likely(&have_crc32))
return crc32c_armv8_le(crc, p, len);
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
}
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (len >= PMULL_MIN_LEN + 15 &&
static_branch_likely(&have_pmull) && crypto_simd_usable()) {
/* align p to 16-byte boundary */
if (n) {
- crc = crc32c_le_scalar(crc, p, n);
+ crc = crc32c_scalar(crc, p, n);
p += n;
len -= n;
}
p += n;
len -= n;
}
- return crc32c_le_scalar(crc, p, len);
+ return crc32c_scalar(crc, p, len);
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (!alternative_has_cap_likely(ARM64_HAS_CRC32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if (len >= min_len && cpu_have_named_feature(PMULL) && crypto_simd_usable()) {
kernel_neon_begin();
return crc32c_le_arm64(crc, p, len);
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (!static_branch_likely(&have_crc32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
while (len >= sizeof(u64)) {
u64 value = get_unaligned_le64(p);
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
if (!static_branch_likely(&have_crc32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if (IS_ENABLED(CONFIG_64BIT)) {
for (; len >= sizeof(u64); p += sizeof(u64), len -= sizeof(u64)) {
}
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
unsigned int prealign;
unsigned int tail;
if (len < (VECTOR_BREAKPOINT + VMX_ALIGN) ||
!static_branch_likely(&have_vec_crypto) || !crypto_simd_usable())
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if ((unsigned long)p & VMX_ALIGN_MASK) {
prealign = VMX_ALIGN - ((unsigned long)p & VMX_ALIGN_MASK);
- crc = crc32c_le_base(crc, p, prealign);
+ crc = crc32c_base(crc, p, prealign);
len -= prealign;
p += prealign;
}
tail = len & VMX_ALIGN_MASK;
if (tail) {
p += len & ~VMX_ALIGN_MASK;
- crc = crc32c_le_base(crc, p, tail);
+ crc = crc32c_base(crc, p, tail);
}
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
}
EXPORT_SYMBOL(crc32_le_arch);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
return crc32_le_generic(crc, p, len, CRC32C_POLY_LE,
- CRC32C_POLY_QT_LE, crc32c_le_base);
+ CRC32C_POLY_QT_LE, crc32c_base);
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
static inline u32 crc32_be_unaligned(u32 crc, unsigned char const *p,
size_t len)
DEFINE_CRC32_VX(crc32_le_arch, crc32_le_vgfm_16, crc32_le_base)
DEFINE_CRC32_VX(crc32_be_arch, crc32_be_vgfm_16, crc32_be_base)
-DEFINE_CRC32_VX(crc32c_le_arch, crc32c_le_vgfm_16, crc32c_le_base)
+DEFINE_CRC32_VX(crc32c_arch, crc32c_le_vgfm_16, crc32c_base)
static int __init crc32_s390_init(void)
{
void crc32c_sparc64(u32 *crcp, const u64 *data, size_t len);
-u32 crc32c_le_arch(u32 crc, const u8 *data, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *data, size_t len)
{
size_t n = -(uintptr_t)data & 7;
if (!static_branch_likely(&have_crc32c_opcode))
- return crc32c_le_base(crc, data, len);
+ return crc32c_base(crc, data, len);
if (n) {
/* Data isn't 8-byte aligned. Align it. */
n = min(n, len);
- crc = crc32c_le_base(crc, data, n);
+ crc = crc32c_base(crc, data, n);
data += n;
len -= n;
}
len -= n;
}
if (len)
- crc = crc32c_le_base(crc, data, len);
+ crc = crc32c_base(crc, data, len);
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
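/*
 * Illustrative only, not part of the patch: the head/bulk/tail shape shared
 * by the sparc glue above (and the powerpc glue earlier).  Unaligned head and
 * tail bytes fall back to crc32c_base(); the aligned middle goes to an
 * accelerated helper.  accel_crc32c() is a made-up stand-in, and the 8-byte
 * alignment is simply the value the sparc code happens to use.
 */
static u32 crc32c_head_bulk_tail(u32 crc, const u8 *p, size_t len)
{
	size_t head = min((size_t)(-(uintptr_t)p & 7), len);
	size_t bulk, tail;

	crc = crc32c_base(crc, p, head);	/* align to an 8-byte boundary */
	p += head;
	len -= head;

	bulk = len & ~(size_t)7;		/* whole 8-byte blocks */
	tail = len & 7;				/* leftover bytes */
	crc = accel_crc32c(crc, p, bulk);	/* hypothetical fast path */
	return crc32c_base(crc, p + bulk, tail);
}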
u32 crc32_be_arch(u32 crc, const u8 *data, size_t len)
{
asmlinkage u32 crc32c_x86_3way(u32 crc, const u8 *buffer, size_t len);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len)
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
{
size_t num_longs;
if (!static_branch_likely(&have_crc32))
- return crc32c_le_base(crc, p, len);
+ return crc32c_base(crc, p, len);
if (IS_ENABLED(CONFIG_X86_64) && len >= CRC32C_PCLMUL_BREAKEVEN &&
static_branch_likely(&have_pclmulqdq) && crypto_simd_usable()) {
return crc;
}
-EXPORT_SYMBOL(crc32c_le_arch);
+EXPORT_SYMBOL(crc32c_arch);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len)
{
{
struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
- ctx->crc = crc32c_le_base(ctx->crc, data, length);
+ ctx->crc = crc32c_base(ctx->crc, data, length);
return 0;
}
static int __chksum_finup(u32 *crcp, const u8 *data, unsigned int len, u8 *out)
{
- put_unaligned_le32(~crc32c_le_base(*crcp, data, len), out);
+ put_unaligned_le32(~crc32c_base(*crcp, data, len), out);
return 0;
}
u32 crc32_le_base(u32 crc, const u8 *p, size_t len);
u32 crc32_be_arch(u32 crc, const u8 *p, size_t len);
u32 crc32_be_base(u32 crc, const u8 *p, size_t len);
-u32 crc32c_le_arch(u32 crc, const u8 *p, size_t len);
-u32 crc32c_le_base(u32 crc, const u8 *p, size_t len);
+u32 crc32c_arch(u32 crc, const u8 *p, size_t len);
+u32 crc32c_base(u32 crc, const u8 *p, size_t len);
static inline u32 crc32_le(u32 crc, const void *p, size_t len)
{
static inline u32 crc32c(u32 crc, const void *p, size_t len)
{
if (IS_ENABLED(CONFIG_CRC32_ARCH))
- return crc32c_le_arch(crc, p, len);
- return crc32c_le_base(crc, p, len);
+ return crc32c_arch(crc, p, len);
+ return crc32c_base(crc, p, len);
}
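/*
 * Illustrative only, not part of the patch: a minimal sketch of calling the
 * renamed crc32c() dispatcher declared above.  The ~0 seed and final
 * inversion mirror the convention seen in __chksum_finup() earlier in this
 * diff; the helper name is made up for the example.
 */
static inline u32 example_crc32c(const void *buf, size_t len)
{
	u32 crc = crc32c(~0, buf, len);	/* dispatches to _arch or _base */

	return ~crc;			/* callers typically invert the result */
}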
/*
}
EXPORT_SYMBOL(crc32_le_base);
-u32 crc32c_le_base(u32 crc, const u8 *p, size_t len)
+u32 crc32c_base(u32 crc, const u8 *p, size_t len)
{
while (len--)
crc = (crc >> 8) ^ crc32ctable_le[(crc & 255) ^ *p++];
return crc;
}
-EXPORT_SYMBOL(crc32c_le_base);
+EXPORT_SYMBOL(crc32c_base);
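/*
 * Illustrative only, not part of the patch: a self-contained bit-at-a-time
 * version of the same CRC-32C (Castagnoli, reflected polynomial 0x82F63B78)
 * that the table-driven crc32c_base() above computes.  Useful as a
 * cross-check; the function name is made up for this sketch.
 */
static u32 crc32c_bitwise(u32 crc, const u8 *p, size_t len)
{
	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78 : 0);
	}
	return crc;
}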
/*
* This multiplies the polynomials x and y modulo the given modulus.