From: Eric Biggers
Date: Mon, 28 Apr 2025 17:00:28 +0000 (-0700)
Subject: crypto: arm64/sha256 - remove obsolete chunking logic
X-Git-Tag: v6.16-rc1~206^2~120
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=642cfc0680ff9aae73cd87d6fffcc84d9434938b;p=thirdparty%2Fkernel%2Flinux.git

crypto: arm64/sha256 - remove obsolete chunking logic

Since kernel-mode NEON sections are now preemptible on arm64, there is
no longer any need to limit the length of them.

Reviewed-by: Ard Biesheuvel
Signed-off-by: Eric Biggers
Signed-off-by: Herbert Xu
---

diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c
index 26f9fdfae87bf..d63ea82e1374e 100644
--- a/arch/arm64/crypto/sha256-glue.c
+++ b/arch/arm64/crypto/sha256-glue.c
@@ -86,23 +86,8 @@ static struct shash_alg algs[] = { {
 static int sha256_update_neon(struct shash_desc *desc, const u8 *data,
 			      unsigned int len)
 {
-	do {
-		unsigned int chunk = len;
-
-		/*
-		 * Don't hog the CPU for the entire time it takes to process all
-		 * input when running on a preemptible kernel, but process the
-		 * data block by block instead.
-		 */
-		if (IS_ENABLED(CONFIG_PREEMPTION))
-			chunk = SHA256_BLOCK_SIZE;
-
-		chunk -= sha256_base_do_update_blocks(desc, data, chunk,
-						      sha256_neon_transform);
-		data += chunk;
-		len -= chunk;
-	} while (len >= SHA256_BLOCK_SIZE);
-	return len;
+	return sha256_base_do_update_blocks(desc, data, len,
+					    sha256_neon_transform);
 }
 
 static int sha256_finup_neon(struct shash_desc *desc, const u8 *data,
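
For context, the chunking being removed existed because the NEON block
function runs inside a kernel-mode NEON section, during which preemption
used to be disabled on arm64. Below is a minimal sketch of that pattern,
not the exact tree contents: the symbol and type names (sha256_block_neon,
struct sha256_state) are assumptions based on this glue file, and the
signatures are illustrative only.

/*
 * Illustrative sketch, not copied verbatim from the tree; names and
 * signatures are assumptions modeled on arch/arm64/crypto/sha256-glue.c.
 */
#include <linux/types.h>
#include <linux/linkage.h>
#include <asm/neon.h>		/* kernel_neon_begin()/kernel_neon_end() */
#include <crypto/sha2.h>	/* struct sha256_state, SHA256_BLOCK_SIZE */

/* Assumed NEON assembly entry point exported by the arm64 SHA-256 code. */
asmlinkage void sha256_block_neon(u32 *digest, const void *data,
				  unsigned int num_blks);

static void sha256_neon_transform(struct sha256_state *sst, u8 const *src,
				  int blocks)
{
	/*
	 * Kernel-mode NEON section: this is what the removed chunking was
	 * guarding.  Now that such sections are preemptible on arm64,
	 * passing an arbitrarily large "blocks" count no longer risks
	 * hogging the CPU, so the caller can hand over the whole input.
	 */
	kernel_neon_begin();
	sha256_block_neon(sst->state, src, blocks);
	kernel_neon_end();
}

With the section preemptible, sha256_update_neon() can simply forward the
full length to sha256_base_do_update_blocks(), as the hunk above does.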