Since commit
aefbab8e77eb ("arm64: fpsimd: Preserve/restore kernel mode
NEON at context switch"), kernel-mode NEON sections have been
preemptible on arm64. Additionally, since commit
7dadeaa6e851 ("sched: Further
restrict the preemption modes"), voluntary preemption is no longer
supported on arm64. Therefore, there's no longer any need to
limit the length of kernel-mode NEON sections on arm64.
Simplify the AES-CBC-MAC code accordingly.
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Link: https://lore.kernel.org/r/20260401000548.133151-2-ebiggers@kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
u32 blocks = abytes / AES_BLOCK_SIZE;
if (macp == AES_BLOCK_SIZE || (!macp && blocks > 0)) {
- u32 rem = ce_aes_mac_update(in, rk, rounds, blocks, mac,
- macp, enc_after);
- u32 adv = (blocks - rem) * AES_BLOCK_SIZE;
-
+ ce_aes_mac_update(in, rk, rounds, blocks, mac, macp,
+ enc_after);
macp = enc_after ? 0 : AES_BLOCK_SIZE;
- in += adv;
- abytes -= adv;
-
- if (unlikely(rem))
- macp = 0;
+ in += blocks * AES_BLOCK_SIZE;
+ abytes -= blocks * AES_BLOCK_SIZE;
} else {
u32 l = min(AES_BLOCK_SIZE - macp, abytes);
asmlinkage void ce_aes_essiv_cbc_decrypt(u8 out[], u8 const in[],
u32 const rk1[], int rounds,
int blocks, u8 iv[], u32 const rk2[]);
-asmlinkage size_t ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- size_t blocks, u8 dg[], int enc_before,
- int enc_after);
+asmlinkage void ce_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ size_t blocks, u8 dg[], int enc_before,
+ int enc_after);
#elif defined(CONFIG_PPC)
void ppc_expand_key_128(u32 *key_enc, const u8 *key);
void ppc_expand_key_192(u32 *key_enc, const u8 *key);
#if IS_ENABLED(CONFIG_CRYPTO_LIB_AES_CBC_MACS)
/*
- * size_t aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- * size_t blocks, u8 dg[], int enc_before,
- * int enc_after);
+ * void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ * size_t blocks, u8 dg[], int enc_before,
+ * int enc_after);
*/
AES_FUNC_START(aes_mac_update)
ld1 {v0.16b}, [x4] /* get dg */
cbz w5, .Lmacout
encrypt_block v0, w2, x1, x7, w8
st1 {v0.16b}, [x4] /* return dg */
- cond_yield .Lmacout, x7, x8
b .Lmacloop4x
.Lmac1x:
add x3, x3, #4
.Lmacout:
st1 {v0.16b}, [x4] /* return dg */
- mov x0, x3
ret
AES_FUNC_END(aes_mac_update)
#endif /* CONFIG_CRYPTO_LIB_AES_CBC_MACS */
asmlinkage u32 __aes_ce_sub(u32 l);
asmlinkage void __aes_ce_invert(struct aes_block *out,
const struct aes_block *in);
-asmlinkage size_t neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- size_t blocks, u8 dg[], int enc_before,
- int enc_after);
+asmlinkage void neon_aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ size_t blocks, u8 dg[], int enc_before,
+ int enc_after);
/*
* Expand an AES key using the crypto extensions if supported and usable or
bool enc_after)
{
if (static_branch_likely(&have_neon) && likely(may_use_simd())) {
- do {
- size_t rem;
-
- scoped_ksimd() {
- if (static_branch_likely(&have_aes))
- rem = ce_aes_mac_update(
- data, key->k.rndkeys,
- key->nrounds, nblocks, h,
- enc_before, enc_after);
- else
- rem = neon_aes_mac_update(
- data, key->k.rndkeys,
- key->nrounds, nblocks, h,
- enc_before, enc_after);
- }
- data += (nblocks - rem) * AES_BLOCK_SIZE;
- nblocks = rem;
- enc_before = false;
- } while (nblocks);
+ scoped_ksimd() {
+ if (static_branch_likely(&have_aes))
+ ce_aes_mac_update(data, key->k.rndkeys,
+ key->nrounds, nblocks, h,
+ enc_before, enc_after);
+ else
+ neon_aes_mac_update(data, key->k.rndkeys,
+ key->nrounds, nblocks, h,
+ enc_before, enc_after);
+ }
return true;
}
return false;