crypto: x86/aegis - Fix sleeping when disallowed on PREEMPT_RT
author Eric Biggers <ebiggers@kernel.org>
Tue, 8 Jul 2025 19:38:28 +0000 (12:38 -0700)
committer Herbert Xu <herbert@gondor.apana.org.au>
Fri, 18 Jul 2025 10:51:59 +0000 (20:51 +1000)
skcipher_walk_done() can call kfree(), which takes a spinlock.  On
PREEMPT_RT, spinlocks are sleeping locks, so acquiring one is incorrect
while preemption is disabled, as it is inside a kernel-mode FPU section.
Therefore, end the kernel-mode FPU section before calling
skcipher_walk_done(), and restart it afterwards.
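
A rough sketch of the resulting pattern (simplified from the hunks
below; the per-block AES-NI processing is elided, kernel_fpu_begin()/
kernel_fpu_end() come from <asm/fpu/api.h> and skcipher_walk_done()
from <crypto/internal/skcipher.h>):

        kernel_fpu_begin();
        while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
                /* ... process full blocks with AES-NI ... */

                /* skcipher_walk_done() may kfree(), whose spinlock is a
                 * sleeping lock on PREEMPT_RT, so leave the kernel-mode
                 * FPU section around it. */
                kernel_fpu_end();
                skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
                kernel_fpu_begin();
        }
        /* ... tail handling does the same kernel_fpu_end()/_begin()
         * dance around skcipher_walk_done(walk, 0) ... */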

Moreover, pass atomic=false to skcipher_walk_aead_encrypt() instead of
atomic=true.  The point of atomic=true was to make skcipher_walk_done()
safe to call while in a kernel-mode FPU section, but that does not
actually work.  So just use the usual atomic=false.
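
Roughly, the call site then becomes (mirroring the second hunk below,
with walk, req, and enc as in the glue code):

        if (enc)
                skcipher_walk_aead_encrypt(&walk, req, false);
        else
                skcipher_walk_aead_decrypt(&walk, req, false);

        kernel_fpu_begin();
        /* ... crypto_aegis128_aesni_process_crypt() as sketched above ... */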

Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Cc: stable@vger.kernel.org
Signed-off-by: Eric Biggers <ebiggers@kernel.org>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
arch/x86/crypto/aegis128-aesni-glue.c

diff --git a/arch/x86/crypto/aegis128-aesni-glue.c b/arch/x86/crypto/aegis128-aesni-glue.c
index f1b6d40154e3528ee5bad6167d872ea37a398694..3cb5c193038bb2849fd9cecb508db02d65fed051 100644
--- a/arch/x86/crypto/aegis128-aesni-glue.c
+++ b/arch/x86/crypto/aegis128-aesni-glue.c
@@ -119,7 +119,9 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
                                           walk->dst.virt.addr,
                                           round_down(walk->nbytes,
                                                      AEGIS128_BLOCK_SIZE));
+               kernel_fpu_end();
                skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+               kernel_fpu_begin();
        }
 
        if (walk->nbytes) {
@@ -131,7 +133,9 @@ crypto_aegis128_aesni_process_crypt(struct aegis_state *state,
                        aegis128_aesni_dec_tail(state, walk->src.virt.addr,
                                                walk->dst.virt.addr,
                                                walk->nbytes);
+               kernel_fpu_end();
                skcipher_walk_done(walk, 0);
+               kernel_fpu_begin();
        }
 }
 
@@ -176,9 +180,9 @@ crypto_aegis128_aesni_crypt(struct aead_request *req,
        struct aegis_state state;
 
        if (enc)
-               skcipher_walk_aead_encrypt(&walk, req, true);
+               skcipher_walk_aead_encrypt(&walk, req, false);
        else
-               skcipher_walk_aead_decrypt(&walk, req, true);
+               skcipher_walk_aead_decrypt(&walk, req, false);
 
        kernel_fpu_begin();