crypto: x86/aes-gcm - fix PREEMPT_RT issue in gcm_crypt()
author		Eric Biggers <ebiggers@google.com>
		Mon, 5 Aug 2024 18:27:13 +0000 (11:27 -0700)
committer	Herbert Xu <herbert@gondor.apana.org.au>
		Sat, 10 Aug 2024 04:25:34 +0000 (12:25 +0800)
On PREEMPT_RT, kfree() takes sleeping locks and must not be called with
preemption disabled.  Therefore, on PREEMPT_RT, skcipher_walk_done() must
not be called from within a kernel_fpu_{begin,end}() pair, even for the
last call, which is guaranteed not to allocate memory.
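
A minimal sketch of the constraint (skcipher_walk_done(), kernel_fpu_begin()
and kernel_fpu_end() are the real kernel APIs; the two wrapper functions are
illustrative only):

    #include <crypto/internal/skcipher.h>  /* skcipher_walk_done() */
    #include <asm/fpu/api.h>               /* kernel_fpu_begin/end() */

    /* BAD on PREEMPT_RT: skcipher_walk_done() may kfree() the walk's
     * bounce buffer, and on PREEMPT_RT kfree() can take sleeping locks,
     * which must not happen while kernel_fpu_begin() has preemption
     * disabled.
     */
    static int walk_done_inside_fpu(struct skcipher_walk *walk)
    {
            int err;

            kernel_fpu_begin();     /* disables preemption */
            /* ... SIMD processing of walk->src / walk->dst ... */
            err = skcipher_walk_done(walk, 0);
            kernel_fpu_end();
            return err;
    }

    /* OK: leave the FPU section (re-enabling preemption) first. */
    static int walk_done_outside_fpu(struct skcipher_walk *walk)
    {
            kernel_fpu_begin();
            /* ... SIMD processing ... */
            kernel_fpu_end();
            return skcipher_walk_done(walk, 0);  /* may sleep here */
    }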

Fix this by moving the last skcipher_walk_done() in gcm_crypt() to the
end of the function, after kernel_fpu_end().  To make this work cleanly,
rework the data processing loop to handle only non-last data segments.
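
Condensed, the resulting shape of gcm_crypt()'s data path looks like this
(a sketch based on the diff below, with the AES-GCM details elided):

    kernel_fpu_begin();
    /* ... hash the associated data ... */

    /* Non-last segments: the length is rounded down to a multiple of
     * AES_BLOCK_SIZE, and the FPU section is dropped around each
     * skcipher_walk_done() call.
     */
    while (unlikely((nbytes = walk.nbytes) < walk.total)) {
            nbytes = round_down(nbytes, AES_BLOCK_SIZE);
            /* ... aes_gcm_update() on nbytes bytes ... */
            kernel_fpu_end();
            err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
            if (err)
                    return err;
            kernel_fpu_begin();
    }

    /* Last segment and tag finalization, in one FPU section. */
    /* ... aes_gcm_update() + finalize ... */
    kernel_fpu_end();
    if (nbytes)
            skcipher_walk_done(&walk, 0);  /* safe: FPU section is over */
    return err;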

Fixes: b06affb1cb58 ("crypto: x86/aes-gcm - add VAES and AVX512 / AVX10 optimized AES-GCM")
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Closes: https://lore.kernel.org/linux-crypto/20240802102333.itejxOsJ@linutronix.de
Signed-off-by: Eric Biggers <ebiggers@google.com>
Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index cd37de5ec40464fc2714b54b5b27182147824099..d63ba9eaba3e44c53ccbcc9ecbdcbd60db07e021 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1366,6 +1366,8 @@ gcm_crypt(struct aead_request *req, int flags)
                err = skcipher_walk_aead_encrypt(&walk, req, false);
        else
                err = skcipher_walk_aead_decrypt(&walk, req, false);
+       if (err)
+               return err;
 
        /*
         * Since the AES-GCM assembly code requires that at least three assembly
@@ -1381,37 +1383,31 @@ gcm_crypt(struct aead_request *req, int flags)
        gcm_process_assoc(key, ghash_acc, req->src, assoclen, flags);
 
        /* En/decrypt the data and pass the ciphertext through GHASH. */
-       while ((nbytes = walk.nbytes) != 0) {
-               if (unlikely(nbytes < walk.total)) {
-                       /*
-                        * Non-last segment.  In this case, the assembly
-                        * function requires that the length be a multiple of 16
-                        * (AES_BLOCK_SIZE) bytes.  The needed buffering of up
-                        * to 16 bytes is handled by the skcipher_walk.  Here we
-                        * just need to round down to a multiple of 16.
-                        */
-                       nbytes = round_down(nbytes, AES_BLOCK_SIZE);
-                       aes_gcm_update(key, le_ctr, ghash_acc,
-                                      walk.src.virt.addr, walk.dst.virt.addr,
-                                      nbytes, flags);
-                       le_ctr[0] += nbytes / AES_BLOCK_SIZE;
-                       kernel_fpu_end();
-                       err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
-                       kernel_fpu_begin();
-               } else {
-                       /* Last segment: process all remaining data. */
-                       aes_gcm_update(key, le_ctr, ghash_acc,
-                                      walk.src.virt.addr, walk.dst.virt.addr,
-                                      nbytes, flags);
-                       err = skcipher_walk_done(&walk, 0);
-                       /*
-                        * The low word of the counter isn't used by the
-                        * finalize, so there's no need to increment it here.
-                        */
-               }
+       while (unlikely((nbytes = walk.nbytes) < walk.total)) {
+               /*
+                * Non-last segment.  In this case, the assembly function
+                * requires that the length be a multiple of 16 (AES_BLOCK_SIZE)
+                * bytes.  The needed buffering of up to 16 bytes is handled by
+                * the skcipher_walk.  Here we just need to round down to a
+                * multiple of 16.
+                */
+               nbytes = round_down(nbytes, AES_BLOCK_SIZE);
+               aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+                              walk.dst.virt.addr, nbytes, flags);
+               le_ctr[0] += nbytes / AES_BLOCK_SIZE;
+               kernel_fpu_end();
+               err = skcipher_walk_done(&walk, walk.nbytes - nbytes);
+               if (err)
+                       return err;
+               kernel_fpu_begin();
        }
-       if (err)
-               goto out;
+       /* Last segment: process all remaining data. */
+       aes_gcm_update(key, le_ctr, ghash_acc, walk.src.virt.addr,
+                      walk.dst.virt.addr, nbytes, flags);
+       /*
+        * The low word of the counter isn't used by the finalize, so there's no
+        * need to increment it here.
+        */
 
        /* Finalize */
        taglen = crypto_aead_authsize(tfm);
@@ -1439,8 +1435,9 @@ gcm_crypt(struct aead_request *req, int flags)
                                       datalen, tag, taglen, flags))
                        err = -EBADMSG;
        }
-out:
        kernel_fpu_end();
+       if (nbytes)
+               skcipher_walk_done(&walk, 0);
        return err;
 }
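
Note the if (nbytes) guard on the final skcipher_walk_done(): for a
zero-length request, walk.nbytes is zero, the loop body never runs, and the
walk already completed inside skcipher_walk_aead_{en,de}crypt(), so there is
no walk state left to release.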