git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.12-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 30 Apr 2026 07:05:28 +0000 (09:05 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 30 Apr 2026 07:05:28 +0000 (09:05 +0200)
added patches:
crypto-af_alg-fix-page-reassignment-overflow-in-af_alg_pull_tsgl.patch
crypto-algif_aead-revert-to-operating-out-of-place.patch
crypto-algif_aead-snapshot-iv-for-async-aead-requests.patch
crypto-algif_aead-use-memcpy_sglist-instead-of-null-skcipher.patch
crypto-authenc-use-memcpy_sglist-instead-of-null-skcipher.patch
crypto-authencesn-do-not-place-hiseq-at-end-of-dst-for-out-of-place-decryption.patch
crypto-authencesn-fix-src-offset-when-decrypting-in-place.patch
crypto-scatterwalk-backport-memcpy_sglist.patch
series

queue-6.12/crypto-af_alg-fix-page-reassignment-overflow-in-af_alg_pull_tsgl.patch [new file with mode: 0644]
queue-6.12/crypto-algif_aead-revert-to-operating-out-of-place.patch [new file with mode: 0644]
queue-6.12/crypto-algif_aead-snapshot-iv-for-async-aead-requests.patch [new file with mode: 0644]
queue-6.12/crypto-algif_aead-use-memcpy_sglist-instead-of-null-skcipher.patch [new file with mode: 0644]
queue-6.12/crypto-authenc-use-memcpy_sglist-instead-of-null-skcipher.patch [new file with mode: 0644]
queue-6.12/crypto-authencesn-do-not-place-hiseq-at-end-of-dst-for-out-of-place-decryption.patch [new file with mode: 0644]
queue-6.12/crypto-authencesn-fix-src-offset-when-decrypting-in-place.patch [new file with mode: 0644]
queue-6.12/crypto-scatterwalk-backport-memcpy_sglist.patch [new file with mode: 0644]
queue-6.12/series [new file with mode: 0644]

diff --git a/queue-6.12/crypto-af_alg-fix-page-reassignment-overflow-in-af_alg_pull_tsgl.patch b/queue-6.12/crypto-af_alg-fix-page-reassignment-overflow-in-af_alg_pull_tsgl.patch
new file mode 100644 (file)
index 0000000..5450101
--- /dev/null
@@ -0,0 +1,43 @@
+From stable+bounces-242003-greg=kroah.com@vger.kernel.org Thu Apr 30 08:10:21 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:07:02 -0700
+Subject: crypto: af_alg - Fix page reassignment overflow in af_alg_pull_tsgl
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, syzbot+d23888375c2737c17ba5@syzkaller.appspotmail.com, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-9-ebiggers@kernel.org>
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 31d00156e50ecad37f2cb6cbf04aaa9a260505ef upstream.
+
+When page reassignment was added to af_alg_pull_tsgl the original
+loop wasn't updated so it may try to reassign one more page than
+necessary.
+
+Add the check to the reassignment so that this does not happen.
+
+Also update the comment which still refers to the obsolete offset
+argument.
+
+Reported-by: syzbot+d23888375c2737c17ba5@syzkaller.appspotmail.com
+Fixes: e870456d8e7c ("crypto: algif_skcipher - overhaul memory management")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/af_alg.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -705,8 +705,8 @@ void af_alg_pull_tsgl(struct sock *sk, s
+                        * Assumption: caller created af_alg_count_tsgl(len)
+                        * SG entries in dst.
+                        */
+-                      if (dst) {
+-                              /* reassign page to dst after offset */
++                      if (dst && plen) {
++                              /* reassign page to dst */
+                               get_page(page);
+                               sg_set_page(dst + j, page, plen, sg[i].offset);
+                               j++;
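For reference, the calling contract that the new "dst && plen" check protects, sketched as a hypothetical helper whose body mirrors the algif_aead patch later in this queue: the destination SGL is sized with af_alg_count_tsgl(), so af_alg_pull_tsgl() must not populate more SG entries than were counted, which is exactly why zero-length tail entries have to be skipped rather than reassigned.

#include <crypto/if_alg.h>
#include <linux/overflow.h>
#include <linux/scatterlist.h>
#include <net/sock.h>

/* Hypothetical wrapper, not part of the queue; uses the two-argument
 * af_alg_count_tsgl()/af_alg_pull_tsgl() signatures from this series. */
static int pull_tsgl_into_areq(struct sock *sk, struct af_alg_async_req *areq,
			       size_t processed)
{
	/* Size the per-request TX SGL from the global TX SGL. */
	areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
	if (!areq->tsgl_entries)
		areq->tsgl_entries = 1;
	areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
						 areq->tsgl_entries),
				  GFP_KERNEL);
	if (!areq->tsgl)
		return -ENOMEM;
	sg_init_table(areq->tsgl, areq->tsgl_entries);

	/* Must fill at most tsgl_entries entries: pages with plen == 0
	 * are released, not reassigned to dst. */
	af_alg_pull_tsgl(sk, processed, areq->tsgl);
	return 0;
}
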
diff --git a/queue-6.12/crypto-algif_aead-revert-to-operating-out-of-place.patch b/queue-6.12/crypto-algif_aead-revert-to-operating-out-of-place.patch
new file mode 100644 (file)
index 0000000..1f02021
--- /dev/null
@@ -0,0 +1,316 @@
+From stable+bounces-241998-greg=kroah.com@vger.kernel.org Thu Apr 30 08:09:21 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:06:57 -0700
+Subject: crypto: algif_aead - Revert to operating out-of-place
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Taeyang Lee <0wn@theori.io>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-4-ebiggers@kernel.org>
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit a664bf3d603dc3bdcf9ae47cc21e0daec706d7a5 upstream.
+
+This mostly reverts commit 72548b093ee3 except for the copying of
+the associated data.
+
+There is no benefit in operating in-place in algif_aead since the
+source and destination come from different mappings.  Get rid of
+all the complexity added for in-place operation and just copy the
+AD directly.
+
+Fixes: 72548b093ee3 ("crypto: algif_aead - copy AAD from src to dst")
+Reported-by: Taeyang Lee <0wn@theori.io>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/af_alg.c         |   49 ++++-------------------
+ crypto/algif_aead.c     |  100 +++++++++---------------------------------------
+ crypto/algif_skcipher.c |    6 +-
+ include/crypto/if_alg.h |    5 --
+ 4 files changed, 34 insertions(+), 126 deletions(-)
+
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -637,15 +637,13 @@ static int af_alg_alloc_tsgl(struct sock
+ /**
+  * af_alg_count_tsgl - Count number of TX SG entries
+  *
+- * The counting starts from the beginning of the SGL to @bytes. If
+- * an @offset is provided, the counting of the SG entries starts at the @offset.
++ * The counting starts from the beginning of the SGL to @bytes.
+  *
+  * @sk: socket of connection to user space
+  * @bytes: Count the number of SG entries holding given number of bytes.
+- * @offset: Start the counting of SG entries from the given offset.
+  * Return: Number of TX SG entries found given the constraints
+  */
+-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
++unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
+ {
+       const struct alg_sock *ask = alg_sk(sk);
+       const struct af_alg_ctx *ctx = ask->private;
+@@ -660,25 +658,11 @@ unsigned int af_alg_count_tsgl(struct so
+               const struct scatterlist *sg = sgl->sg;
+               for (i = 0; i < sgl->cur; i++) {
+-                      size_t bytes_count;
+-
+-                      /* Skip offset */
+-                      if (offset >= sg[i].length) {
+-                              offset -= sg[i].length;
+-                              bytes -= sg[i].length;
+-                              continue;
+-                      }
+-
+-                      bytes_count = sg[i].length - offset;
+-
+-                      offset = 0;
+                       sgl_count++;
+-
+-                      /* If we have seen requested number of bytes, stop */
+-                      if (bytes_count >= bytes)
++                      if (sg[i].length >= bytes)
+                               return sgl_count;
+-                      bytes -= bytes_count;
++                      bytes -= sg[i].length;
+               }
+       }
+@@ -690,19 +674,14 @@ EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
+  * af_alg_pull_tsgl - Release the specified buffers from TX SGL
+  *
+  * If @dst is non-null, reassign the pages to @dst. The caller must release
+- * the pages. If @dst_offset is given only reassign the pages to @dst starting
+- * at the @dst_offset (byte). The caller must ensure that @dst is large
+- * enough (e.g. by using af_alg_count_tsgl with the same offset).
++ * the pages.
+  *
+  * @sk: socket of connection to user space
+  * @used: Number of bytes to pull from TX SGL
+  * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
+  *     caller must release the buffers in dst.
+- * @dst_offset: Reassign the TX SGL from given offset. All buffers before
+- *            reaching the offset is released.
+  */
+-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+-                    size_t dst_offset)
++void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
+ {
+       struct alg_sock *ask = alg_sk(sk);
+       struct af_alg_ctx *ctx = ask->private;
+@@ -727,18 +706,10 @@ void af_alg_pull_tsgl(struct sock *sk, s
+                        * SG entries in dst.
+                        */
+                       if (dst) {
+-                              if (dst_offset >= plen) {
+-                                      /* discard page before offset */
+-                                      dst_offset -= plen;
+-                              } else {
+-                                      /* reassign page to dst after offset */
+-                                      get_page(page);
+-                                      sg_set_page(dst + j, page,
+-                                                  plen - dst_offset,
+-                                                  sg[i].offset + dst_offset);
+-                                      dst_offset = 0;
+-                                      j++;
+-                              }
++                              /* reassign page to dst after offset */
++                              get_page(page);
++                              sg_set_page(dst + j, page, plen, sg[i].offset);
++                              j++;
+                       }
+                       sg[i].length -= plen;
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -26,7 +26,6 @@
+ #include <crypto/internal/aead.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/if_alg.h>
+-#include <crypto/skcipher.h>
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/kernel.h>
+@@ -72,9 +71,8 @@ static int _aead_recvmsg(struct socket *
+       struct alg_sock *pask = alg_sk(psk);
+       struct af_alg_ctx *ctx = ask->private;
+       struct crypto_aead *tfm = pask->private;
+-      unsigned int i, as = crypto_aead_authsize(tfm);
++      unsigned int as = crypto_aead_authsize(tfm);
+       struct af_alg_async_req *areq;
+-      struct af_alg_tsgl *tsgl, *tmp;
+       struct scatterlist *rsgl_src, *tsgl_src = NULL;
+       int err = 0;
+       size_t used = 0;                /* [in]  TX bufs to be en/decrypted */
+@@ -154,23 +152,24 @@ static int _aead_recvmsg(struct socket *
+               outlen -= less;
+       }
++      /*
++       * Create a per request TX SGL for this request which tracks the
++       * SG entries from the global TX SGL.
++       */
+       processed = used + ctx->aead_assoclen;
+-      list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+-              for (i = 0; i < tsgl->cur; i++) {
+-                      struct scatterlist *process_sg = tsgl->sg + i;
+-
+-                      if (!(process_sg->length) || !sg_page(process_sg))
+-                              continue;
+-                      tsgl_src = process_sg;
+-                      break;
+-              }
+-              if (tsgl_src)
+-                      break;
+-      }
+-      if (processed && !tsgl_src) {
+-              err = -EFAULT;
++      areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
++      if (!areq->tsgl_entries)
++              areq->tsgl_entries = 1;
++      areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
++                                               areq->tsgl_entries),
++                                GFP_KERNEL);
++      if (!areq->tsgl) {
++              err = -ENOMEM;
+               goto free;
+       }
++      sg_init_table(areq->tsgl, areq->tsgl_entries);
++      af_alg_pull_tsgl(sk, processed, areq->tsgl);
++      tsgl_src = areq->tsgl;
+       /*
+        * Copy of AAD from source to destination
+@@ -179,76 +178,15 @@ static int _aead_recvmsg(struct socket *
+        * when user space uses an in-place cipher operation, the kernel
+        * will copy the data as it does not see whether such in-place operation
+        * is initiated.
+-       *
+-       * To ensure efficiency, the following implementation ensure that the
+-       * ciphers are invoked to perform a crypto operation in-place. This
+-       * is achieved by memory management specified as follows.
+        */
+       /* Use the RX SGL as source (and destination) for crypto op. */
+       rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
+-      if (ctx->enc) {
+-              /*
+-               * Encryption operation - The in-place cipher operation is
+-               * achieved by the following operation:
+-               *
+-               * TX SGL: AAD || PT
+-               *          |      |
+-               *          | copy |
+-               *          v      v
+-               * RX SGL: AAD || PT || Tag
+-               */
+-              memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
+-                            processed);
+-              af_alg_pull_tsgl(sk, processed, NULL, 0);
+-      } else {
+-              /*
+-               * Decryption operation - To achieve an in-place cipher
+-               * operation, the following  SGL structure is used:
+-               *
+-               * TX SGL: AAD || CT || Tag
+-               *          |      |     ^
+-               *          | copy |     | Create SGL link.
+-               *          v      v     |
+-               * RX SGL: AAD || CT ----+
+-               */
+-
+-              /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+-              memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
+-
+-              /* Create TX SGL for tag and chain it to RX SGL. */
+-              areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
+-                                                     processed - as);
+-              if (!areq->tsgl_entries)
+-                      areq->tsgl_entries = 1;
+-              areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+-                                                       areq->tsgl_entries),
+-                                        GFP_KERNEL);
+-              if (!areq->tsgl) {
+-                      err = -ENOMEM;
+-                      goto free;
+-              }
+-              sg_init_table(areq->tsgl, areq->tsgl_entries);
+-
+-              /* Release TX SGL, except for tag data and reassign tag data. */
+-              af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
+-
+-              /* chain the areq TX SGL holding the tag with RX SGL */
+-              if (usedpages) {
+-                      /* RX SGL present */
+-                      struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+-                      struct scatterlist *sg = sgl_prev->sgt.sgl;
+-
+-                      sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
+-                      sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
+-              } else
+-                      /* no RX SGL present (e.g. authentication only) */
+-                      rsgl_src = areq->tsgl;
+-      }
++      memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);
+       /* Initialize the crypto operation */
+-      aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
++      aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
+                              areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
+       aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
+       aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
+@@ -450,7 +388,7 @@ static void aead_sock_destruct(struct so
+       struct crypto_aead *tfm = pask->private;
+       unsigned int ivlen = crypto_aead_ivsize(tfm);
+-      af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
++      af_alg_pull_tsgl(sk, ctx->used, NULL);
+       sock_kzfree_s(sk, ctx->iv, ivlen);
+       sock_kfree_s(sk, ctx, ctx->len);
+       af_alg_release_parent(sk);
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -143,7 +143,7 @@ static int _skcipher_recvmsg(struct sock
+        * Create a per request TX SGL for this request which tracks the
+        * SG entries from the global TX SGL.
+        */
+-      areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
++      areq->tsgl_entries = af_alg_count_tsgl(sk, len);
+       if (!areq->tsgl_entries)
+               areq->tsgl_entries = 1;
+       areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+@@ -154,7 +154,7 @@ static int _skcipher_recvmsg(struct sock
+               goto free;
+       }
+       sg_init_table(areq->tsgl, areq->tsgl_entries);
+-      af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
++      af_alg_pull_tsgl(sk, len, areq->tsgl);
+       /* Initialize the crypto operation */
+       skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
+@@ -368,7 +368,7 @@ static void skcipher_sock_destruct(struc
+       struct alg_sock *pask = alg_sk(psk);
+       struct crypto_skcipher *tfm = pask->private;
+-      af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
++      af_alg_pull_tsgl(sk, ctx->used, NULL);
+       sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
+       if (ctx->state)
+               sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -230,9 +230,8 @@ static inline bool af_alg_readable(struc
+       return PAGE_SIZE <= af_alg_rcvbuf(sk);
+ }
+-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
+-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+-                    size_t dst_offset);
++unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
++void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
+ void af_alg_wmem_wakeup(struct sock *sk);
+ int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
+ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
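For reference, a minimal userspace sketch (not taken from the patches; error handling omitted, and gcm(aes) with an all-zero demo key and nonce are assumptions) of how algif_aead is driven. It illustrates why in-place operation buys nothing here: the TX data (AAD || PT) arrives via sendmsg() from one user buffer, while the result (AAD || CT || tag) is read() into a separate user buffer, so the kernel's source and destination always come from different mappings.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

#ifndef SOL_ALG
#define SOL_ALG 279
#endif

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "aead",
		.salg_name   = "gcm(aes)",
	};
	unsigned char key[16] = { 0 };		/* demo key (assumption) */
	unsigned char iv[12]  = { 0 };		/* demo GCM nonce (assumption) */
	unsigned char aad_pt[32] = { 0 };	/* TX buffer: 16 bytes AAD || 16 bytes PT */
	unsigned char out[48];			/* RX buffer: AAD || CT || 16-byte tag */
	union {
		char buf[2 * CMSG_SPACE(sizeof(__u32)) +
			 CMSG_SPACE(sizeof(struct af_alg_iv) + sizeof(iv))];
		struct cmsghdr align;
	} cbuf = { 0 };
	struct iovec iov = { .iov_base = aad_pt, .iov_len = sizeof(aad_pt) };
	struct msghdr msg = {
		.msg_control	= cbuf.buf,
		.msg_controllen	= sizeof(cbuf.buf),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
	};
	struct af_alg_iv *alg_iv;
	struct cmsghdr *cmsg;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	/* authsize is carried in optlen; optval is unused */
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
	opfd = accept(tfmfd, NULL, 0);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_OP;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_AEAD_ASSOCLEN;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(__u32));
	*(__u32 *)CMSG_DATA(cmsg) = 16;

	cmsg = CMSG_NXTHDR(&msg, cmsg);
	cmsg->cmsg_level = SOL_ALG;
	cmsg->cmsg_type  = ALG_SET_IV;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(struct af_alg_iv) + sizeof(iv));
	alg_iv = (struct af_alg_iv *)CMSG_DATA(cmsg);
	alg_iv->ivlen = sizeof(iv);
	memcpy(alg_iv->iv, iv, sizeof(iv));

	sendmsg(opfd, &msg, 0);		/* TX SGL is copied from aad_pt */
	read(opfd, out, sizeof(out));	/* RX SGL lands in a separate buffer */

	close(opfd);
	close(tfmfd);
	return 0;
}
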
diff --git a/queue-6.12/crypto-algif_aead-snapshot-iv-for-async-aead-requests.patch b/queue-6.12/crypto-algif_aead-snapshot-iv-for-async-aead-requests.patch
new file mode 100644 (file)
index 0000000..e106272
--- /dev/null
@@ -0,0 +1,77 @@
+From stable+bounces-241999-greg=kroah.com@vger.kernel.org Thu Apr 30 08:09:22 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:06:58 -0700
+Subject: crypto: algif_aead - snapshot IV for async AEAD requests
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Douya Le <ldy3087146292@gmail.com>, stable@kernel.org, Yuan Tan <yuantan098@gmail.com>, Yifan Wu <yifanwucs@gmail.com>, Juefei Pu <tomapufckgml@gmail.com>, Xin Liu <bird@lzu.edu.cn>, Luxing Yin <tr0jan@lzu.edu.cn>, Yucheng Lu <kanolyc@gmail.com>, Ren Wei <n05ec@lzu.edu.cn>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-5-ebiggers@kernel.org>
+
+From: Douya Le <ldy3087146292@gmail.com>
+
+commit 5aa58c3a572b3e3b6c786953339f7978b845cc52 upstream.
+
+AF_ALG AEAD AIO requests currently use the socket-wide IV buffer during
+request processing.  For async requests, later socket activity can
+update that shared state before the original request has fully
+completed, which can lead to inconsistent IV handling.
+
+Snapshot the IV into per-request storage when preparing the AEAD
+request, so in-flight operations no longer depend on mutable socket
+state.
+
+Fixes: d887c52d6ae4 ("crypto: algif_aead - overhaul memory management")
+Cc: stable@kernel.org
+Reported-by: Yuan Tan <yuantan098@gmail.com>
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Reported-by: Xin Liu <bird@lzu.edu.cn>
+Co-developed-by: Luxing Yin <tr0jan@lzu.edu.cn>
+Signed-off-by: Luxing Yin <tr0jan@lzu.edu.cn>
+Tested-by: Yucheng Lu <kanolyc@gmail.com>
+Signed-off-by: Douya Le <ldy3087146292@gmail.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/algif_aead.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -72,8 +72,10 @@ static int _aead_recvmsg(struct socket *
+       struct af_alg_ctx *ctx = ask->private;
+       struct crypto_aead *tfm = pask->private;
+       unsigned int as = crypto_aead_authsize(tfm);
++      unsigned int ivsize = crypto_aead_ivsize(tfm);
+       struct af_alg_async_req *areq;
+       struct scatterlist *rsgl_src, *tsgl_src = NULL;
++      void *iv;
+       int err = 0;
+       size_t used = 0;                /* [in]  TX bufs to be en/decrypted */
+       size_t outlen = 0;              /* [out] RX bufs produced by kernel */
+@@ -125,10 +127,14 @@ static int _aead_recvmsg(struct socket *
+       /* Allocate cipher request for current operation. */
+       areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
+-                                   crypto_aead_reqsize(tfm));
++                                   crypto_aead_reqsize(tfm) + ivsize);
+       if (IS_ERR(areq))
+               return PTR_ERR(areq);
++      iv = (u8 *)aead_request_ctx(&areq->cra_u.aead_req) +
++           crypto_aead_reqsize(tfm);
++      memcpy(iv, ctx->iv, ivsize);
++
+       /* convert iovecs of output buffers into RX SGL */
+       err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
+       if (err)
+@@ -187,7 +193,7 @@ static int _aead_recvmsg(struct socket *
+       /* Initialize the crypto operation */
+       aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
+-                             areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
++                             areq->first_rsgl.sgl.sgt.sgl, used, iv);
+       aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
+       aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
diff --git a/queue-6.12/crypto-algif_aead-use-memcpy_sglist-instead-of-null-skcipher.patch b/queue-6.12/crypto-algif_aead-use-memcpy_sglist-instead-of-null-skcipher.patch
new file mode 100644 (file)
index 0000000..78646c0
--- /dev/null
@@ -0,0 +1,246 @@
+From stable+bounces-241997-greg=kroah.com@vger.kernel.org Thu Apr 30 08:09:28 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:06:56 -0700
+Subject: crypto: algif_aead - use memcpy_sglist() instead of null skcipher
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Eric Biggers <ebiggers@google.com>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-3-ebiggers@kernel.org>
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit f2804d0eee8ddd57aa79d0b82872b74c21e1b69b upstream.
+
+For copying data between two scatterlists, just use memcpy_sglist()
+instead of the so-called "null skcipher".  This is much simpler.
+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/Kconfig      |    1 
+ crypto/algif_aead.c |  101 +++++++++-------------------------------------------
+ 2 files changed, 18 insertions(+), 84 deletions(-)
+
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -1424,7 +1424,6 @@ config CRYPTO_USER_API_AEAD
+       depends on NET
+       select CRYPTO_AEAD
+       select CRYPTO_SKCIPHER
+-      select CRYPTO_NULL
+       select CRYPTO_USER_API
+       help
+         Enable the userspace interface for AEAD cipher algorithms.
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -27,7 +27,6 @@
+ #include <crypto/scatterwalk.h>
+ #include <crypto/if_alg.h>
+ #include <crypto/skcipher.h>
+-#include <crypto/null.h>
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/kernel.h>
+@@ -36,19 +35,13 @@
+ #include <linux/net.h>
+ #include <net/sock.h>
+-struct aead_tfm {
+-      struct crypto_aead *aead;
+-      struct crypto_sync_skcipher *null_tfm;
+-};
+-
+ static inline bool aead_sufficient_data(struct sock *sk)
+ {
+       struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
+       struct af_alg_ctx *ctx = ask->private;
+-      struct aead_tfm *aeadc = pask->private;
+-      struct crypto_aead *tfm = aeadc->aead;
++      struct crypto_aead *tfm = pask->private;
+       unsigned int as = crypto_aead_authsize(tfm);
+       /*
+@@ -64,27 +57,12 @@ static int aead_sendmsg(struct socket *s
+       struct alg_sock *ask = alg_sk(sk);
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
+-      struct aead_tfm *aeadc = pask->private;
+-      struct crypto_aead *tfm = aeadc->aead;
++      struct crypto_aead *tfm = pask->private;
+       unsigned int ivsize = crypto_aead_ivsize(tfm);
+       return af_alg_sendmsg(sock, msg, size, ivsize);
+ }
+-static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
+-                              struct scatterlist *src,
+-                              struct scatterlist *dst, unsigned int len)
+-{
+-      SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+-
+-      skcipher_request_set_sync_tfm(skreq, null_tfm);
+-      skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+-                                    NULL, NULL);
+-      skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+-
+-      return crypto_skcipher_encrypt(skreq);
+-}
+-
+ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+                        size_t ignored, int flags)
+ {
+@@ -93,9 +71,7 @@ static int _aead_recvmsg(struct socket *
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
+       struct af_alg_ctx *ctx = ask->private;
+-      struct aead_tfm *aeadc = pask->private;
+-      struct crypto_aead *tfm = aeadc->aead;
+-      struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
++      struct crypto_aead *tfm = pask->private;
+       unsigned int i, as = crypto_aead_authsize(tfm);
+       struct af_alg_async_req *areq;
+       struct af_alg_tsgl *tsgl, *tmp;
+@@ -223,11 +199,8 @@ static int _aead_recvmsg(struct socket *
+                *          v      v
+                * RX SGL: AAD || PT || Tag
+                */
+-              err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
+-                                         areq->first_rsgl.sgl.sgt.sgl,
+-                                         processed);
+-              if (err)
+-                      goto free;
++              memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src,
++                            processed);
+               af_alg_pull_tsgl(sk, processed, NULL, 0);
+       } else {
+               /*
+@@ -241,12 +214,8 @@ static int _aead_recvmsg(struct socket *
+                * RX SGL: AAD || CT ----+
+                */
+-               /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+-              err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
+-                                         areq->first_rsgl.sgl.sgt.sgl,
+-                                         outlen);
+-              if (err)
+-                      goto free;
++              /* Copy AAD || CT to RX SGL buffer for in-place operation. */
++              memcpy_sglist(areq->first_rsgl.sgl.sgt.sgl, tsgl_src, outlen);
+               /* Create TX SGL for tag and chain it to RX SGL. */
+               areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
+@@ -379,7 +348,7 @@ static int aead_check_key(struct socket
+       int err = 0;
+       struct sock *psk;
+       struct alg_sock *pask;
+-      struct aead_tfm *tfm;
++      struct crypto_aead *tfm;
+       struct sock *sk = sock->sk;
+       struct alg_sock *ask = alg_sk(sk);
+@@ -393,7 +362,7 @@ static int aead_check_key(struct socket
+       err = -ENOKEY;
+       lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+-      if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
++      if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+               goto unlock;
+       atomic_dec(&pask->nokey_refcnt);
+@@ -454,54 +423,22 @@ static struct proto_ops algif_aead_ops_n
+ static void *aead_bind(const char *name, u32 type, u32 mask)
+ {
+-      struct aead_tfm *tfm;
+-      struct crypto_aead *aead;
+-      struct crypto_sync_skcipher *null_tfm;
+-
+-      tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+-      if (!tfm)
+-              return ERR_PTR(-ENOMEM);
+-
+-      aead = crypto_alloc_aead(name, type, mask);
+-      if (IS_ERR(aead)) {
+-              kfree(tfm);
+-              return ERR_CAST(aead);
+-      }
+-
+-      null_tfm = crypto_get_default_null_skcipher();
+-      if (IS_ERR(null_tfm)) {
+-              crypto_free_aead(aead);
+-              kfree(tfm);
+-              return ERR_CAST(null_tfm);
+-      }
+-
+-      tfm->aead = aead;
+-      tfm->null_tfm = null_tfm;
+-
+-      return tfm;
++      return crypto_alloc_aead(name, type, mask);
+ }
+ static void aead_release(void *private)
+ {
+-      struct aead_tfm *tfm = private;
+-
+-      crypto_free_aead(tfm->aead);
+-      crypto_put_default_null_skcipher();
+-      kfree(tfm);
++      crypto_free_aead(private);
+ }
+ static int aead_setauthsize(void *private, unsigned int authsize)
+ {
+-      struct aead_tfm *tfm = private;
+-
+-      return crypto_aead_setauthsize(tfm->aead, authsize);
++      return crypto_aead_setauthsize(private, authsize);
+ }
+ static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+-      struct aead_tfm *tfm = private;
+-
+-      return crypto_aead_setkey(tfm->aead, key, keylen);
++      return crypto_aead_setkey(private, key, keylen);
+ }
+ static void aead_sock_destruct(struct sock *sk)
+@@ -510,8 +447,7 @@ static void aead_sock_destruct(struct so
+       struct af_alg_ctx *ctx = ask->private;
+       struct sock *psk = ask->parent;
+       struct alg_sock *pask = alg_sk(psk);
+-      struct aead_tfm *aeadc = pask->private;
+-      struct crypto_aead *tfm = aeadc->aead;
++      struct crypto_aead *tfm = pask->private;
+       unsigned int ivlen = crypto_aead_ivsize(tfm);
+       af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+@@ -524,10 +460,9 @@ static int aead_accept_parent_nokey(void
+ {
+       struct af_alg_ctx *ctx;
+       struct alg_sock *ask = alg_sk(sk);
+-      struct aead_tfm *tfm = private;
+-      struct crypto_aead *aead = tfm->aead;
++      struct crypto_aead *tfm = private;
+       unsigned int len = sizeof(*ctx);
+-      unsigned int ivlen = crypto_aead_ivsize(aead);
++      unsigned int ivlen = crypto_aead_ivsize(tfm);
+       ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+       if (!ctx)
+@@ -554,9 +489,9 @@ static int aead_accept_parent_nokey(void
+ static int aead_accept_parent(void *private, struct sock *sk)
+ {
+-      struct aead_tfm *tfm = private;
++      struct crypto_aead *tfm = private;
+-      if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
++      if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+               return -ENOKEY;
+       return aead_accept_parent_nokey(private, sk);
diff --git a/queue-6.12/crypto-authenc-use-memcpy_sglist-instead-of-null-skcipher.patch b/queue-6.12/crypto-authenc-use-memcpy_sglist-instead-of-null-skcipher.patch
new file mode 100644 (file)
index 0000000..3226ea6
--- /dev/null
@@ -0,0 +1,232 @@
+From stable+bounces-242000-greg=kroah.com@vger.kernel.org Thu Apr 30 08:09:54 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:06:59 -0700
+Subject: crypto: authenc - use memcpy_sglist() instead of null skcipher
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Eric Biggers <ebiggers@google.com>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-6-ebiggers@kernel.org>
+
+From: Eric Biggers <ebiggers@google.com>
+
+commit dbc4b1458e931e47198c3165ff5853bc1ad6bd7a upstream.
+
+For copying data between two scatterlists, just use memcpy_sglist()
+instead of the so-called "null skcipher".  This is much simpler.
+
+Signed-off-by: Eric Biggers <ebiggers@google.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/Kconfig      |    1 -
+ crypto/authenc.c    |   32 +-------------------------------
+ crypto/authencesn.c |   38 +++-----------------------------------
+ 3 files changed, 4 insertions(+), 67 deletions(-)
+
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -222,7 +222,6 @@ config CRYPTO_AUTHENC
+       select CRYPTO_SKCIPHER
+       select CRYPTO_MANAGER
+       select CRYPTO_HASH
+-      select CRYPTO_NULL
+       help
+         Authenc: Combined mode wrapper for IPsec.
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -9,7 +9,6 @@
+ #include <crypto/internal/hash.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/authenc.h>
+-#include <crypto/null.h>
+ #include <crypto/scatterwalk.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+@@ -28,7 +27,6 @@ struct authenc_instance_ctx {
+ struct crypto_authenc_ctx {
+       struct crypto_ahash *auth;
+       struct crypto_skcipher *enc;
+-      struct crypto_sync_skcipher *null;
+ };
+ struct authenc_request_ctx {
+@@ -186,21 +184,6 @@ static void crypto_authenc_encrypt_done(
+       authenc_request_complete(areq, err);
+ }
+-static int crypto_authenc_copy_assoc(struct aead_request *req)
+-{
+-      struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+-      struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+-      SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+-
+-      skcipher_request_set_sync_tfm(skreq, ctx->null);
+-      skcipher_request_set_callback(skreq, aead_request_flags(req),
+-                                    NULL, NULL);
+-      skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+-                                 NULL);
+-
+-      return crypto_skcipher_encrypt(skreq);
+-}
+-
+ static int crypto_authenc_encrypt(struct aead_request *req)
+ {
+       struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+@@ -219,10 +202,7 @@ static int crypto_authenc_encrypt(struct
+       dst = src;
+       if (req->src != req->dst) {
+-              err = crypto_authenc_copy_assoc(req);
+-              if (err)
+-                      return err;
+-
++              memcpy_sglist(req->dst, req->src, req->assoclen);
+               dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+       }
+@@ -328,7 +308,6 @@ static int crypto_authenc_init_tfm(struc
+       struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+       struct crypto_ahash *auth;
+       struct crypto_skcipher *enc;
+-      struct crypto_sync_skcipher *null;
+       int err;
+       auth = crypto_spawn_ahash(&ictx->auth);
+@@ -340,14 +319,8 @@ static int crypto_authenc_init_tfm(struc
+       if (IS_ERR(enc))
+               goto err_free_ahash;
+-      null = crypto_get_default_null_skcipher();
+-      err = PTR_ERR(null);
+-      if (IS_ERR(null))
+-              goto err_free_skcipher;
+-
+       ctx->auth = auth;
+       ctx->enc = enc;
+-      ctx->null = null;
+       crypto_aead_set_reqsize(
+               tfm,
+@@ -361,8 +334,6 @@ static int crypto_authenc_init_tfm(struc
+       return 0;
+-err_free_skcipher:
+-      crypto_free_skcipher(enc);
+ err_free_ahash:
+       crypto_free_ahash(auth);
+       return err;
+@@ -374,7 +345,6 @@ static void crypto_authenc_exit_tfm(stru
+       crypto_free_ahash(ctx->auth);
+       crypto_free_skcipher(ctx->enc);
+-      crypto_put_default_null_skcipher();
+ }
+ static void crypto_authenc_free(struct aead_instance *inst)
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -12,7 +12,6 @@
+ #include <crypto/internal/hash.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/authenc.h>
+-#include <crypto/null.h>
+ #include <crypto/scatterwalk.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+@@ -31,7 +30,6 @@ struct crypto_authenc_esn_ctx {
+       unsigned int reqoff;
+       struct crypto_ahash *auth;
+       struct crypto_skcipher *enc;
+-      struct crypto_sync_skcipher *null;
+ };
+ struct authenc_esn_request_ctx {
+@@ -158,20 +156,6 @@ static void crypto_authenc_esn_encrypt_d
+       authenc_esn_request_complete(areq, err);
+ }
+-static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
+-{
+-      struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+-      struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+-      SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+-
+-      skcipher_request_set_sync_tfm(skreq, ctx->null);
+-      skcipher_request_set_callback(skreq, aead_request_flags(req),
+-                                    NULL, NULL);
+-      skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
+-
+-      return crypto_skcipher_encrypt(skreq);
+-}
+-
+ static int crypto_authenc_esn_encrypt(struct aead_request *req)
+ {
+       struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+@@ -193,10 +177,7 @@ static int crypto_authenc_esn_encrypt(st
+       dst = src;
+       if (req->src != req->dst) {
+-              err = crypto_authenc_esn_copy(req, assoclen);
+-              if (err)
+-                      return err;
+-
++              memcpy_sglist(req->dst, req->src, assoclen);
+               sg_init_table(areq_ctx->dst, 2);
+               dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
+       }
+@@ -283,11 +264,8 @@ static int crypto_authenc_esn_decrypt(st
+       cryptlen -= authsize;
+-      if (req->src != dst) {
+-              err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
+-              if (err)
+-                      return err;
+-      }
++      if (req->src != dst)
++              memcpy_sglist(dst, req->src, assoclen + cryptlen);
+       scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
+                                authsize, 0);
+@@ -323,7 +301,6 @@ static int crypto_authenc_esn_init_tfm(s
+       struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
+       struct crypto_ahash *auth;
+       struct crypto_skcipher *enc;
+-      struct crypto_sync_skcipher *null;
+       int err;
+       auth = crypto_spawn_ahash(&ictx->auth);
+@@ -335,14 +312,8 @@ static int crypto_authenc_esn_init_tfm(s
+       if (IS_ERR(enc))
+               goto err_free_ahash;
+-      null = crypto_get_default_null_skcipher();
+-      err = PTR_ERR(null);
+-      if (IS_ERR(null))
+-              goto err_free_skcipher;
+-
+       ctx->auth = auth;
+       ctx->enc = enc;
+-      ctx->null = null;
+       ctx->reqoff = 2 * crypto_ahash_digestsize(auth);
+@@ -358,8 +329,6 @@ static int crypto_authenc_esn_init_tfm(s
+       return 0;
+-err_free_skcipher:
+-      crypto_free_skcipher(enc);
+ err_free_ahash:
+       crypto_free_ahash(auth);
+       return err;
+@@ -371,7 +340,6 @@ static void crypto_authenc_esn_exit_tfm(
+       crypto_free_ahash(ctx->auth);
+       crypto_free_skcipher(ctx->enc);
+-      crypto_put_default_null_skcipher();
+ }
+ static void crypto_authenc_esn_free(struct aead_instance *inst)
diff --git a/queue-6.12/crypto-authencesn-do-not-place-hiseq-at-end-of-dst-for-out-of-place-decryption.patch b/queue-6.12/crypto-authencesn-do-not-place-hiseq-at-end-of-dst-for-out-of-place-decryption.patch
new file mode 100644 (file)
index 0000000..30a20f4
--- /dev/null
@@ -0,0 +1,121 @@
+From stable+bounces-242001-greg=kroah.com@vger.kernel.org Thu Apr 30 08:10:05 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:07:00 -0700
+Subject: crypto: authencesn - Do not place hiseq at end of dst for out-of-place decryption
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Taeyang Lee <0wn@theori.io>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-7-ebiggers@kernel.org>
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit e02494114ebf7c8b42777c6cd6982f113bfdbec7 upstream.
+
+When decrypting data that is not in-place (src != dst), there is
+no need to save the high-order sequence bits in dst as it could
+simply be re-copied from the source.
+
+However, the data to be hashed need to be rearranged accordingly.
+
+Reported-by: Taeyang Lee <0wn@theori.io>
+Fixes: 104880a6b470 ("crypto: authencesn - Convert to new AEAD interface")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/authencesn.c |   48 +++++++++++++++++++++++++++++-------------------
+ 1 file changed, 29 insertions(+), 19 deletions(-)
+
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -207,6 +207,7 @@ static int crypto_authenc_esn_decrypt_ta
+       u8 *ohash = areq_ctx->tail;
+       unsigned int cryptlen = req->cryptlen - authsize;
+       unsigned int assoclen = req->assoclen;
++      struct scatterlist *src = req->src;
+       struct scatterlist *dst = req->dst;
+       u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+       u32 tmp[2];
+@@ -214,23 +215,27 @@ static int crypto_authenc_esn_decrypt_ta
+       if (!authsize)
+               goto decrypt;
+-      /* Move high-order bits of sequence number back. */
+-      scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+-      scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+-      scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
++      if (src == dst) {
++              /* Move high-order bits of sequence number back. */
++              scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
++              scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
++              scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
++      } else
++              memcpy_sglist(dst, src, assoclen);
+       if (crypto_memneq(ihash, ohash, authsize))
+               return -EBADMSG;
+ decrypt:
+-      sg_init_table(areq_ctx->dst, 2);
++      if (src != dst)
++              src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
+       dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
+       skcipher_request_set_tfm(skreq, ctx->enc);
+       skcipher_request_set_callback(skreq, flags,
+                                     req->base.complete, req->base.data);
+-      skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
++      skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
+       return crypto_skcipher_decrypt(skreq);
+ }
+@@ -255,6 +260,7 @@ static int crypto_authenc_esn_decrypt(st
+       unsigned int assoclen = req->assoclen;
+       unsigned int cryptlen = req->cryptlen;
+       u8 *ihash = ohash + crypto_ahash_digestsize(auth);
++      struct scatterlist *src = req->src;
+       struct scatterlist *dst = req->dst;
+       u32 tmp[2];
+       int err;
+@@ -262,24 +268,28 @@ static int crypto_authenc_esn_decrypt(st
+       if (assoclen < 8)
+               return -EINVAL;
+-      cryptlen -= authsize;
+-
+-      if (req->src != dst)
+-              memcpy_sglist(dst, req->src, assoclen + cryptlen);
++      if (!authsize)
++              goto tail;
++      cryptlen -= authsize;
+       scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
+                                authsize, 0);
+-      if (!authsize)
+-              goto tail;
+-
+       /* Move high-order bits of sequence number to the end. */
+-      scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+-      scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+-      scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
+-
+-      sg_init_table(areq_ctx->dst, 2);
+-      dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
++      scatterwalk_map_and_copy(tmp, src, 0, 8, 0);
++      if (src == dst) {
++              scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
++              scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
++              dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
++      } else {
++              scatterwalk_map_and_copy(tmp, dst, 0, 4, 1);
++              scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen - 4, 4, 1);
++
++              src = scatterwalk_ffwd(areq_ctx->src, src, 8);
++              dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
++              memcpy_sglist(dst, src, assoclen + cryptlen - 8);
++              dst = req->dst;
++      }
+       ahash_request_set_tfm(ahreq, auth);
+       ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
diff --git a/queue-6.12/crypto-authencesn-fix-src-offset-when-decrypting-in-place.patch b/queue-6.12/crypto-authencesn-fix-src-offset-when-decrypting-in-place.patch
new file mode 100644 (file)
index 0000000..854a6bf
--- /dev/null
@@ -0,0 +1,40 @@
+From stable+bounces-242002-greg=kroah.com@vger.kernel.org Thu Apr 30 08:10:13 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:07:01 -0700
+Subject: crypto: authencesn - Fix src offset when decrypting in-place
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Wolfgang Walter <linux@stwm.de>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-8-ebiggers@kernel.org>
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+commit 1f48ad3b19a9dfc947868edda0bb8e48e5b5a8fa upstream.
+
+The src SG list offset wasn't set properly when decrypting in-place,
+fix it.
+
+Reported-by: Wolfgang Walter <linux@stwm.de>
+Fixes: e02494114ebf ("crypto: authencesn - Do not place hiseq at end of dst for out-of-place decryption")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/authencesn.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -228,9 +228,11 @@ static int crypto_authenc_esn_decrypt_ta
+ decrypt:
+-      if (src != dst)
+-              src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
+       dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
++      if (req->src == req->dst)
++              src = dst;
++      else
++              src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
+       skcipher_request_set_tfm(skreq, ctx->enc);
+       skcipher_request_set_callback(skreq, flags,
diff --git a/queue-6.12/crypto-scatterwalk-backport-memcpy_sglist.patch b/queue-6.12/crypto-scatterwalk-backport-memcpy_sglist.patch
new file mode 100644 (file)
index 0000000..08087ae
--- /dev/null
@@ -0,0 +1,174 @@
+From stable+bounces-241996-greg=kroah.com@vger.kernel.org Thu Apr 30 08:09:25 2026
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Wed, 29 Apr 2026 23:06:55 -0700
+Subject: crypto: scatterwalk - Backport memcpy_sglist()
+To: stable@vger.kernel.org
+Cc: linux-crypto@vger.kernel.org, Herbert Xu <herbert@gondor.apana.org.au>, Eric Biggers <ebiggers@kernel.org>
+Message-ID: <20260430060702.110091-2-ebiggers@kernel.org>
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+This backports the current implementation of memcpy_sglist() from
+upstream commit 4dffc9bbffb9ccfcda730d899c97c553599e7ca8.
+
+This function was rewritten twice.  The earlier implementations had many
+prerequisite commits, while the latest implementation is standalone.
+It's much easier to just backport the latest code directly.
+
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ crypto/scatterwalk.c         |   94 +++++++++++++++++++++++++++++++++++++++++++
+ include/crypto/scatterwalk.h |   31 ++++++++++++++
+ 2 files changed, 125 insertions(+)
+
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -69,6 +69,100 @@ void scatterwalk_map_and_copy(void *buf,
+ }
+ EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
++/**
++ * memcpy_sglist() - Copy data from one scatterlist to another
++ * @dst: The destination scatterlist.  Can be NULL if @nbytes == 0.
++ * @src: The source scatterlist.  Can be NULL if @nbytes == 0.
++ * @nbytes: Number of bytes to copy
++ *
++ * The scatterlists can describe exactly the same memory, in which case this
++ * function is a no-op.  No other overlaps are supported.
++ *
++ * Context: Any context
++ */
++void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
++                 unsigned int nbytes)
++{
++      unsigned int src_offset, dst_offset;
++
++      if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
++              return;
++
++      src_offset = src->offset;
++      dst_offset = dst->offset;
++      for (;;) {
++              /* Compute the length to copy this step. */
++              unsigned int len = min3(src->offset + src->length - src_offset,
++                                      dst->offset + dst->length - dst_offset,
++                                      nbytes);
++              struct page *src_page = sg_page(src);
++              struct page *dst_page = sg_page(dst);
++              const void *src_virt;
++              void *dst_virt;
++
++              if (IS_ENABLED(CONFIG_HIGHMEM)) {
++                      /* HIGHMEM: we may have to actually map the pages. */
++                      const unsigned int src_oip = offset_in_page(src_offset);
++                      const unsigned int dst_oip = offset_in_page(dst_offset);
++                      const unsigned int limit = PAGE_SIZE;
++
++                      /* Further limit len to not cross a page boundary. */
++                      len = min3(len, limit - src_oip, limit - dst_oip);
++
++                      /* Compute the source and destination pages. */
++                      src_page += src_offset / PAGE_SIZE;
++                      dst_page += dst_offset / PAGE_SIZE;
++
++                      if (src_page != dst_page) {
++                              /* Copy between different pages. */
++                              memcpy_page(dst_page, dst_oip,
++                                          src_page, src_oip, len);
++                              flush_dcache_page(dst_page);
++                      } else if (src_oip != dst_oip) {
++                              /* Copy between different parts of same page. */
++                              dst_virt = kmap_local_page(dst_page);
++                              memcpy(dst_virt + dst_oip, dst_virt + src_oip,
++                                     len);
++                              kunmap_local(dst_virt);
++                              flush_dcache_page(dst_page);
++                      } /* Else, it's the same memory.  No action needed. */
++              } else {
++                      /*
++                       * !HIGHMEM: no mapping needed.  Just work in the linear
++                       * buffer of each sg entry.  Note that we can cross page
++                       * boundaries, as they are not significant in this case.
++                       */
++                      src_virt = page_address(src_page) + src_offset;
++                      dst_virt = page_address(dst_page) + dst_offset;
++                      if (src_virt != dst_virt) {
++                              memcpy(dst_virt, src_virt, len);
++                              if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
++                                      __scatterwalk_flush_dcache_pages(
++                                              dst_page, dst_offset, len);
++                      } /* Else, it's the same memory.  No action needed. */
++              }
++              nbytes -= len;
++              if (nbytes == 0) /* No more to copy? */
++                      break;
++
++              /*
++               * There's more to copy.  Advance the offsets by the length
++               * copied this step, and advance the sg entries as needed.
++               */
++              src_offset += len;
++              if (src_offset >= src->offset + src->length) {
++                      src = sg_next(src);
++                      src_offset = src->offset;
++              }
++              dst_offset += len;
++              if (dst_offset >= dst->offset + dst->length) {
++                      dst = sg_next(dst);
++                      dst_offset = dst->offset;
++              }
++      }
++}
++EXPORT_SYMBOL_GPL(memcpy_sglist);
++
+ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+                                    struct scatterlist *src,
+                                    unsigned int len)
+--- a/include/crypto/scatterwalk.h
++++ b/include/crypto/scatterwalk.h
+@@ -83,6 +83,34 @@ static inline void scatterwalk_pagedone(
+               scatterwalk_start(walk, sg_next(walk->sg));
+ }
++/*
++ * Flush the dcache of any pages that overlap the region
++ * [offset, offset + nbytes) relative to base_page.
++ *
++ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
++ * that all relevant code (including the call to sg_page() in the caller, if
++ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
++ */
++static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
++                                                  unsigned int offset,
++                                                  unsigned int nbytes)
++{
++      unsigned int num_pages;
++
++      base_page += offset / PAGE_SIZE;
++      offset %= PAGE_SIZE;
++
++      /*
++       * This is an overflow-safe version of
++       * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
++       */
++      num_pages = nbytes / PAGE_SIZE;
++      num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
++
++      for (unsigned int i = 0; i < num_pages; i++)
++              flush_dcache_page(base_page + i);
++}
++
+ static inline void scatterwalk_done(struct scatter_walk *walk, int out,
+                                   int more)
+ {
+@@ -94,6 +122,9 @@ static inline void scatterwalk_done(stru
+ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
+                           size_t nbytes, int out);
++void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
++                 unsigned int nbytes);
++
+ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+                             unsigned int start, unsigned int nbytes, int out);
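For reference, a minimal kernel-side sketch (hypothetical demo function, not part of the queue) of using the backported helper: two single-entry scatterlists built with sg_init_one() and copied with memcpy_sglist(). Unlike the null-skcipher path it replaces in the patches above, the copy returns void and cannot fail; per the kerneldoc, identical src/dst memory is a no-op and any other overlap is unsupported.

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/string.h>

/* Hypothetical demo, not part of the queue. */
static int memcpy_sglist_demo(void)
{
	struct scatterlist src_sg, dst_sg;
	u8 *src, *dst;
	int err = -ENOMEM;

	src = kmalloc(64, GFP_KERNEL);
	dst = kmalloc(64, GFP_KERNEL);
	if (!src || !dst)
		goto out;
	memset(src, 0xaa, 64);

	sg_init_one(&src_sg, src, 64);
	sg_init_one(&dst_sg, dst, 64);

	/* Copy 64 bytes from src_sg to dst_sg; no error to check. */
	memcpy_sglist(&dst_sg, &src_sg, 64);

	err = memcmp(src, dst, 64) ? -EIO : 0;
out:
	kfree(src);
	kfree(dst);
	return err;
}
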
diff --git a/queue-6.12/series b/queue-6.12/series
new file mode 100644 (file)
index 0000000..768a75b
--- /dev/null
@@ -0,0 +1,8 @@
+crypto-scatterwalk-backport-memcpy_sglist.patch
+crypto-algif_aead-use-memcpy_sglist-instead-of-null-skcipher.patch
+crypto-algif_aead-revert-to-operating-out-of-place.patch
+crypto-algif_aead-snapshot-iv-for-async-aead-requests.patch
+crypto-authenc-use-memcpy_sglist-instead-of-null-skcipher.patch
+crypto-authencesn-do-not-place-hiseq-at-end-of-dst-for-out-of-place-decryption.patch
+crypto-authencesn-fix-src-offset-when-decrypting-in-place.patch
+crypto-af_alg-fix-page-reassignment-overflow-in-af_alg_pull_tsgl.patch