git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
7.0-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 May 2026 17:25:29 +0000 (19:25 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 May 2026 17:25:29 +0000 (19:25 +0200)
added patches:
8021q-delete-cleared-egress-qos-mappings.patch
8021q-use-rcu-for-egress-qos-mappings.patch
bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch
crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch
crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch
crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch
hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch
hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch
mmc-core-add-quirk-for-incorrect-manufacturing-date.patch
mmc-core-adjust-mdt-beyond-2025.patch
mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch
net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch
net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch
octeon_ep_vf-add-null-check-for-napi_build_skb.patch
printk-add-print_hex_dump_devel.patch
rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch

17 files changed:
queue-7.0/8021q-delete-cleared-egress-qos-mappings.patch [new file with mode: 0644]
queue-7.0/8021q-use-rcu-for-egress-qos-mappings.patch [new file with mode: 0644]
queue-7.0/bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch [new file with mode: 0644]
queue-7.0/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch [new file with mode: 0644]
queue-7.0/crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch [new file with mode: 0644]
queue-7.0/crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch [new file with mode: 0644]
queue-7.0/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch [new file with mode: 0644]
queue-7.0/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch [new file with mode: 0644]
queue-7.0/mmc-core-add-quirk-for-incorrect-manufacturing-date.patch [new file with mode: 0644]
queue-7.0/mmc-core-adjust-mdt-beyond-2025.patch [new file with mode: 0644]
queue-7.0/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch [new file with mode: 0644]
queue-7.0/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch [new file with mode: 0644]
queue-7.0/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch [new file with mode: 0644]
queue-7.0/octeon_ep_vf-add-null-check-for-napi_build_skb.patch [new file with mode: 0644]
queue-7.0/printk-add-print_hex_dump_devel.patch [new file with mode: 0644]
queue-7.0/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch [new file with mode: 0644]
queue-7.0/series

diff --git a/queue-7.0/8021q-delete-cleared-egress-qos-mappings.patch b/queue-7.0/8021q-delete-cleared-egress-qos-mappings.patch
new file mode 100644 (file)
index 0000000..410ed41
--- /dev/null
@@ -0,0 +1,95 @@
+From stable+bounces-244865-greg=kroah.com@vger.kernel.org Sat May  9 02:49:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 20:49:07 -0400
+Subject: 8021q: delete cleared egress QoS mappings
+To: stable@vger.kernel.org
+Cc: Longxuan Yu <ylong030@ucr.edu>, stable@kernel.org, Yifan Wu <yifanwucs@gmail.com>, Juefei Pu <tomapufckgml@gmail.com>, Xin Liu <bird@lzu.edu.cn>, Yuan Tan <yuantan098@gmail.com>, Ren Wei <n05ec@lzu.edu.cn>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509004907.2449764-2-sashal@kernel.org>
+
+From: Longxuan Yu <ylong030@ucr.edu>
+
+[ Upstream commit 7dddc74af369478ba7f9bc136d0fc1dc4570cb66 ]
+
+vlan_dev_set_egress_priority() currently keeps cleared egress
+priority mappings in the hash as tombstones. Repeated set/clear cycles
+with distinct skb priorities therefore accumulate mapping nodes until
+device teardown and leak memory.
+
+Delete mappings when vlan_prio is cleared instead of keeping tombstones.
+Now that the egress mapping lists are RCU protected, the node can be
+unlinked safely and freed after a grace period.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Reported-by: Xin Liu <bird@lzu.edu.cn>
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Longxuan Yu <ylong030@ucr.edu>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Link: https://patch.msgid.link/ecfa6f6ce2467a42647ff4c5221238ae85b79a59.1776647968.git.yuantan098@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/8021q/vlan_dev.c     |   20 ++++++++++++++------
+ net/8021q/vlan_netlink.c |    4 ----
+ 2 files changed, 14 insertions(+), 10 deletions(-)
+
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -172,26 +172,34 @@ int vlan_dev_set_egress_priority(const s
+                                u32 skb_prio, u16 vlan_prio)
+ {
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
++      struct vlan_priority_tci_mapping __rcu **mpp;
+       struct vlan_priority_tci_mapping *mp;
+       struct vlan_priority_tci_mapping *np;
+       u32 bucket = skb_prio & 0xF;
+       u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+       /* See if a priority mapping exists.. */
+-      mp = rtnl_dereference(vlan->egress_priority_map[bucket]);
++      mpp = &vlan->egress_priority_map[bucket];
++      mp = rtnl_dereference(*mpp);
+       while (mp) {
+               if (mp->priority == skb_prio) {
+-                      if (mp->vlan_qos && !vlan_qos)
++                      if (!vlan_qos) {
++                              rcu_assign_pointer(*mpp, rtnl_dereference(mp->next));
+                               vlan->nr_egress_mappings--;
+-                      else if (!mp->vlan_qos && vlan_qos)
+-                              vlan->nr_egress_mappings++;
+-                      WRITE_ONCE(mp->vlan_qos, vlan_qos);
++                              kfree_rcu(mp, rcu);
++                      } else {
++                              WRITE_ONCE(mp->vlan_qos, vlan_qos);
++                      }
+                       return 0;
+               }
+-              mp = rtnl_dereference(mp->next);
++              mpp = &mp->next;
++              mp = rtnl_dereference(*mpp);
+       }
+       /* Create a new mapping then. */
++      if (!vlan_qos)
++              return 0;
++
+       np = kmalloc_obj(struct vlan_priority_tci_mapping);
+       if (!np)
+               return -ENOBUFS;
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -263,10 +263,6 @@ static int vlan_fill_info(struct sk_buff
+                       for (pm = rcu_dereference_rtnl(vlan->egress_priority_map[i]); pm;
+                            pm = rcu_dereference_rtnl(pm->next)) {
+                               u16 vlan_qos = READ_ONCE(pm->vlan_qos);
+-
+-                              if (!vlan_qos)
+-                                      continue;
+-
+                               m.from = pm->priority;
+                               m.to   = (vlan_qos >> 13) & 0x7;
+                               if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
diff --git a/queue-7.0/8021q-use-rcu-for-egress-qos-mappings.patch b/queue-7.0/8021q-use-rcu-for-egress-qos-mappings.patch
new file mode 100644 (file)
index 0000000..ec4f099
--- /dev/null
@@ -0,0 +1,212 @@
+From stable+bounces-244864-greg=kroah.com@vger.kernel.org Sat May  9 02:49:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 20:49:06 -0400
+Subject: 8021q: use RCU for egress QoS mappings
+To: stable@vger.kernel.org
+Cc: Longxuan Yu <ylong030@ucr.edu>, Yuan Tan <yuantan098@gmail.com>, Ren Wei <n05ec@lzu.edu.cn>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509004907.2449764-1-sashal@kernel.org>
+
+From: Longxuan Yu <ylong030@ucr.edu>
+
+[ Upstream commit fc69decc811b155a0ed8eef17ee940f28c4f6dbc ]
+
+The TX fast path and reporting paths walk egress QoS mappings without
+RTNL. Convert the mapping lists to RCU-protected pointers, use RCU
+reader annotations in readers, and defer freeing mapping nodes with an
+embedded rcu_head.
+
+This prepares the egress QoS mapping code for safe removal of mapping
+nodes in a follow-up change while preserving the current behavior.
+
+Co-developed-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Yuan Tan <yuantan098@gmail.com>
+Signed-off-by: Longxuan Yu <ylong030@ucr.edu>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+Link: https://patch.msgid.link/9136768189f8c6d3f824f476c62d2fa1111688e8.1776647968.git.yuantan098@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 7dddc74af369 ("8021q: delete cleared egress QoS mappings")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/if_vlan.h  |   25 ++++++++++++++++---------
+ net/8021q/vlan_dev.c     |   31 ++++++++++++++++---------------
+ net/8021q/vlan_netlink.c |   10 ++++++----
+ net/8021q/vlanproc.c     |   12 ++++++++----
+ 4 files changed, 46 insertions(+), 32 deletions(-)
+
+--- a/include/linux/if_vlan.h
++++ b/include/linux/if_vlan.h
+@@ -147,11 +147,13 @@ extern __be16 vlan_dev_vlan_proto(const
+  *    @priority: skb priority
+  *    @vlan_qos: vlan priority: (skb->priority << 13) & 0xE000
+  *    @next: pointer to next struct
++ *    @rcu: used for deferred freeing of mapping nodes
+  */
+ struct vlan_priority_tci_mapping {
+       u32                                     priority;
+       u16                                     vlan_qos;
+-      struct vlan_priority_tci_mapping        *next;
++      struct vlan_priority_tci_mapping __rcu  *next;
++      struct rcu_head                 rcu;
+ };
+ struct proc_dir_entry;
+@@ -177,7 +179,7 @@ struct vlan_dev_priv {
+       unsigned int                            nr_ingress_mappings;
+       u32                                     ingress_priority_map[8];
+       unsigned int                            nr_egress_mappings;
+-      struct vlan_priority_tci_mapping        *egress_priority_map[16];
++      struct vlan_priority_tci_mapping __rcu  *egress_priority_map[16];
+       __be16                                  vlan_proto;
+       u16                                     vlan_id;
+@@ -209,19 +211,24 @@ static inline u16
+ vlan_dev_get_egress_qos_mask(struct net_device *dev, u32 skprio)
+ {
+       struct vlan_priority_tci_mapping *mp;
++      u16 vlan_qos = 0;
+-      smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
++      rcu_read_lock();
+-      mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
++      mp = rcu_dereference(vlan_dev_priv(dev)->egress_priority_map[skprio & 0xF]);
+       while (mp) {
+               if (mp->priority == skprio) {
+-                      return mp->vlan_qos; /* This should already be shifted
+-                                            * to mask correctly with the
+-                                            * VLAN's TCI */
++                      vlan_qos = READ_ONCE(mp->vlan_qos);
++                      break;
+               }
+-              mp = mp->next;
++              mp = rcu_dereference(mp->next);
+       }
+-      return 0;
++      rcu_read_unlock();
++
++      /* This should already be shifted to mask correctly with
++       * the VLAN's TCI.
++       */
++      return vlan_qos;
+ }
+ extern bool vlan_do_receive(struct sk_buff **skb);
+--- a/net/8021q/vlan_dev.c
++++ b/net/8021q/vlan_dev.c
+@@ -172,39 +172,34 @@ int vlan_dev_set_egress_priority(const s
+                                u32 skb_prio, u16 vlan_prio)
+ {
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+-      struct vlan_priority_tci_mapping *mp = NULL;
++      struct vlan_priority_tci_mapping *mp;
+       struct vlan_priority_tci_mapping *np;
++      u32 bucket = skb_prio & 0xF;
+       u32 vlan_qos = (vlan_prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
+       /* See if a priority mapping exists.. */
+-      mp = vlan->egress_priority_map[skb_prio & 0xF];
++      mp = rtnl_dereference(vlan->egress_priority_map[bucket]);
+       while (mp) {
+               if (mp->priority == skb_prio) {
+                       if (mp->vlan_qos && !vlan_qos)
+                               vlan->nr_egress_mappings--;
+                       else if (!mp->vlan_qos && vlan_qos)
+                               vlan->nr_egress_mappings++;
+-                      mp->vlan_qos = vlan_qos;
++                      WRITE_ONCE(mp->vlan_qos, vlan_qos);
+                       return 0;
+               }
+-              mp = mp->next;
++              mp = rtnl_dereference(mp->next);
+       }
+       /* Create a new mapping then. */
+-      mp = vlan->egress_priority_map[skb_prio & 0xF];
+       np = kmalloc_obj(struct vlan_priority_tci_mapping);
+       if (!np)
+               return -ENOBUFS;
+-      np->next = mp;
+       np->priority = skb_prio;
+       np->vlan_qos = vlan_qos;
+-      /* Before inserting this element in hash table, make sure all its fields
+-       * are committed to memory.
+-       * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+-       */
+-      smp_wmb();
+-      vlan->egress_priority_map[skb_prio & 0xF] = np;
++      RCU_INIT_POINTER(np->next, rtnl_dereference(vlan->egress_priority_map[bucket]));
++      rcu_assign_pointer(vlan->egress_priority_map[bucket], np);
+       if (vlan_qos)
+               vlan->nr_egress_mappings++;
+       return 0;
+@@ -604,11 +599,17 @@ void vlan_dev_free_egress_priority(const
+       int i;
+       for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+-              while ((pm = vlan->egress_priority_map[i]) != NULL) {
+-                      vlan->egress_priority_map[i] = pm->next;
+-                      kfree(pm);
++              pm = rtnl_dereference(vlan->egress_priority_map[i]);
++              RCU_INIT_POINTER(vlan->egress_priority_map[i], NULL);
++              while (pm) {
++                      struct vlan_priority_tci_mapping *next;
++
++                      next = rtnl_dereference(pm->next);
++                      kfree_rcu(pm, rcu);
++                      pm = next;
+               }
+       }
++      vlan->nr_egress_mappings = 0;
+ }
+ static void vlan_dev_uninit(struct net_device *dev)
+--- a/net/8021q/vlan_netlink.c
++++ b/net/8021q/vlan_netlink.c
+@@ -260,13 +260,15 @@ static int vlan_fill_info(struct sk_buff
+                       goto nla_put_failure;
+               for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
+-                      for (pm = vlan->egress_priority_map[i]; pm;
+-                           pm = pm->next) {
+-                              if (!pm->vlan_qos)
++                      for (pm = rcu_dereference_rtnl(vlan->egress_priority_map[i]); pm;
++                           pm = rcu_dereference_rtnl(pm->next)) {
++                              u16 vlan_qos = READ_ONCE(pm->vlan_qos);
++
++                              if (!vlan_qos)
+                                       continue;
+                               m.from = pm->priority;
+-                              m.to   = (pm->vlan_qos >> 13) & 0x7;
++                              m.to   = (vlan_qos >> 13) & 0x7;
+                               if (nla_put(skb, IFLA_VLAN_QOS_MAPPING,
+                                           sizeof(m), &m))
+                                       goto nla_put_failure;
+--- a/net/8021q/vlanproc.c
++++ b/net/8021q/vlanproc.c
+@@ -262,15 +262,19 @@ static int vlandev_seq_show(struct seq_f
+                  vlan->ingress_priority_map[7]);
+       seq_printf(seq, " EGRESS priority mappings: ");
++      rcu_read_lock();
+       for (i = 0; i < 16; i++) {
+-              const struct vlan_priority_tci_mapping *mp
+-                      = vlan->egress_priority_map[i];
++              const struct vlan_priority_tci_mapping *mp =
++                      rcu_dereference(vlan->egress_priority_map[i]);
+               while (mp) {
++                      u16 vlan_qos = READ_ONCE(mp->vlan_qos);
++
+                       seq_printf(seq, "%u:%d ",
+-                                 mp->priority, ((mp->vlan_qos >> 13) & 0x7));
+-                      mp = mp->next;
++                                 mp->priority, ((vlan_qos >> 13) & 0x7));
++                      mp = rcu_dereference(mp->next);
+               }
+       }
++      rcu_read_unlock();
+       seq_puts(seq, "\n");
+       return 0;
diff --git a/queue-7.0/bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch b/queue-7.0/bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch
new file mode 100644 (file)
index 0000000..59c2490
--- /dev/null
@@ -0,0 +1,92 @@
+From 4fddde2a732de60bb97e3307d4eb69ac5f1d2b74 Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Mon, 13 Apr 2026 12:42:45 -0700
+Subject: bpf: Fix use-after-free in arena_vm_close on fork
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit 4fddde2a732de60bb97e3307d4eb69ac5f1d2b74 upstream.
+
+arena_vm_open() only bumps vml->mmap_count but never registers the
+child VMA in arena->vma_list. The vml->vma always points at the
+parent VMA, so after parent munmap the pointer dangles. If the child
+then calls bpf_arena_free_pages(), zap_pages() reads the stale
+vml->vma triggering use-after-free.
+
+Fix this by preventing the arena VMA from being inherited across
+fork with VM_DONTCOPY, and preventing VMA splits via the may_split
+callback.
+
+Also reject mremap with a .mremap callback returning -EINVAL. A
+same-size mremap(MREMAP_FIXED) on the full arena VMA reaches
+copy_vma() through the following path:
+
+  check_prep_vma()       - returns 0 early: new_len == old_len
+                           skips VM_DONTEXPAND check
+  prep_move_vma()        - vm_start == old_addr and
+                           vm_end == old_addr + old_len
+                           so may_split is never called
+  move_vma()
+    copy_vma_and_data()
+      copy_vma()
+        vm_area_dup()    - copies vm_private_data (vml pointer)
+        vm_ops->open()   - bumps vml->mmap_count
+      vm_ops->mremap()   - returns -EINVAL, rollback unmaps new VMA
+
+The refcount ensures the rollback's arena_vm_close does not free
+the vml shared with the original VMA.
+
+Reported-by: Weiming Shi <bestswngs@gmail.com>
+Reported-by: Xiang Mei <xmei5@asu.edu>
+Fixes: 317460317a02 ("bpf: Introduce bpf_arena.")
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Link: https://lore.kernel.org/r/20260413194245.21449-1-alexei.starovoitov@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/arena.c |   19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -341,6 +341,16 @@ static void arena_vm_open(struct vm_area
+       refcount_inc(&vml->mmap_count);
+ }
++static int arena_vm_may_split(struct vm_area_struct *vma, unsigned long addr)
++{
++      return -EINVAL;
++}
++
++static int arena_vm_mremap(struct vm_area_struct *vma)
++{
++      return -EINVAL;
++}
++
+ static void arena_vm_close(struct vm_area_struct *vma)
+ {
+       struct bpf_map *map = vma->vm_file->private_data;
+@@ -417,6 +427,8 @@ out_unlock_sigsegv:
+ static const struct vm_operations_struct arena_vm_ops = {
+       .open           = arena_vm_open,
++      .may_split      = arena_vm_may_split,
++      .mremap         = arena_vm_mremap,
+       .close          = arena_vm_close,
+       .fault          = arena_vm_fault,
+ };
+@@ -486,10 +498,11 @@ static int arena_map_mmap(struct bpf_map
+       arena->user_vm_end = vma->vm_end;
+       /*
+        * bpf_map_mmap() checks that it's being mmaped as VM_SHARED and
+-       * clears VM_MAYEXEC. Set VM_DONTEXPAND as well to avoid
+-       * potential change of user_vm_start.
++       * clears VM_MAYEXEC. Set VM_DONTEXPAND to avoid potential change
++       * of user_vm_start. Set VM_DONTCOPY to prevent arena VMA from
++       * being copied into the child process on fork.
+        */
+-      vm_flags_set(vma, VM_DONTEXPAND);
++      vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
+       vma->vm_ops = &arena_vm_ops;
+       return 0;
+ }
diff --git a/queue-7.0/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch b/queue-7.0/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch
new file mode 100644 (file)
index 0000000..390738d
--- /dev/null
@@ -0,0 +1,68 @@
+From stable+bounces-244984-greg=kroah.com@vger.kernel.org Sat May  9 20:59:39 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  9 May 2026 14:59:29 -0400
+Subject: crypto: caam - guard HMAC key hex dumps in hash_digest_key
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509185929.3682915-2-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit 177730a273b18e195263ed953853273e901b5064 ]
+
+Use print_hex_dump_devel() for dumping sensitive HMAC key bytes in
+hash_digest_key() to avoid leaking secrets at runtime when
+CONFIG_DYNAMIC_DEBUG is enabled.
+
+Fixes: 045e36780f11 ("crypto: caam - ahash hmac support")
+Fixes: 3f16f6c9d632 ("crypto: caam/qi2 - add support for ahash algorithms")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/caam/caamalg_qi2.c |    4 ++--
+ drivers/crypto/caam/caamhash.c    |    4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -3269,7 +3269,7 @@ static int hash_digest_key(struct caam_h
+       dpaa2_fl_set_addr(out_fle, key_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+-      print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
++      print_hex_dump_devel("key_in@" __stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+       print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+@@ -3289,7 +3289,7 @@ static int hash_digest_key(struct caam_h
+               /* in progress */
+               wait_for_completion(&result.completion);
+               ret = result.err;
+-              print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
++              print_hex_dump_devel("digested key@" __stringify(__LINE__)": ",
+                                    DUMP_PREFIX_ADDRESS, 16, 4, key,
+                                    digestsize, 1);
+       }
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -393,7 +393,7 @@ static int hash_digest_key(struct caam_h
+       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+                        LDST_SRCDST_BYTE_CONTEXT);
+-      print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
++      print_hex_dump_devel("key_in@"__stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+       print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+@@ -408,7 +408,7 @@ static int hash_digest_key(struct caam_h
+               wait_for_completion(&result.completion);
+               ret = result.err;
+-              print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
++              print_hex_dump_devel("digested key@"__stringify(__LINE__)": ",
+                                    DUMP_PREFIX_ADDRESS, 16, 4, key,
+                                    digestsize, 1);
+       }
diff --git a/queue-7.0/crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch b/queue-7.0/crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch
new file mode 100644 (file)
index 0000000..5e00134
--- /dev/null
@@ -0,0 +1,90 @@
+From stable+bounces-244081-greg=kroah.com@vger.kernel.org Tue May  5 12:36:35 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:17:38 -0400
+Subject: crypto: qat - fix firmware loading failure for GEN6 devices
+To: stable@vger.kernel.org
+Cc: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>, Giovanni Cabiddu <giovanni.cabiddu@intel.com>, Andy Shevchenko <andriy.shevchenko@intel.com>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505101738.582879-2-sashal@kernel.org>
+
+From: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+
+[ Upstream commit e7dcb722bb75bb3f3992f580a8728a794732fd7a ]
+
+QAT GEN6 hardware requires a minimum 3 us delay during the acceleration
+engine reset sequence to ensure the hardware fully settles.
+Without this delay, the firmware load may fail intermittently.
+
+Add a delay after placing the AE into reset and before clearing the reset,
+matching the hardware requirements and ensuring stable firmware loading.
+Earlier generations remain unaffected.
+
+Fixes: 17fd7514ae68 ("crypto: qat - add qat_6xxx driver")
+Signed-off-by: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/intel/qat/qat_common/adf_accel_engine.c         |    7 +++++++
+ drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h |    1 +
+ drivers/crypto/intel/qat/qat_common/qat_hal.c                  |    5 ++++-
+ 3 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+ /* Copyright(c) 2014 - 2020 Intel Corporation */
++#include <linux/delay.h>
+ #include <linux/firmware.h>
+ #include <linux/pci.h>
+ #include "adf_cfg.h"
+@@ -162,8 +163,14 @@ int adf_ae_stop(struct adf_accel_dev *ac
+ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+ {
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
++      unsigned long reset_delay;
+       qat_hal_reset(loader_data->fw_loader);
++
++      reset_delay = loader_data->fw_loader->chip_info->reset_delay_us;
++      if (reset_delay)
++              fsleep(reset_delay);
++
+       if (qat_hal_clr_reset(loader_data->fw_loader))
+               return -EFAULT;
+--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
++++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+@@ -27,6 +27,7 @@ struct icp_qat_fw_loader_chip_info {
+       int mmp_sram_size;
+       bool nn;
+       bool lm2lm3;
++      u16 reset_delay_us;
+       u32 lm_size;
+       u32 icp_rst_csr;
+       u32 icp_rst_mask;
+--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
+@@ -20,6 +20,7 @@
+ #define RST_CSR_QAT_LSB                       20
+ #define RST_CSR_AE_LSB                        0
+ #define MC_TIMESTAMP_ENABLE           (0x1 << 7)
++#define MIN_RESET_DELAY_US            3
+ #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+@@ -713,8 +714,10 @@ static int qat_hal_chip_init(struct icp_
+               handle->chip_info->wakeup_event_val = 0x80000000;
+               handle->chip_info->fw_auth = true;
+               handle->chip_info->css_3k = true;
+-              if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
++              if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) {
+                       handle->chip_info->dual_sign = true;
++                      handle->chip_info->reset_delay_us = MIN_RESET_DELAY_US;
++              }
+               handle->chip_info->tgroup_share_ustore = true;
+               handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
+               handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/queue-7.0/crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch b/queue-7.0/crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch
new file mode 100644 (file)
index 0000000..4478b0a
--- /dev/null
@@ -0,0 +1,59 @@
+From stable+bounces-244079-greg=kroah.com@vger.kernel.org Tue May  5 12:38:57 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:17:37 -0400
+Subject: crypto: qat - fix indentation of macros in qat_hal.c
+To: stable@vger.kernel.org
+Cc: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>, Giovanni Cabiddu <giovanni.cabiddu@intel.com>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505101738.582879-1-sashal@kernel.org>
+
+From: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+
+[ Upstream commit 4963b39e3a3feed07fbf4d5cc2b5df8498888285 ]
+
+The macros in qat_hal.c were using a mixture of tabs and spaces.
+Update all macro indentation to use tabs consistently, matching the
+predominant style.
+
+This does not introduce any functional change.
+
+Signed-off-by: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: e7dcb722bb75 ("crypto: qat - fix firmware loading failure for GEN6 devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/intel/qat/qat_common/qat_hal.c |   22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
+@@ -9,17 +9,17 @@
+ #include "icp_qat_hal.h"
+ #include "icp_qat_uclo.h"
+-#define BAD_REGADDR          0xffff
+-#define MAX_RETRY_TIMES          10000
+-#define INIT_CTX_ARB_VALUE    0x0
+-#define INIT_CTX_ENABLE_VALUE     0x0
+-#define INIT_PC_VALUE      0x0
+-#define INIT_WAKEUP_EVENTS_VALUE  0x1
+-#define INIT_SIG_EVENTS_VALUE     0x1
+-#define INIT_CCENABLE_VALUE       0x2000
+-#define RST_CSR_QAT_LSB          20
+-#define RST_CSR_AE_LSB                  0
+-#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
++#define BAD_REGADDR                   0xffff
++#define MAX_RETRY_TIMES                       10000
++#define INIT_CTX_ARB_VALUE            0x0
++#define INIT_CTX_ENABLE_VALUE         0x0
++#define INIT_PC_VALUE                 0x0
++#define INIT_WAKEUP_EVENTS_VALUE      0x1
++#define INIT_SIG_EVENTS_VALUE         0x1
++#define INIT_CCENABLE_VALUE           0x2000
++#define RST_CSR_QAT_LSB                       20
++#define RST_CSR_AE_LSB                        0
++#define MC_TIMESTAMP_ENABLE           (0x1 << 7)
+ #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
diff --git a/queue-7.0/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch b/queue-7.0/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch
new file mode 100644 (file)
index 0000000..ef1df6d
--- /dev/null
@@ -0,0 +1,142 @@
+From stable+bounces-244851-greg=kroah.com@vger.kernel.org Sat May  9 02:00:24 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 20:00:00 -0400
+Subject: hfsplus: fix held lock freed on hfsplus_fill_super()
+To: stable@vger.kernel.org
+Cc: Zilin Guan <zilin@seu.edu.cn>, Viacheslav Dubeyko <slava@dubeyko.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509000000.2245470-2-sashal@kernel.org>
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit 90c500e4fd83fa33c09bc7ee23b6d9cc487ac733 ]
+
+hfsplus_fill_super() calls hfs_find_init() to initialize a search
+structure, which acquires tree->tree_lock. If the subsequent call to
+hfsplus_cat_build_key() fails, the function jumps to the out_put_root
+error label without releasing the lock. The later cleanup path then
+frees the tree data structure with the lock still held, triggering a
+held lock freed warning.
+
+Fix this by adding the missing hfs_find_exit(&fd) call before jumping
+to the out_put_root error label. This ensures that tree->tree_lock is
+properly released on the error path.
+
+The bug was originally detected on v6.13-rc1 using an experimental
+static analysis tool we are developing, and we have verified that the
+issue persists in the latest mainline kernel. The tool is specifically
+designed to detect memory management issues. It is currently under active
+development and not yet publicly available.
+
+We confirmed the bug by runtime testing under QEMU with x86_64 defconfig,
+lockdep enabled, and CONFIG_HFSPLUS_FS=y. To trigger the error path, we
+used GDB to dynamically shrink the max_unistr_len parameter to 1 before
+hfsplus_asc2uni() is called. This forces hfsplus_asc2uni() to naturally
+return -ENAMETOOLONG, which propagates to hfsplus_cat_build_key() and
+exercises the faulty error path. The following warning was observed
+during mount:
+
+       =========================
+       WARNING: held lock freed!
+       7.0.0-rc3-00016-gb4f0dd314b39 #4 Not tainted
+       -------------------------
+       mount/174 is freeing memory ffff888103f92000-ffff888103f92fff, with a lock still held there!
+       ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0
+       2 locks held by mount/174:
+       #0: ffff888103f960e0 (&type->s_umount_key#42/1){+.+.}-{4:4}, at: alloc_super.constprop.0+0x167/0xa40
+       #1: ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0
+
+       stack backtrace:
+       CPU: 2 UID: 0 PID: 174 Comm: mount Not tainted 7.0.0-rc3-00016-gb4f0dd314b39 #4 PREEMPT(lazy)
+       Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
+       Call Trace:
+       <TASK>
+       dump_stack_lvl+0x82/0xd0
+       debug_check_no_locks_freed+0x13a/0x180
+       kfree+0x16b/0x510
+       ? hfsplus_fill_super+0xcb4/0x18a0
+       hfsplus_fill_super+0xcb4/0x18a0
+       ? __pfx_hfsplus_fill_super+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? bdev_open+0x65f/0xc30
+       ? srso_return_thunk+0x5/0x5f
+       ? pointer+0x4ce/0xbf0
+       ? trace_contention_end+0x11c/0x150
+       ? __pfx_pointer+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? bdev_open+0x79b/0xc30
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? vsnprintf+0x6da/0x1270
+       ? srso_return_thunk+0x5/0x5f
+       ? __mutex_unlock_slowpath+0x157/0x740
+       ? __pfx_vsnprintf+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? mark_held_locks+0x49/0x80
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? irqentry_exit+0x17b/0x5e0
+       ? trace_irq_disable.constprop.0+0x116/0x150
+       ? __pfx_hfsplus_fill_super+0x10/0x10
+       ? __pfx_hfsplus_fill_super+0x10/0x10
+       get_tree_bdev_flags+0x302/0x580
+       ? __pfx_get_tree_bdev_flags+0x10/0x10
+       ? vfs_parse_fs_qstr+0x129/0x1a0
+       ? __pfx_vfs_parse_fs_qstr+0x3/0x10
+       vfs_get_tree+0x89/0x320
+       fc_mount+0x10/0x1d0
+       path_mount+0x5c5/0x21c0
+       ? __pfx_path_mount+0x10/0x10
+       ? trace_irq_enable.constprop.0+0x116/0x150
+       ? trace_irq_enable.constprop.0+0x116/0x150
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? kmem_cache_free+0x307/0x540
+       ? user_path_at+0x51/0x60
+       ? __x64_sys_mount+0x212/0x280
+       ? srso_return_thunk+0x5/0x5f
+       __x64_sys_mount+0x212/0x280
+       ? __pfx___x64_sys_mount+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? trace_irq_enable.constprop.0+0x116/0x150
+       ? srso_return_thunk+0x5/0x5f
+       do_syscall_64+0x111/0x680
+       entry_SYSCALL_64_after_hwframe+0x77/0x7f
+       RIP: 0033:0x7ffacad55eae
+       Code: 48 8b 0d 85 1f 0f 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 49 89 ca b8 a5 00 00 8
+       RSP: 002b:00007fff1ab55718 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
+       RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007ffacad55eae
+       RDX: 000055740c64e5b0 RSI: 000055740c64e630 RDI: 000055740c651ab0
+       RBP: 000055740c64e380 R08: 0000000000000000 R09: 0000000000000001
+       R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+       R13: 000055740c64e5b0 R14: 000055740c651ab0 R15: 000055740c64e380
+       </TASK>
+
+After applying this patch, the warning no longer appears.
+
+Fixes: 89ac9b4d3d1a ("hfsplus: fix longname handling")
+CC: stable@vger.kernel.org
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Tested-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/super.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -569,8 +569,10 @@ static int hfsplus_fill_super(struct sup
+       if (err)
+               goto out_put_root;
+       err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+-      if (unlikely(err < 0))
++      if (unlikely(err < 0)) {
++              hfs_find_exit(&fd);
+               goto out_put_root;
++      }
+       if (!hfsplus_brec_read_cat(&fd, &entry)) {
+               hfs_find_exit(&fd);
+               if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
diff --git a/queue-7.0/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch b/queue-7.0/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch
new file mode 100644 (file)
index 0000000..7d9f443
--- /dev/null
@@ -0,0 +1,189 @@
+From stable+bounces-244850-greg=kroah.com@vger.kernel.org Sat May  9 02:00:20 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 19:59:59 -0400
+Subject: hfsplus: fix uninit-value by validating catalog record size
+To: stable@vger.kernel.org
+Cc: Deepanshu Kartikey <kartikey406@gmail.com>, syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com, Viacheslav Dubeyko <slava@dubeyko.com>, Charalampos Mitrodimas <charmitro@posteo.net>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509000000.2245470-1-sashal@kernel.org>
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit b6b592275aeff184aa82fcf6abccd833fb71b393 ]
+
+Syzbot reported a KMSAN uninit-value issue in hfsplus_strcasecmp(). The
+root cause is that hfs_brec_read() doesn't validate that the on-disk
+record size matches the expected size for the record type being read.
+
+When mounting a corrupted filesystem, hfs_brec_read() may read less data
+than expected. For example, when reading a catalog thread record, the
+debug output showed:
+
+  HFSPLUS_BREC_READ: rec_len=520, fd->entrylength=26
+  HFSPLUS_BREC_READ: WARNING - entrylength (26) < rec_len (520) - PARTIAL READ!
+
+hfs_brec_read() only validates that entrylength is not greater than the
+buffer size, but doesn't check if it's less than expected. It successfully
+reads 26 bytes into a 520-byte structure and returns success, leaving 494
+bytes uninitialized.
+
+This uninitialized data in tmp.thread.nodeName then gets copied by
+hfsplus_cat_build_key_uni() and used by hfsplus_strcasecmp(), triggering
+the KMSAN warning when the uninitialized bytes are used as array indices
+in case_fold().
+
+Fix by introducing hfsplus_brec_read_cat() wrapper that:
+1. Calls hfs_brec_read() to read the data
+2. Validates the record size based on the type field:
+   - Fixed size for folder and file records
+   - Variable size for thread records (depends on string length)
+3. Returns -EIO if size doesn't match expected
+
+For thread records, check against HFSPLUS_MIN_THREAD_SZ before reading
+nodeName.length to avoid reading uninitialized data at call sites that
+don't zero-initialize the entry structure.
+
+Also initialize the tmp variable in hfsplus_find_cat() as defensive
+programming to ensure no uninitialized data even if validation is
+bypassed.
+
+Reported-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=d80abb5b890d39261e72
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
+Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Tested-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Suggested-by: Charalampos Mitrodimas <charmitro@posteo.net>
+Link: https://lore.kernel.org/all/20260120051114.1281285-1-kartikey406@gmail.com/ [v1]
+Link: https://lore.kernel.org/all/20260121063109.1830263-1-kartikey406@gmail.com/ [v2]
+Link: https://lore.kernel.org/all/20260212014233.2422046-1-kartikey406@gmail.com/ [v3]
+Link: https://lore.kernel.org/all/20260214002100.436125-1-kartikey406@gmail.com/T/ [v4]
+Link: https://lore.kernel.org/all/20260221061626.15853-1-kartikey406@gmail.com/T/ [v5]
+Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Link: https://lore.kernel.org/r/20260307010302.41547-1-kartikey406@gmail.com
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Stable-dep-of: 90c500e4fd83 ("hfsplus: fix held lock freed on hfsplus_fill_super()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/bfind.c      |   51 ++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/hfsplus/catalog.c    |    4 +--
+ fs/hfsplus/dir.c        |    2 -
+ fs/hfsplus/hfsplus_fs.h |    9 ++++++++
+ fs/hfsplus/super.c      |    2 -
+ 5 files changed, 64 insertions(+), 4 deletions(-)
+
+--- a/fs/hfsplus/bfind.c
++++ b/fs/hfsplus/bfind.c
+@@ -287,3 +287,54 @@ out:
+       fd->bnode = bnode;
+       return res;
+ }
++
++/**
++ * hfsplus_brec_read_cat - read and validate a catalog record
++ * @fd: find data structure
++ * @entry: pointer to catalog entry to read into
++ *
++ * Reads a catalog record and validates its size matches the expected
++ * size based on the record type.
++ *
++ * Returns 0 on success, or negative error code on failure.
++ */
++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry)
++{
++      int res;
++      u32 expected_size;
++
++      res = hfs_brec_read(fd, entry, sizeof(hfsplus_cat_entry));
++      if (res)
++              return res;
++
++      /* Validate catalog record size based on type */
++      switch (be16_to_cpu(entry->type)) {
++      case HFSPLUS_FOLDER:
++              expected_size = sizeof(struct hfsplus_cat_folder);
++              break;
++      case HFSPLUS_FILE:
++              expected_size = sizeof(struct hfsplus_cat_file);
++              break;
++      case HFSPLUS_FOLDER_THREAD:
++      case HFSPLUS_FILE_THREAD:
++              /* Ensure we have at least the fixed fields before reading nodeName.length */
++              if (fd->entrylength < HFSPLUS_MIN_THREAD_SZ) {
++                      pr_err("thread record too short (got %u)\n", fd->entrylength);
++                      return -EIO;
++              }
++              expected_size = hfsplus_cat_thread_size(&entry->thread);
++              break;
++      default:
++              pr_err("unknown catalog record type %d\n",
++                     be16_to_cpu(entry->type));
++              return -EIO;
++      }
++
++      if (fd->entrylength != expected_size) {
++              pr_err("catalog record size mismatch (type %d, got %u, expected %u)\n",
++                     be16_to_cpu(entry->type), fd->entrylength, expected_size);
++              return -EIO;
++      }
++
++      return 0;
++}
+--- a/fs/hfsplus/catalog.c
++++ b/fs/hfsplus/catalog.c
+@@ -194,12 +194,12 @@ static int hfsplus_fill_cat_thread(struc
+ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
+                    struct hfs_find_data *fd)
+ {
+-      hfsplus_cat_entry tmp;
++      hfsplus_cat_entry tmp = {0};
+       int err;
+       u16 type;
+       hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
+-      err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
++      err = hfsplus_brec_read_cat(fd, &tmp);
+       if (err)
+               return err;
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -49,7 +49,7 @@ static struct dentry *hfsplus_lookup(str
+       if (unlikely(err < 0))
+               goto fail;
+ again:
+-      err = hfs_brec_read(&fd, &entry, sizeof(entry));
++      err = hfsplus_brec_read_cat(&fd, &entry);
+       if (err) {
+               if (err == -ENOENT) {
+                       hfs_find_exit(&fd);
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -509,6 +509,15 @@ int hfsplus_submit_bio(struct super_bloc
+                      void **data, blk_opf_t opf);
+ int hfsplus_read_wrapper(struct super_block *sb);
++static inline u32 hfsplus_cat_thread_size(const struct hfsplus_cat_thread *thread)
++{
++      return offsetof(struct hfsplus_cat_thread, nodeName) +
++             offsetof(struct hfsplus_unistr, unicode) +
++             be16_to_cpu(thread->nodeName.length) * sizeof(hfsplus_unichr);
++}
++
++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry);
++
+ /*
+  * time helpers: convert between 1904-base and 1970-base timestamps
+  *
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -571,7 +571,7 @@ static int hfsplus_fill_super(struct sup
+       err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+       if (unlikely(err < 0))
+               goto out_put_root;
+-      if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
++      if (!hfsplus_brec_read_cat(&fd, &entry)) {
+               hfs_find_exit(&fd);
+               if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
+                       err = -EIO;
diff --git a/queue-7.0/mmc-core-add-quirk-for-incorrect-manufacturing-date.patch b/queue-7.0/mmc-core-add-quirk-for-incorrect-manufacturing-date.patch
new file mode 100644 (file)
index 0000000..5d2f23f
--- /dev/null
@@ -0,0 +1,97 @@
+From stable+bounces-244072-greg=kroah.com@vger.kernel.org Tue May  5 12:13:41 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:02:49 -0400
+Subject: mmc: core: Add quirk for incorrect manufacturing date
+To: stable@vger.kernel.org
+Cc: Avri Altman <avri.altman@sandisk.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505100250.522459-2-sashal@kernel.org>
+
+From: Avri Altman <avri.altman@sandisk.com>
+
+[ Upstream commit 263ff314cc5602599d481b0912a381555fcbad28 ]
+
+Some eMMC vendors need to report manufacturing dates beyond 2025 but are
+reluctant to update the EXT_CSD revision from 8 to 9. Changing the
+Updating the EXT_CSD revision may involve additional testing or
+qualification steps with customers. To ease this transition and avoid a
+full re-qualification process, a workaround is needed. This
+patch introduces a temporary quirk that re-purposes the year codes
+corresponding to 2010, 2011, and 2012 to represent the years 2026, 2027,
+and 2028, respectively. This solution is only valid for this three-year
+period.
+
+After 2028, vendors must update their firmware to set EXT_CSD_REV=9 to
+continue reporting the correct manufacturing date in compliance with the
+JEDEC standard.
+
+The `MMC_QUIRK_BROKEN_MDT` is introduced and enabled for all Sandisk
+devices to handle this behavior.
+
+Signed-off-by: Avri Altman <avri.altman@sandisk.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: d6bf2e64dec8 ("mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/card.h   |    6 ++++++
+ drivers/mmc/core/mmc.c    |    5 +++++
+ drivers/mmc/core/quirks.h |    3 +++
+ include/linux/mmc/card.h  |    1 +
+ 4 files changed, 15 insertions(+)
+
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -89,6 +89,7 @@ struct mmc_fixup {
+ #define CID_MANFID_MICRON       0x13
+ #define CID_MANFID_SAMSUNG      0x15
+ #define CID_MANFID_APACER       0x27
++#define CID_MANFID_SANDISK_MMC  0x45
+ #define CID_MANFID_SWISSBIT     0x5D
+ #define CID_MANFID_KINGSTON     0x70
+ #define CID_MANFID_HYNIX      0x90
+@@ -305,4 +306,9 @@ static inline int mmc_card_no_uhs_ddr50_
+       return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
+ }
++static inline int mmc_card_broken_mdt(const struct mmc_card *c)
++{
++      return c->quirks & MMC_QUIRK_BROKEN_MDT;
++}
++
+ #endif
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -676,6 +676,11 @@ static int mmc_decode_ext_csd(struct mmc
+                       /* Adjust production date as per JEDEC JESD84-B51B September 2025 */
+                       if (card->cid.year < 2023)
+                               card->cid.year += 16;
++              } else {
++                      /* Handle vendors with broken MDT reporting */
++                      if (mmc_card_broken_mdt(card) && card->cid.year >= 2010 &&
++                          card->cid.year <= 2012)
++                              card->cid.year += 16;
+               }
+       }
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -170,6 +170,9 @@ static const struct mmc_fixup __maybe_un
+       MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX,
+                             0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
++      MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_MMC, CID_OEMID_ANY, add_quirk_mmc,
++                MMC_QUIRK_BROKEN_MDT),
++
+       END_FIXUP
+ };
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -329,6 +329,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_CACHE_FLUSH  (1<<16) /* Don't flush cache until the write has occurred */
+ #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY   (1<<17) /* Disable broken SD poweroff notify support */
+ #define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
++#define MMC_QUIRK_BROKEN_MDT    (1<<19) /* Wrong manufacturing year */
+       bool                    written_flag;   /* Indicates eMMC has been written since power on */
+       bool                    reenable_cmdq;  /* Re-enable Command Queue */
diff --git a/queue-7.0/mmc-core-adjust-mdt-beyond-2025.patch b/queue-7.0/mmc-core-adjust-mdt-beyond-2025.patch
new file mode 100644 (file)
index 0000000..77fb90b
--- /dev/null
@@ -0,0 +1,66 @@
+From stable+bounces-244071-greg=kroah.com@vger.kernel.org Tue May  5 12:13:36 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:02:48 -0400
+Subject: mmc: core: Adjust MDT beyond 2025
+To: stable@vger.kernel.org
+Cc: Avri Altman <avri.altman@sandisk.com>, Shawn Lin <shawn.lin@rock-chips.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505100250.522459-1-sashal@kernel.org>
+
+From: Avri Altman <avri.altman@sandisk.com>
+
+[ Upstream commit 3e487a634bc019166e452ea276f7522710eda9f4 ]
+
+JEDEC JESD84-B51B which was released in September 2025, increases the
+manufacturing year limit for eMMC devices. The eMMC manufacturing year
+is stored in a 4-bit field in the CID register. Originally, it covered
+1997–2012. Later, with EXT_CSD_REV=8, it was extended up to 2025. Now,
+with EXT_CSD_REV=9, the range is rolled over by another 16 years, up to
+2038.
+
+The mapping is as follows:
+cid[8..11] | rev ≤ 4 | 8 ≥ rev > 4 | rev > 8
+---------------------------------------------
+0          | 1997    | 2013        | 2029
+1          | 1998    | 2014        | 2030
+2          | 1999    | 2015        | 2031
+3          | 2000    | 2016        | 2032
+4          | 2001    | 2017        | 2033
+5          | 2002    | 2018        | 2034
+6          | 2003    | 2019        | 2035
+7          | 2004    | 2020        | 2036
+8          | 2005    | 2021        | 2037
+9          | 2006    | 2022        | 2038
+10         | 2007    | 2023        |
+11         | 2008    | 2024        |
+12         | 2009    | 2025        |
+13         | 2010    |             | 2026
+14         | 2011    |             | 2027
+15         | 2012    |             | 2028
+
+Signed-off-by: Avri Altman <avri.altman@sandisk.com>
+Reviewed-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: d6bf2e64dec8 ("mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/mmc.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -671,7 +671,14 @@ static int mmc_decode_ext_csd(struct mmc
+               card->ext_csd.enhanced_rpmb_supported =
+                                       (card->ext_csd.rel_param &
+                                        EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
++
++              if (card->ext_csd.rev >= 9) {
++                      /* Adjust production date as per JEDEC JESD84-B51B September 2025 */
++                      if (card->cid.year < 2023)
++                              card->cid.year += 16;
++              }
+       }
++
+ out:
+       return err;
+ }
diff --git a/queue-7.0/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch b/queue-7.0/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch
new file mode 100644 (file)
index 0000000..40331ba
--- /dev/null
@@ -0,0 +1,93 @@
+From stable+bounces-244073-greg=kroah.com@vger.kernel.org Tue May  5 12:08:25 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:02:50 -0400
+Subject: mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs
+To: stable@vger.kernel.org
+Cc: Luke Wang <ziniu.wang_1@nxp.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505100250.522459-3-sashal@kernel.org>
+
+From: Luke Wang <ziniu.wang_1@nxp.com>
+
+[ Upstream commit d6bf2e64dec87322f2b11565ddb59c0e967f96e3 ]
+
+Kingston eMMC IY2964 and IB2932 takes a fixed ~2 seconds for each secure
+erase/trim operation regardless of size - that is, a single secure
+erase/trim operation of 1MB takes the same time as 1GB. With default
+calculated 3.5MB max discard size, secure erase 1GB requires ~300 separate
+operations taking ~10 minutes total.
+
+Add a card quirk, MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME, to set maximum
+secure erase size for those devices. This allows 1GB secure erase to
+complete in a single operation, reducing time from 10 minutes to just 2
+seconds.
+
+Signed-off-by: Luke Wang <ziniu.wang_1@nxp.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/card.h   |    5 +++++
+ drivers/mmc/core/queue.c  |    9 +++++++--
+ drivers/mmc/core/quirks.h |    9 +++++++++
+ include/linux/mmc/card.h  |    1 +
+ 4 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -311,4 +311,9 @@ static inline int mmc_card_broken_mdt(co
+       return c->quirks & MMC_QUIRK_BROKEN_MDT;
+ }
++static inline int mmc_card_fixed_secure_erase_trim_time(const struct mmc_card *c)
++{
++      return c->quirks & MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME;
++}
++
+ #endif
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -184,8 +184,13 @@ static void mmc_queue_setup_discard(stru
+               return;
+       lim->max_hw_discard_sectors = max_discard;
+-      if (mmc_card_can_secure_erase_trim(card))
+-              lim->max_secure_erase_sectors = max_discard;
++      if (mmc_card_can_secure_erase_trim(card)) {
++              if (mmc_card_fixed_secure_erase_trim_time(card))
++                      lim->max_secure_erase_sectors = UINT_MAX >> card->erase_shift;
++              else
++                      lim->max_secure_erase_sectors = max_discard;
++      }
++
+       if (mmc_card_can_trim(card) && card->erased_byte == 0)
+               lim->max_write_zeroes_sectors = max_discard;
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -153,6 +153,15 @@ static const struct mmc_fixup __maybe_un
+       MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+                 MMC_QUIRK_TRIM_BROKEN),
++      /*
++       * On Some Kingston eMMCs, secure erase/trim time is independent
++       * of erase size, fixed at approximately 2 seconds.
++       */
++      MMC_FIXUP("IY2964", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
++                MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME),
++      MMC_FIXUP("IB2932", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
++                MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME),
++
+       END_FIXUP
+ };
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -330,6 +330,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY   (1<<17) /* Disable broken SD poweroff notify support */
+ #define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
+ #define MMC_QUIRK_BROKEN_MDT    (1<<19) /* Wrong manufacturing year */
++#define MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME        (1<<20) /* Secure erase/trim time is fixed regardless of size */
+       bool                    written_flag;   /* Indicates eMMC has been written since power on */
+       bool                    reenable_cmdq;  /* Re-enable Command Queue */
diff --git a/queue-7.0/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch b/queue-7.0/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch
new file mode 100644 (file)
index 0000000..96ef105
--- /dev/null
@@ -0,0 +1,120 @@
+From stable+bounces-245021-greg=kroah.com@vger.kernel.org Sun May 10 15:38:05 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 May 2026 09:37:57 -0400
+Subject: net: stmmac: Prevent NULL deref when RX memory exhausted
+To: stable@vger.kernel.org
+Cc: Sam Edwards <cfsworks@gmail.com>, Russell King <linux@armlinux.org.uk>, Sam Edwards <CFSworks@gmail.com>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260510133757.4137215-2-sashal@kernel.org>
+
+From: Sam Edwards <cfsworks@gmail.com>
+
+[ Upstream commit 0bb05e6adfa99a2ea1fee1125cc0953409f83ed8 ]
+
+The CPU receives frames from the MAC through conventional DMA: the CPU
+allocates buffers for the MAC, then the MAC fills them and returns
+ownership to the CPU. For each hardware RX queue, the CPU and MAC
+coordinate through a shared ring array of DMA descriptors: one
+descriptor per DMA buffer. Each descriptor includes the buffer's
+physical address and a status flag ("OWN") indicating which side owns
+the buffer: OWN=0 for CPU, OWN=1 for MAC. The CPU is only allowed to set
+the flag and the MAC is only allowed to clear it, and both must move
+through the ring in sequence: thus the ring is used for both
+"submissions" and "completions."
+
+In the stmmac driver, stmmac_rx() bookmarks its position in the ring
+with the `cur_rx` index. The main receive loop in that function checks
+for rx_descs[cur_rx].own=0, gives the corresponding buffer to the
+network stack (NULLing the pointer), and increments `cur_rx` modulo the
+ring size. After the loop exits, stmmac_rx_refill(), which bookmarks its
+position with `dirty_rx`, allocates fresh buffers and rearms the
+descriptors (setting OWN=1). If it fails any allocation, it simply stops
+early (leaving OWN=0) and will retry where it left off when next called.
+
+This means descriptors have a three-stage lifecycle (terms my own):
+- `empty` (OWN=1, buffer valid)
+- `full` (OWN=0, buffer valid and populated)
+- `dirty` (OWN=0, buffer NULL)
+
+But because stmmac_rx() only checks OWN, it confuses `full`/`dirty`. In
+the past (see 'Fixes:'), there was a bug where the loop could cycle
+`cur_rx` all the way back to the first descriptor it dirtied, resulting
+in a NULL dereference when mistaken for `full`. The aforementioned
+commit resolved that *specific* failure by capping the loop's iteration
+limit at `dma_rx_size - 1`, but this is only a partial fix: if the
+previous stmmac_rx_refill() didn't complete, then there are leftover
+`dirty` descriptors that the loop might encounter without needing to
+cycle fully around. The current code therefore panics (see 'Closes:')
+when stmmac_rx_refill() is memory-starved long enough for `cur_rx` to
+catch up to `dirty_rx`.
+
+Fix this by explicitly checking, before advancing `cur_rx`, if the next
+entry is dirty; exit the loop if so. This prevents processing of the
+final, used descriptor until stmmac_rx_refill() succeeds, but
+fully prevents the `cur_rx == dirty_rx` ambiguity as the previous bugfix
+intended: so remove the clamp as well. Since stmmac_rx_zc() is a
+copy-paste-and-tweak of stmmac_rx() and the code structure is identical,
+any fix to stmmac_rx() will also need a corresponding fix for
+stmmac_rx_zc(). Therefore, apply the same check there.
+
+In stmmac_rx() (not stmmac_rx_zc()), a related bug remains: after the
+MAC sets OWN=0 on the final descriptor, it will be unable to send any
+further DMA-complete IRQs until it's given more `empty` descriptors.
+Currently, the driver simply *hopes* that the next stmmac_rx_refill()
+succeeds, risking an indefinite stall of the receive process if not. But
+this is not a regression, so it can be addressed in a future change.
+
+Fixes: b6cb4541853c7 ("net: stmmac: avoid rx queue overrun")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=221010
+Cc: stable@vger.kernel.org
+Suggested-by: Russell King <linux@armlinux.org.uk>
+Signed-off-by: Sam Edwards <CFSworks@gmail.com>
+Link: https://patch.msgid.link/20260422044503.5349-1-CFSworks@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |   19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5469,9 +5469,12 @@ read_again:
+                       break;
+               /* Prefetch the next RX descriptor */
+-              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+-                                              priv->dma_conf.dma_rx_size);
+-              next_entry = rx_q->cur_rx;
++              next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
++                                             priv->dma_conf.dma_rx_size);
++              if (unlikely(next_entry == rx_q->dirty_rx))
++                      break;
++
++              rx_q->cur_rx = next_entry;
+               if (priv->extend_desc)
+                       np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+@@ -5609,7 +5612,6 @@ static int stmmac_rx(struct stmmac_priv
+       dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+       bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+-      limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+       if (netif_msg_rx_status(priv)) {
+               void *rx_head;
+@@ -5665,9 +5667,12 @@ read_again:
+               if (unlikely(status & dma_own))
+                       break;
+-              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+-                                              priv->dma_conf.dma_rx_size);
+-              next_entry = rx_q->cur_rx;
++              next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
++                                             priv->dma_conf.dma_rx_size);
++              if (unlikely(next_entry == rx_q->dirty_rx))
++                      break;
++
++              rx_q->cur_rx = next_entry;
+               if (priv->extend_desc)
+                       np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
diff --git a/queue-7.0/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch b/queue-7.0/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch
new file mode 100644 (file)
index 0000000..f073b32
--- /dev/null
@@ -0,0 +1,181 @@
+From stable+bounces-245020-greg=kroah.com@vger.kernel.org Sun May 10 15:38:03 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 May 2026 09:37:56 -0400
+Subject: net: stmmac: rename STMMAC_GET_ENTRY() -> STMMAC_NEXT_ENTRY()
+To: stable@vger.kernel.org
+Cc: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260510133757.4137215-1-sashal@kernel.org>
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+[ Upstream commit 6b4286e0550814cdc4b897f881ec1fa8b0313227 ]
+
+STMMAC_GET_ENTRY() doesn't describe what this macro is doing - it is
+incrementing the provided index for the circular array of descriptors.
+Replace "GET" with "NEXT" as this better describes the action here.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://patch.msgid.link/E1w2vba-0000000DbWo-1oL5@rmk-PC.armlinux.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 0bb05e6adfa9 ("net: stmmac: Prevent NULL deref when RX memory exhausted")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/chain_mode.c  |    2 -
+ drivers/net/ethernet/stmicro/stmmac/common.h      |    2 -
+ drivers/net/ethernet/stmicro/stmmac/ring_mode.c   |    2 -
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |   26 +++++++++++-----------
+ 4 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+@@ -47,7 +47,7 @@ static int jumbo_frm(struct stmmac_tx_qu
+       while (len != 0) {
+               tx_q->tx_skbuff[entry] = NULL;
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               desc = tx_q->dma_tx + entry;
+               if (len > bmax) {
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -63,7 +63,7 @@ static inline bool dwmac_is_xmac(enum dw
+ #define DMA_MIN_RX_SIZE               64
+ #define DMA_MAX_RX_SIZE               1024
+ #define DMA_DEFAULT_RX_SIZE   512
+-#define STMMAC_GET_ENTRY(x, size)     ((x + 1) & (size - 1))
++#define STMMAC_NEXT_ENTRY(x, size)    ((x + 1) & (size - 1))
+ #undef FRAME_FILTER_DEBUG
+ /* #define FRAME_FILTER_DEBUG */
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -51,7 +51,7 @@ static int jumbo_frm(struct stmmac_tx_qu
+               stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
+                               STMMAC_RING_MODE, 0, false, skb->len);
+               tx_q->tx_skbuff[entry] = NULL;
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               if (priv->extend_desc)
+                       desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2744,7 +2744,7 @@ static bool stmmac_xdp_xmit_zc(struct st
+               xsk_tx_metadata_to_compl(meta,
+                                        &tx_q->tx_skbuff_dma[entry].xsk_meta);
+-              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
++              tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+               entry = tx_q->cur_tx;
+       }
+       u64_stats_update_begin(&txq_stats->napi_syncp);
+@@ -2915,7 +2915,7 @@ static int stmmac_tx_clean(struct stmmac
+               stmmac_release_tx_desc(priv, p, priv->mode);
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       }
+       tx_q->dirty_tx = entry;
+@@ -4258,7 +4258,7 @@ static bool stmmac_vlan_insert(struct st
+               return false;
+       stmmac_set_tx_owner(priv, p);
+-      tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
++      tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+       return true;
+ }
+@@ -4286,7 +4286,7 @@ static void stmmac_tso_allocator(struct
+       while (tmp_len > 0) {
+               dma_addr_t curr_addr;
+-              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
++              tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
+                                               priv->dma_conf.dma_tx_size);
+               WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+@@ -4437,7 +4437,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+               stmmac_set_mss(priv, mss_desc, mss);
+               tx_q->mss = mss;
+-              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
++              tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
+                                               priv->dma_conf.dma_tx_size);
+               WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+       }
+@@ -4541,7 +4541,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
+-      tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
++      tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+       if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+               netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+@@ -4751,7 +4751,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+               int len = skb_frag_size(frag);
+               bool last_segment = (i == (nfrags - 1));
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               WARN_ON(tx_q->tx_skbuff[entry]);
+               if (likely(priv->extend_desc))
+@@ -4821,7 +4821,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
+-      entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++      entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       tx_q->cur_tx = entry;
+       if (netif_msg_pktdata(priv)) {
+@@ -4990,7 +4990,7 @@ static inline void stmmac_rx_refill(stru
+               dma_wmb();
+               stmmac_set_rx_owner(priv, p, use_rx_wd);
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
+       }
+       rx_q->dirty_rx = entry;
+       rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+@@ -5140,7 +5140,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
+       stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
+-      entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++      entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       tx_q->cur_tx = entry;
+       return STMMAC_XDP_TX;
+@@ -5374,7 +5374,7 @@ static bool stmmac_rx_refill_zc(struct s
+               dma_wmb();
+               stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
+       }
+       if (rx_desc) {
+@@ -5469,7 +5469,7 @@ read_again:
+                       break;
+               /* Prefetch the next RX descriptor */
+-              rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
++              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+                                               priv->dma_conf.dma_rx_size);
+               next_entry = rx_q->cur_rx;
+@@ -5665,7 +5665,7 @@ read_again:
+               if (unlikely(status & dma_own))
+                       break;
+-              rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
++              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+                                               priv->dma_conf.dma_rx_size);
+               next_entry = rx_q->cur_rx;
diff --git a/queue-7.0/octeon_ep_vf-add-null-check-for-napi_build_skb.patch b/queue-7.0/octeon_ep_vf-add-null-check-for-napi_build_skb.patch
new file mode 100644 (file)
index 0000000..bf910ad
--- /dev/null
@@ -0,0 +1,86 @@
+From stable+bounces-244002-greg=kroah.com@vger.kernel.org Tue May  5 09:43:03 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 03:38:09 -0400
+Subject: octeon_ep_vf: add NULL check for napi_build_skb()
+To: stable@vger.kernel.org
+Cc: David Carlier <devnexen@gmail.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505073809.427518-1-sashal@kernel.org>
+
+From: David Carlier <devnexen@gmail.com>
+
+[ Upstream commit dd66b42854705e4e4ee7f14d260f86c578bed3e3 ]
+
+napi_build_skb() can return NULL on allocation failure. In
+__octep_vf_oq_process_rx(), the result is used directly without a NULL
+check in both the single-buffer and multi-fragment paths, leading to a
+NULL pointer dereference.
+
+Add NULL checks after both napi_build_skb() calls, properly advancing
+descriptors and consuming remaining fragments on failure.
+
+Fixes: 1cd3b407977c ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Cc: stable@vger.kernel.org
+Signed-off-by: David Carlier <devnexen@gmail.com>
+Link: https://patch.msgid.link/20260409184009.930359-3-devnexen@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ inlined missing octep_vf_oq_next_idx() helper as read_idx++ with wraparound ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c |   36 +++++++++++++++-
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+@@ -409,10 +409,17 @@ static int __octep_vf_oq_process_rx(stru
+                       data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
+                       rx_ol_flags = 0;
+               }
+-              rx_bytes += buff_info->len;
+-
+               if (buff_info->len <= oq->max_single_buffer_size) {
+                       skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
++                      if (!skb) {
++                              oq->stats->alloc_failures++;
++                              desc_used++;
++                              read_idx++;
++                              if (read_idx == oq->max_count)
++                                      read_idx = 0;
++                              continue;
++                      }
++                      rx_bytes += buff_info->len;
+                       skb_reserve(skb, data_offset);
+                       skb_put(skb, buff_info->len);
+                       read_idx++;
+@@ -424,6 +431,31 @@ static int __octep_vf_oq_process_rx(stru
+                       u16 data_len;
+                       skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
++                      if (!skb) {
++                              oq->stats->alloc_failures++;
++                              desc_used++;
++                              read_idx++;
++                              if (read_idx == oq->max_count)
++                                      read_idx = 0;
++                              data_len = buff_info->len - oq->max_single_buffer_size;
++                              while (data_len) {
++                                      dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
++                                                     PAGE_SIZE, DMA_FROM_DEVICE);
++                                      buff_info = (struct octep_vf_rx_buffer *)
++                                                  &oq->buff_info[read_idx];
++                                      buff_info->page = NULL;
++                                      if (data_len < oq->buffer_size)
++                                              data_len = 0;
++                                      else
++                                              data_len -= oq->buffer_size;
++                                      desc_used++;
++                                      read_idx++;
++                                      if (read_idx == oq->max_count)
++                                              read_idx = 0;
++                              }
++                              continue;
++                      }
++                      rx_bytes += buff_info->len;
+                       skb_reserve(skb, data_offset);
+                       /* Head fragment includes response header(s);
+                        * subsequent fragments contains only data.
diff --git a/queue-7.0/printk-add-print_hex_dump_devel.patch b/queue-7.0/printk-add-print_hex_dump_devel.patch
new file mode 100644 (file)
index 0000000..21acfb4
--- /dev/null
@@ -0,0 +1,49 @@
+From stable+bounces-244983-greg=kroah.com@vger.kernel.org Sat May  9 20:59:39 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  9 May 2026 14:59:28 -0400
+Subject: printk: add print_hex_dump_devel()
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Herbert Xu <herbert@gondor.apana.org.au>, John Ogness <john.ogness@linutronix.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509185929.3682915-1-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit d134feeb5df33fbf77f482f52a366a44642dba09 ]
+
+Add print_hex_dump_devel() as the hex dump equivalent of pr_devel(),
+which emits output only when DEBUG is enabled, but keeps call sites
+compiled otherwise.
+
+Suggested-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: 177730a273b1 ("crypto: caam - guard HMAC key hex dumps in hash_digest_key")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/printk.h |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -801,6 +801,19 @@ static inline void print_hex_dump_debug(
+ }
+ #endif
++#if defined(DEBUG)
++#define print_hex_dump_devel(prefix_str, prefix_type, rowsize,                \
++                           groupsize, buf, len, ascii)                \
++      print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize,    \
++                     groupsize, buf, len, ascii)
++#else
++static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type,
++                                      int rowsize, int groupsize,
++                                      const void *buf, size_t len, bool ascii)
++{
++}
++#endif
++
+ /**
+  * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
+  * @prefix_str: string to prefix each line with;
diff --git a/queue-7.0/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch b/queue-7.0/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch
new file mode 100644 (file)
index 0000000..d338f1f
--- /dev/null
@@ -0,0 +1,250 @@
+From gary@garyguo.net Tue May 12 17:19:46 2026
+From: Gary Guo <gary@garyguo.net>
+Date: Tue, 12 May 2026 16:17:20 +0100
+Subject: rust: pin-init: fix incorrect accessor reference lifetime
+To: gregkh@linuxfoundation.org, ojeda@kernel.org
+Cc: stable@vger.kernel.org, Gary Guo <gary@garyguo.net>
+Message-ID: <20260512151719.3309464-2-gary@garyguo.net>
+
+From: Gary Guo <gary@garyguo.net>
+
+commit 68bf102226cf2199dc609b67c1e847cad4de4b57 upstream
+
+When a field has been initialized, `init!`/`pin_init!` create a reference
+or pinned reference to the field so it can be accessed later during the
+initialization of other fields. However, the reference it created is
+incorrectly `&'static` rather than just the scope of the initializer.
+
+This means that you can do
+
+    init!(Foo {
+        a: 1,
+        _: {
+            let b: &'static u32 = a;
+        }
+    })
+
+which is unsound.
+
+This is caused by `&mut (*#slot).#ident`, which actually allows arbitrary
+lifetime, so this is effectively `'static`. Somewhat ironically, the safety
+justification of creating the accessor is.. "SAFETY: TODO".
+
+Fix it by adding `let_binding` method on `DropGuard` to shorten lifetime.
+This results exactly what we want for these accessors. The safety and
+invariant comments of `DropGuard` have been reworked; instead of reasoning
+about what caller can do with the guard, express it in a way that the
+ownership is transferred to the guard and `forget` takes it back, so the
+unsafe operations within the `DropGuard` can be more easily justified.
+
+Fixes: db96c5103ae6 ("add references to previously initialized fields")
+Signed-off-by: Gary Guo <gary@garyguo.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/pin-init/internal/src/init.rs |  106 ++++++++++++++++---------------------
+ rust/pin-init/src/__internal.rs    |   28 ++++++---
+ 2 files changed, 66 insertions(+), 68 deletions(-)
+
+--- a/rust/pin-init/internal/src/init.rs
++++ b/rust/pin-init/internal/src/init.rs
+@@ -243,18 +243,6 @@ fn init_fields(
+                 });
+                 // Again span for better diagnostics
+                 let write = quote_spanned!(ident.span()=> ::core::ptr::write);
+-                let accessor = if pinned {
+-                    let project_ident = format_ident!("__project_{ident}");
+-                    quote! {
+-                        // SAFETY: TODO
+-                        unsafe { #data.#project_ident(&mut (*#slot).#ident) }
+-                    }
+-                } else {
+-                    quote! {
+-                        // SAFETY: TODO
+-                        unsafe { &mut (*#slot).#ident }
+-                    }
+-                };
+                 quote! {
+                     #(#attrs)*
+                     {
+@@ -262,51 +250,31 @@ fn init_fields(
+                         // SAFETY: TODO
+                         unsafe { #write(::core::ptr::addr_of_mut!((*#slot).#ident), #value_ident) };
+                     }
+-                    #(#cfgs)*
+-                    #[allow(unused_variables)]
+-                    let #ident = #accessor;
+                 }
+             }
+             InitializerKind::Init { ident, value, .. } => {
+                 // Again span for better diagnostics
+                 let init = format_ident!("init", span = value.span());
+-                // NOTE: the field accessor ensures that the initialized field is properly aligned.
+-                // Unaligned fields will cause the compiler to emit E0793. We do not support
+-                // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+-                // `ptr::write` below has the same requirement.
+-                let (value_init, accessor) = if pinned {
+-                    let project_ident = format_ident!("__project_{ident}");
+-                    (
+-                        quote! {
+-                            // SAFETY:
+-                            // - `slot` is valid, because we are inside of an initializer closure, we
+-                            //   return when an error/panic occurs.
+-                            // - We also use `#data` to require the correct trait (`Init` or `PinInit`)
+-                            //   for `#ident`.
+-                            unsafe { #data.#ident(::core::ptr::addr_of_mut!((*#slot).#ident), #init)? };
+-                        },
+-                        quote! {
+-                            // SAFETY: TODO
+-                            unsafe { #data.#project_ident(&mut (*#slot).#ident) }
+-                        },
+-                    )
++                let value_init = if pinned {
++                    quote! {
++                        // SAFETY:
++                        // - `slot` is valid, because we are inside of an initializer closure, we
++                        //   return when an error/panic occurs.
++                        // - We also use `#data` to require the correct trait (`Init` or `PinInit`)
++                        //   for `#ident`.
++                        unsafe { #data.#ident(::core::ptr::addr_of_mut!((*#slot).#ident), #init)? };
++                    }
+                 } else {
+-                    (
+-                        quote! {
+-                            // SAFETY: `slot` is valid, because we are inside of an initializer
+-                            // closure, we return when an error/panic occurs.
+-                            unsafe {
+-                                ::pin_init::Init::__init(
+-                                    #init,
+-                                    ::core::ptr::addr_of_mut!((*#slot).#ident),
+-                                )?
+-                            };
+-                        },
+-                        quote! {
+-                            // SAFETY: TODO
+-                            unsafe { &mut (*#slot).#ident }
+-                        },
+-                    )
++                    quote! {
++                        // SAFETY: `slot` is valid, because we are inside of an initializer
++                        // closure, we return when an error/panic occurs.
++                        unsafe {
++                            ::pin_init::Init::__init(
++                                #init,
++                                ::core::ptr::addr_of_mut!((*#slot).#ident),
++                            )?
++                        };
++                    }
+                 };
+                 quote! {
+                     #(#attrs)*
+@@ -314,9 +282,6 @@ fn init_fields(
+                         let #init = #value;
+                         #value_init
+                     }
+-                    #(#cfgs)*
+-                    #[allow(unused_variables)]
+-                    let #ident = #accessor;
+                 }
+             }
+             InitializerKind::Code { block: value, .. } => quote! {
+@@ -329,18 +294,41 @@ fn init_fields(
+         if let Some(ident) = kind.ident() {
+             // `mixed_site` ensures that the guard is not accessible to the user-controlled code.
+             let guard = format_ident!("__{ident}_guard", span = Span::mixed_site());
++
++            // NOTE: The reference is derived from the guard so that it only lives as long as the
++            // guard does and cannot escape the scope. If it's created via `&mut (*#slot).#ident`
++            // like the unaligned field guard, it will become effectively `'static`.
++            let accessor = if pinned {
++                let project_ident = format_ident!("__project_{ident}");
++                quote! {
++                    // SAFETY: the initialization is pinned.
++                    unsafe { #data.#project_ident(#guard.let_binding()) }
++                }
++            } else {
++                quote! {
++                    #guard.let_binding()
++                }
++            };
++
+             res.extend(quote! {
+                 #(#cfgs)*
+-                // Create the drop guard:
++                // Create the drop guard.
+                 //
+-                // We rely on macro hygiene to make it impossible for users to access this local
+-                // variable.
+-                // SAFETY: We forget the guard later when initialization has succeeded.
+-                let #guard = unsafe {
++                // SAFETY:
++                // - `&raw mut (*slot).#ident` is valid.
++                // - `make_field_check` checks that `&raw mut (*slot).#ident` is properly aligned.
++                // - `(*slot).#ident` has been initialized above.
++                // - We only need the ownership to the pointee back when initialization has
++                //   succeeded, where we `forget` the guard.
++                let mut #guard = unsafe {
+                     ::pin_init::__internal::DropGuard::new(
+                         ::core::ptr::addr_of_mut!((*slot).#ident)
+                     )
+                 };
++
++                #(#cfgs)*
++                #[allow(unused_variables)]
++                let #ident = #accessor;
+             });
+             guards.push(guard);
+             guard_attrs.push(cfgs);
+--- a/rust/pin-init/src/__internal.rs
++++ b/rust/pin-init/src/__internal.rs
+@@ -238,32 +238,42 @@ fn stack_init_reuse() {
+ /// When a value of this type is dropped, it drops a `T`.
+ ///
+ /// Can be forgotten to prevent the drop.
++///
++/// # Invariants
++///
++/// - `ptr` is valid and properly aligned.
++/// - `*ptr` is initialized and owned by this guard.
+ pub struct DropGuard<T: ?Sized> {
+     ptr: *mut T,
+ }
+ impl<T: ?Sized> DropGuard<T> {
+-    /// Creates a new [`DropGuard<T>`]. It will [`ptr::drop_in_place`] `ptr` when it gets dropped.
++    /// Creates a drop guard and transfer the ownership of the pointer content.
+     ///
+-    /// # Safety
++    /// The ownership is only relinguished if the guard is forgotten via [`core::mem::forget`].
+     ///
+-    /// `ptr` must be a valid pointer.
++    /// # Safety
+     ///
+-    /// It is the callers responsibility that `self` will only get dropped if the pointee of `ptr`:
+-    /// - has not been dropped,
+-    /// - is not accessible by any other means,
+-    /// - will not be dropped by any other means.
++    /// - `ptr` is valid and properly aligned.
++    /// - `*ptr` is initialized, and the ownership is transferred to this guard.
+     #[inline]
+     pub unsafe fn new(ptr: *mut T) -> Self {
++        // INVARIANT: By safety requirement.
+         Self { ptr }
+     }
++
++    /// Create a let binding for accessor use.
++    #[inline]
++    pub fn let_binding(&mut self) -> &mut T {
++        // SAFETY: Per type invariant.
++        unsafe { &mut *self.ptr }
++    }
+ }
+ impl<T: ?Sized> Drop for DropGuard<T> {
+     #[inline]
+     fn drop(&mut self) {
+-        // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
+-        // ensuring that this operation is safe.
++        // SAFETY: `self.ptr` is valid, properly aligned and `*self.ptr` is owned by this guard.
+         unsafe { ptr::drop_in_place(self.ptr) }
+     }
+ }
index 583bbc129d09253cdbbe182fecec5e623174956a..87739636918362c8dc360874f241ea45b580dd9f 100644 (file)
@@ -288,3 +288,19 @@ loongarch-kvm-fix-hw-timer-interrupt-lost-when-inject-interrupt-by-software.patc
 loongarch-kvm-move-unconditional-delay-into-timer-clear-scenery.patch
 loongarch-kvm-use-kvm_set_pte-in-kvm_flush_pte.patch
 loongarch-use-per-root-bridge-pcih-flag-to-skip-mem-resource-fixup.patch
+bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch
+octeon_ep_vf-add-null-check-for-napi_build_skb.patch
+mmc-core-adjust-mdt-beyond-2025.patch
+mmc-core-add-quirk-for-incorrect-manufacturing-date.patch
+mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch
+crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch
+crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch
+hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch
+hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch
+8021q-use-rcu-for-egress-qos-mappings.patch
+8021q-delete-cleared-egress-qos-mappings.patch
+printk-add-print_hex_dump_devel.patch
+crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch
+net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch
+net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch
+rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch