]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.18-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 May 2026 17:25:19 +0000 (19:25 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 12 May 2026 17:25:19 +0000 (19:25 +0200)
added patches:
bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch
crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch
crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch
crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch
dma-mapping-add-__dma_from_device_group_begin-end.patch
erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch
erofs-tidy-up-z_erofs_lz4_handle_overlap.patch
fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch
firmware-exynos-acpm-drop-fake-const-on-handle-pointer.patch
hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch
hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch
hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch
mm-damon-core-disallow-non-power-of-two-min_region_sz-on-damon_start.patch
mm-swap-speed-up-hibernation-allocation-and-writeout.patch
mmc-core-add-quirk-for-incorrect-manufacturing-date.patch
mmc-core-adjust-mdt-beyond-2025.patch
mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch
net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch
net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch
octeon_ep_vf-add-null-check-for-napi_build_skb.patch
printk-add-print_hex_dump_devel.patch
rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch

23 files changed:
queue-6.18/bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch [new file with mode: 0644]
queue-6.18/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch [new file with mode: 0644]
queue-6.18/crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch [new file with mode: 0644]
queue-6.18/crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch [new file with mode: 0644]
queue-6.18/dma-mapping-add-__dma_from_device_group_begin-end.patch [new file with mode: 0644]
queue-6.18/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch [new file with mode: 0644]
queue-6.18/erofs-tidy-up-z_erofs_lz4_handle_overlap.patch [new file with mode: 0644]
queue-6.18/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch [new file with mode: 0644]
queue-6.18/firmware-exynos-acpm-drop-fake-const-on-handle-pointer.patch [new file with mode: 0644]
queue-6.18/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch [new file with mode: 0644]
queue-6.18/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch [new file with mode: 0644]
queue-6.18/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch [new file with mode: 0644]
queue-6.18/mm-damon-core-disallow-non-power-of-two-min_region_sz-on-damon_start.patch [new file with mode: 0644]
queue-6.18/mm-swap-speed-up-hibernation-allocation-and-writeout.patch [new file with mode: 0644]
queue-6.18/mmc-core-add-quirk-for-incorrect-manufacturing-date.patch [new file with mode: 0644]
queue-6.18/mmc-core-adjust-mdt-beyond-2025.patch [new file with mode: 0644]
queue-6.18/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch [new file with mode: 0644]
queue-6.18/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch [new file with mode: 0644]
queue-6.18/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch [new file with mode: 0644]
queue-6.18/octeon_ep_vf-add-null-check-for-napi_build_skb.patch [new file with mode: 0644]
queue-6.18/printk-add-print_hex_dump_devel.patch [new file with mode: 0644]
queue-6.18/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch [new file with mode: 0644]
queue-6.18/series

diff --git a/queue-6.18/bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch b/queue-6.18/bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch
new file mode 100644 (file)
index 0000000..112faeb
--- /dev/null
@@ -0,0 +1,92 @@
+From 4fddde2a732de60bb97e3307d4eb69ac5f1d2b74 Mon Sep 17 00:00:00 2001
+From: Alexei Starovoitov <ast@kernel.org>
+Date: Mon, 13 Apr 2026 12:42:45 -0700
+Subject: bpf: Fix use-after-free in arena_vm_close on fork
+
+From: Alexei Starovoitov <ast@kernel.org>
+
+commit 4fddde2a732de60bb97e3307d4eb69ac5f1d2b74 upstream.
+
+arena_vm_open() only bumps vml->mmap_count but never registers the
+child VMA in arena->vma_list. The vml->vma always points at the
+parent VMA, so after parent munmap the pointer dangles. If the child
+then calls bpf_arena_free_pages(), zap_pages() reads the stale
+vml->vma triggering use-after-free.
+
+Fix this by preventing the arena VMA from being inherited across
+fork with VM_DONTCOPY, and preventing VMA splits via the may_split
+callback.
+
+Also reject mremap with a .mremap callback returning -EINVAL. A
+same-size mremap(MREMAP_FIXED) on the full arena VMA reaches
+copy_vma() through the following path:
+
+  check_prep_vma()       - returns 0 early: new_len == old_len
+                           skips VM_DONTEXPAND check
+  prep_move_vma()        - vm_start == old_addr and
+                           vm_end == old_addr + old_len
+                           so may_split is never called
+  move_vma()
+    copy_vma_and_data()
+      copy_vma()
+        vm_area_dup()    - copies vm_private_data (vml pointer)
+        vm_ops->open()   - bumps vml->mmap_count
+      vm_ops->mremap()   - returns -EINVAL, rollback unmaps new VMA
+
+The refcount ensures the rollback's arena_vm_close does not free
+the vml shared with the original VMA.
+
+Reported-by: Weiming Shi <bestswngs@gmail.com>
+Reported-by: Xiang Mei <xmei5@asu.edu>
+Fixes: 317460317a02 ("bpf: Introduce bpf_arena.")
+Reviewed-by: Emil Tsalapatis <emil@etsalapatis.com>
+Link: https://lore.kernel.org/r/20260413194245.21449-1-alexei.starovoitov@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/arena.c |   19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/kernel/bpf/arena.c
++++ b/kernel/bpf/arena.c
+@@ -246,6 +246,16 @@ static void arena_vm_open(struct vm_area
+       refcount_inc(&vml->mmap_count);
+ }
++static int arena_vm_may_split(struct vm_area_struct *vma, unsigned long addr)
++{
++      return -EINVAL;
++}
++
++static int arena_vm_mremap(struct vm_area_struct *vma)
++{
++      return -EINVAL;
++}
++
+ static void arena_vm_close(struct vm_area_struct *vma)
+ {
+       struct bpf_map *map = vma->vm_file->private_data;
+@@ -307,6 +317,8 @@ out:
+ static const struct vm_operations_struct arena_vm_ops = {
+       .open           = arena_vm_open,
++      .may_split      = arena_vm_may_split,
++      .mremap         = arena_vm_mremap,
+       .close          = arena_vm_close,
+       .fault          = arena_vm_fault,
+ };
+@@ -376,10 +388,11 @@ static int arena_map_mmap(struct bpf_map
+       arena->user_vm_end = vma->vm_end;
+       /*
+        * bpf_map_mmap() checks that it's being mmaped as VM_SHARED and
+-       * clears VM_MAYEXEC. Set VM_DONTEXPAND as well to avoid
+-       * potential change of user_vm_start.
++       * clears VM_MAYEXEC. Set VM_DONTEXPAND to avoid potential change
++       * of user_vm_start. Set VM_DONTCOPY to prevent arena VMA from
++       * being copied into the child process on fork.
+        */
+-      vm_flags_set(vma, VM_DONTEXPAND);
++      vm_flags_set(vma, VM_DONTEXPAND | VM_DONTCOPY);
+       vma->vm_ops = &arena_vm_ops;
+       return 0;
+ }
diff --git a/queue-6.18/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch b/queue-6.18/crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch
new file mode 100644 (file)
index 0000000..84f9eff
--- /dev/null
@@ -0,0 +1,68 @@
+From stable+bounces-244987-greg=kroah.com@vger.kernel.org Sat May  9 23:30:05 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  9 May 2026 17:29:57 -0400
+Subject: crypto: caam - guard HMAC key hex dumps in hash_digest_key
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509212957.3843722-2-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit 177730a273b18e195263ed953853273e901b5064 ]
+
+Use print_hex_dump_devel() for dumping sensitive HMAC key bytes in
+hash_digest_key() to avoid leaking secrets at runtime when
+CONFIG_DYNAMIC_DEBUG is enabled.
+
+Fixes: 045e36780f11 ("crypto: caam - ahash hmac support")
+Fixes: 3f16f6c9d632 ("crypto: caam/qi2 - add support for ahash algorithms")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/caam/caamalg_qi2.c |    4 ++--
+ drivers/crypto/caam/caamhash.c    |    4 ++--
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/crypto/caam/caamalg_qi2.c
++++ b/drivers/crypto/caam/caamalg_qi2.c
+@@ -3269,7 +3269,7 @@ static int hash_digest_key(struct caam_h
+       dpaa2_fl_set_addr(out_fle, key_dma);
+       dpaa2_fl_set_len(out_fle, digestsize);
+-      print_hex_dump_debug("key_in@" __stringify(__LINE__)": ",
++      print_hex_dump_devel("key_in@" __stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+       print_hex_dump_debug("shdesc@" __stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+@@ -3289,7 +3289,7 @@ static int hash_digest_key(struct caam_h
+               /* in progress */
+               wait_for_completion(&result.completion);
+               ret = result.err;
+-              print_hex_dump_debug("digested key@" __stringify(__LINE__)": ",
++              print_hex_dump_devel("digested key@" __stringify(__LINE__)": ",
+                                    DUMP_PREFIX_ADDRESS, 16, 4, key,
+                                    digestsize, 1);
+       }
+--- a/drivers/crypto/caam/caamhash.c
++++ b/drivers/crypto/caam/caamhash.c
+@@ -393,7 +393,7 @@ static int hash_digest_key(struct caam_h
+       append_seq_store(desc, digestsize, LDST_CLASS_2_CCB |
+                        LDST_SRCDST_BYTE_CONTEXT);
+-      print_hex_dump_debug("key_in@"__stringify(__LINE__)": ",
++      print_hex_dump_devel("key_in@"__stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, key, *keylen, 1);
+       print_hex_dump_debug("jobdesc@"__stringify(__LINE__)": ",
+                            DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+@@ -408,7 +408,7 @@ static int hash_digest_key(struct caam_h
+               wait_for_completion(&result.completion);
+               ret = result.err;
+-              print_hex_dump_debug("digested key@"__stringify(__LINE__)": ",
++              print_hex_dump_devel("digested key@"__stringify(__LINE__)": ",
+                                    DUMP_PREFIX_ADDRESS, 16, 4, key,
+                                    digestsize, 1);
+       }
diff --git a/queue-6.18/crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch b/queue-6.18/crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch
new file mode 100644 (file)
index 0000000..63b6586
--- /dev/null
@@ -0,0 +1,90 @@
+From stable+bounces-244086-greg=kroah.com@vger.kernel.org Tue May  5 12:33:51 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:33:30 -0400
+Subject: crypto: qat - fix firmware loading failure for GEN6 devices
+To: stable@vger.kernel.org
+Cc: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>, Giovanni Cabiddu <giovanni.cabiddu@intel.com>, Andy Shevchenko <andriy.shevchenko@intel.com>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505103330.596817-2-sashal@kernel.org>
+
+From: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+
+[ Upstream commit e7dcb722bb75bb3f3992f580a8728a794732fd7a ]
+
+QAT GEN6 hardware requires a minimum 3 us delay during the acceleration
+engine reset sequence to ensure the hardware fully settles.
+Without this delay, the firmware load may fail intermittently.
+
+Add a delay after placing the AE into reset and before clearing the reset,
+matching the hardware requirements and ensuring stable firmware loading.
+Earlier generations remain unaffected.
+
+Fixes: 17fd7514ae68 ("crypto: qat - add qat_6xxx driver")
+Signed-off-by: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+Cc: stable@vger.kernel.org
+Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/intel/qat/qat_common/adf_accel_engine.c         |    7 +++++++
+ drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h |    1 +
+ drivers/crypto/intel/qat/qat_common/qat_hal.c                  |    5 ++++-
+ 3 files changed, 12 insertions(+), 1 deletion(-)
+
+--- a/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
++++ b/drivers/crypto/intel/qat/qat_common/adf_accel_engine.c
+@@ -1,5 +1,6 @@
+ // SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
+ /* Copyright(c) 2014 - 2020 Intel Corporation */
++#include <linux/delay.h>
+ #include <linux/firmware.h>
+ #include <linux/pci.h>
+ #include "adf_cfg.h"
+@@ -162,8 +163,14 @@ int adf_ae_stop(struct adf_accel_dev *ac
+ static int adf_ae_reset(struct adf_accel_dev *accel_dev, int ae)
+ {
+       struct adf_fw_loader_data *loader_data = accel_dev->fw_loader;
++      unsigned long reset_delay;
+       qat_hal_reset(loader_data->fw_loader);
++
++      reset_delay = loader_data->fw_loader->chip_info->reset_delay_us;
++      if (reset_delay)
++              fsleep(reset_delay);
++
+       if (qat_hal_clr_reset(loader_data->fw_loader))
+               return -EFAULT;
+--- a/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
++++ b/drivers/crypto/intel/qat/qat_common/icp_qat_fw_loader_handle.h
+@@ -27,6 +27,7 @@ struct icp_qat_fw_loader_chip_info {
+       int mmp_sram_size;
+       bool nn;
+       bool lm2lm3;
++      u16 reset_delay_us;
+       u32 lm_size;
+       u32 icp_rst_csr;
+       u32 icp_rst_mask;
+--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
+@@ -20,6 +20,7 @@
+ #define RST_CSR_QAT_LSB                       20
+ #define RST_CSR_AE_LSB                        0
+ #define MC_TIMESTAMP_ENABLE           (0x1 << 7)
++#define MIN_RESET_DELAY_US            3
+ #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
+@@ -713,8 +714,10 @@ static int qat_hal_chip_init(struct icp_
+               handle->chip_info->wakeup_event_val = 0x80000000;
+               handle->chip_info->fw_auth = true;
+               handle->chip_info->css_3k = true;
+-              if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX)
++              if (handle->pci_dev->device == PCI_DEVICE_ID_INTEL_QAT_6XXX) {
+                       handle->chip_info->dual_sign = true;
++                      handle->chip_info->reset_delay_us = MIN_RESET_DELAY_US;
++              }
+               handle->chip_info->tgroup_share_ustore = true;
+               handle->chip_info->fcu_ctl_csr = FCU_CONTROL_4XXX;
+               handle->chip_info->fcu_sts_csr = FCU_STATUS_4XXX;
diff --git a/queue-6.18/crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch b/queue-6.18/crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch
new file mode 100644 (file)
index 0000000..4e78f3d
--- /dev/null
@@ -0,0 +1,59 @@
+From stable+bounces-244085-greg=kroah.com@vger.kernel.org Tue May  5 12:46:29 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:33:29 -0400
+Subject: crypto: qat - fix indentation of macros in qat_hal.c
+To: stable@vger.kernel.org
+Cc: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>, Giovanni Cabiddu <giovanni.cabiddu@intel.com>, Herbert Xu <herbert@gondor.apana.org.au>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505103330.596817-1-sashal@kernel.org>
+
+From: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+
+[ Upstream commit 4963b39e3a3feed07fbf4d5cc2b5df8498888285 ]
+
+The macros in qat_hal.c were using a mixture of tabs and spaces.
+Update all macro indentation to use tabs consistently, matching the
+predominant style.
+
+This does not introduce any functional change.
+
+Signed-off-by: Suman Kumar Chakraborty <suman.kumar.chakraborty@intel.com>
+Reviewed-by: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: e7dcb722bb75 ("crypto: qat - fix firmware loading failure for GEN6 devices")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/intel/qat/qat_common/qat_hal.c |   22 +++++++++++-----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+--- a/drivers/crypto/intel/qat/qat_common/qat_hal.c
++++ b/drivers/crypto/intel/qat/qat_common/qat_hal.c
+@@ -9,17 +9,17 @@
+ #include "icp_qat_hal.h"
+ #include "icp_qat_uclo.h"
+-#define BAD_REGADDR          0xffff
+-#define MAX_RETRY_TIMES          10000
+-#define INIT_CTX_ARB_VALUE    0x0
+-#define INIT_CTX_ENABLE_VALUE     0x0
+-#define INIT_PC_VALUE      0x0
+-#define INIT_WAKEUP_EVENTS_VALUE  0x1
+-#define INIT_SIG_EVENTS_VALUE     0x1
+-#define INIT_CCENABLE_VALUE       0x2000
+-#define RST_CSR_QAT_LSB          20
+-#define RST_CSR_AE_LSB                  0
+-#define MC_TIMESTAMP_ENABLE       (0x1 << 7)
++#define BAD_REGADDR                   0xffff
++#define MAX_RETRY_TIMES                       10000
++#define INIT_CTX_ARB_VALUE            0x0
++#define INIT_CTX_ENABLE_VALUE         0x0
++#define INIT_PC_VALUE                 0x0
++#define INIT_WAKEUP_EVENTS_VALUE      0x1
++#define INIT_SIG_EVENTS_VALUE         0x1
++#define INIT_CCENABLE_VALUE           0x2000
++#define RST_CSR_QAT_LSB                       20
++#define RST_CSR_AE_LSB                        0
++#define MC_TIMESTAMP_ENABLE           (0x1 << 7)
+ #define IGNORE_W1C_MASK ((~(1 << CE_BREAKPOINT_BITPOS)) & \
+       (~(1 << CE_CNTL_STORE_PARITY_ERROR_BITPOS)) & \
diff --git a/queue-6.18/dma-mapping-add-__dma_from_device_group_begin-end.patch b/queue-6.18/dma-mapping-add-__dma_from_device_group_begin-end.patch
new file mode 100644 (file)
index 0000000..5a61f08
--- /dev/null
@@ -0,0 +1,69 @@
+From stable+bounces-243938-greg=kroah.com@vger.kernel.org Tue May  5 02:15:51 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon,  4 May 2026 20:15:21 -0400
+Subject: dma-mapping: add __dma_from_device_group_begin()/end()
+To: stable@vger.kernel.org
+Cc: "Michael S. Tsirkin" <mst@redhat.com>, Marek Szyprowski <m.szyprowski@samsung.com>, Petr Tesarik <ptesarik@suse.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505001522.124823-1-sashal@kernel.org>
+
+From: "Michael S. Tsirkin" <mst@redhat.com>
+
+[ Upstream commit ca085faabb42c31ee204235facc5a430cb9e78a9 ]
+
+When a structure contains a buffer that DMA writes to alongside fields
+that the CPU writes to, cache line sharing between the DMA buffer and
+CPU-written fields can cause data corruption on non-cache-coherent
+platforms.
+
+Add __dma_from_device_group_begin()/end() annotations to ensure proper
+alignment to prevent this:
+
+struct my_device {
+       spinlock_t lock1;
+       __dma_from_device_group_begin();
+       char dma_buffer1[16];
+       char dma_buffer2[16];
+       __dma_from_device_group_end();
+       spinlock_t lock2;
+};
+
+Message-ID: <19163086d5e4704c316f18f6da06bc1c72968904.1767601130.git.mst@redhat.com>
+Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Reviewed-by: Petr Tesarik <ptesarik@suse.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Stable-dep-of: 3023c050af36 ("hwmon: (powerz) Avoid cacheline sharing for DMA buffer")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/dma-mapping.h |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/include/linux/dma-mapping.h
++++ b/include/linux/dma-mapping.h
+@@ -7,6 +7,7 @@
+ #include <linux/dma-direction.h>
+ #include <linux/scatterlist.h>
+ #include <linux/bug.h>
++#include <linux/cache.h>
+ /**
+  * List of possible attributes associated with a DMA mapping. The semantics
+@@ -710,6 +711,18 @@ static inline int dma_get_cache_alignmen
+ }
+ #endif
++#ifdef ARCH_HAS_DMA_MINALIGN
++#define ____dma_from_device_aligned __aligned(ARCH_DMA_MINALIGN)
++#else
++#define ____dma_from_device_aligned
++#endif
++/* Mark start of DMA buffer */
++#define __dma_from_device_group_begin(GROUP)                  \
++      __cacheline_group_begin(GROUP) ____dma_from_device_aligned
++/* Mark end of DMA buffer */
++#define __dma_from_device_group_end(GROUP)                    \
++      __cacheline_group_end(GROUP) ____dma_from_device_aligned
++
+ static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
+               dma_addr_t *dma_handle, gfp_t gfp)
+ {
diff --git a/queue-6.18/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch b/queue-6.18/erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch
new file mode 100644 (file)
index 0000000..fb01b02
--- /dev/null
@@ -0,0 +1,57 @@
+From stable+bounces-244897-greg=kroah.com@vger.kernel.org Sat May  9 05:22:47 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 23:22:29 -0400
+Subject: erofs: fix unsigned underflow in z_erofs_lz4_handle_overlap()
+To: stable@vger.kernel.org
+Cc: Junrui Luo <moonafterrain@outlook.com>, Yuhao Jiang <danisjiang@gmail.com>, Gao Xiang <hsiangkao@linux.alibaba.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509032229.3064816-2-sashal@kernel.org>
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+[ Upstream commit 21e161de2dc660b1bb70ef5b156ab8e6e1cca3ab ]
+
+Some crafted images can have illegal (!partial_decoding &&
+m_llen < m_plen) extents, and the LZ4 inplace decompression path
+can be wrongly hit, but it cannot handle (outpages < inpages)
+properly: "outpages - inpages" wraps to a large value and
+the subsequent rq->out[] access reads past the decompressed_pages
+array.
+
+However, such crafted cases can correctly result in a corruption
+report in the normal LZ4 non-inplace path.
+
+Let's add an additional check to fix this for backporting.
+
+Reproducible image (base64-encoded gzipped blob):
+
+H4sIAJGR12kCA+3SPUoDQRgG4MkmkkZk8QRbRFIIi9hbpEjrHQI5ghfwCN5BLCzTGtLbBI+g
+dilSJo1CnIm7GEXFxhT6PDDwfrs73/ywIQD/1ePD4r7Ou6ETsrq4mu7XcWfj++Pb58nJU/9i
+PNtbjhan04/9GtX4qVYc814WDqt6FaX5s+ZwXXeq52lndT6IuVvlblytLMvh4Gzwaf90nsvz
+2DF/21+20T/ldgp5s1jXRaN4t/8izsy/OUB6e/Qa79r+JwAAAAAAAL52vQVuGQAAAP6+my1w
+ywAAAAAAAADwu14ATsEYtgBQAAA=
+
+$ mount -t erofs -o cache_strategy=disabled foo.erofs /mnt
+$ dd if=/mnt/data of=/dev/null bs=4096 count=1
+
+Fixes: 598162d05080 ("erofs: support decompress big pcluster for lz4 backend")
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/decompressor.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -142,6 +142,7 @@ static void *z_erofs_lz4_handle_overlap(
+       oend = rq->pageofs_out + rq->outputsize;
+       omargin = PAGE_ALIGN(oend) - oend;
+       if (!rq->partial_decoding && may_inplace &&
++          rq->outpages >= rq->inpages &&
+           omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
+               for (i = 0; i < rq->inpages; ++i)
+                       if (rq->out[rq->outpages - rq->inpages + i] !=
diff --git a/queue-6.18/erofs-tidy-up-z_erofs_lz4_handle_overlap.patch b/queue-6.18/erofs-tidy-up-z_erofs_lz4_handle_overlap.patch
new file mode 100644 (file)
index 0000000..8da20a8
--- /dev/null
@@ -0,0 +1,139 @@
+From stable+bounces-244896-greg=kroah.com@vger.kernel.org Sat May  9 05:22:43 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 23:22:28 -0400
+Subject: erofs: tidy up z_erofs_lz4_handle_overlap()
+To: stable@vger.kernel.org
+Cc: Gao Xiang <hsiangkao@linux.alibaba.com>, Chao Yu <chao@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509032229.3064816-1-sashal@kernel.org>
+
+From: Gao Xiang <hsiangkao@linux.alibaba.com>
+
+[ Upstream commit 9ae77198d4815c63fc8ebacc659c71d150d1e51b ]
+
+ - Add some useful comments to explain inplace I/Os and decompression;
+
+ - Rearrange the code to get rid of one unnecessary goto.
+
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
+Stable-dep-of: 21e161de2dc6 ("erofs: fix unsigned underflow in z_erofs_lz4_handle_overlap()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/erofs/decompressor.c |   85 +++++++++++++++++++++++++-----------------------
+ 1 file changed, 46 insertions(+), 39 deletions(-)
+
+--- a/fs/erofs/decompressor.c
++++ b/fs/erofs/decompressor.c
+@@ -105,44 +105,58 @@ static int z_erofs_lz4_prepare_dstpages(
+       return kaddr ? 1 : 0;
+ }
+-static void *z_erofs_lz4_handle_overlap(struct z_erofs_decompress_req *rq,
++static void *z_erofs_lz4_handle_overlap(const struct z_erofs_decompress_req *rq,
+                       void *inpage, void *out, unsigned int *inputmargin,
+                       int *maptype, bool may_inplace)
+ {
+-      unsigned int oend, omargin, total, i;
++      unsigned int oend, omargin, cnt, i;
+       struct page **in;
+-      void *src, *tmp;
+-
+-      if (rq->inplace_io) {
+-              oend = rq->pageofs_out + rq->outputsize;
+-              omargin = PAGE_ALIGN(oend) - oend;
+-              if (rq->partial_decoding || !may_inplace ||
+-                  omargin < LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize))
+-                      goto docopy;
++      void *src;
++      /*
++       * If in-place I/O isn't used, for example, the bounce compressed cache
++       * can hold data for incomplete read requests. Just map the compressed
++       * buffer as well and decompress directly.
++       */
++      if (!rq->inplace_io) {
++              if (rq->inpages <= 1) {
++                      *maptype = 0;
++                      return inpage;
++              }
++              kunmap_local(inpage);
++              src = erofs_vm_map_ram(rq->in, rq->inpages);
++              if (!src)
++                      return ERR_PTR(-ENOMEM);
++              *maptype = 1;
++              return src;
++      }
++      /*
++       * Then, deal with in-place I/Os. The reasons why in-place I/O is useful
++       * are: (1) It minimizes memory footprint during the I/O submission,
++       * which is useful for slow storage (including network devices and
++       * low-end HDDs/eMMCs) but with a lot inflight I/Os; (2) If in-place
++       * decompression can also be applied, it will reuse the unique buffer so
++       * that no extra CPU D-cache is polluted with temporary compressed data
++       * for extreme performance.
++       */
++      oend = rq->pageofs_out + rq->outputsize;
++      omargin = PAGE_ALIGN(oend) - oend;
++      if (!rq->partial_decoding && may_inplace &&
++          omargin >= LZ4_DECOMPRESS_INPLACE_MARGIN(rq->inputsize)) {
+               for (i = 0; i < rq->inpages; ++i)
+                       if (rq->out[rq->outpages - rq->inpages + i] !=
+                           rq->in[i])
+-                              goto docopy;
+-              kunmap_local(inpage);
+-              *maptype = 3;
+-              return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
++                              break;
++              if (i >= rq->inpages) {
++                      kunmap_local(inpage);
++                      *maptype = 3;
++                      return out + ((rq->outpages - rq->inpages) << PAGE_SHIFT);
++              }
+       }
+-
+-      if (rq->inpages <= 1) {
+-              *maptype = 0;
+-              return inpage;
+-      }
+-      kunmap_local(inpage);
+-      src = erofs_vm_map_ram(rq->in, rq->inpages);
+-      if (!src)
+-              return ERR_PTR(-ENOMEM);
+-      *maptype = 1;
+-      return src;
+-
+-docopy:
+-      /* Or copy compressed data which can be overlapped to per-CPU buffer */
+-      in = rq->in;
++      /*
++       * If in-place decompression can't be applied, copy compressed data that
++       * may potentially overlap during decompression to a per-CPU buffer.
++       */
+       src = z_erofs_get_gbuf(rq->inpages);
+       if (!src) {
+               DBG_BUGON(1);
+@@ -150,20 +164,13 @@ docopy:
+               return ERR_PTR(-EFAULT);
+       }
+-      tmp = src;
+-      total = rq->inputsize;
+-      while (total) {
+-              unsigned int page_copycnt =
+-                      min_t(unsigned int, total, PAGE_SIZE - *inputmargin);
+-
++      for (i = 0, in = rq->in; i < rq->inputsize; i += cnt, ++in) {
++              cnt = min_t(u32, rq->inputsize - i, PAGE_SIZE - *inputmargin);
+               if (!inpage)
+                       inpage = kmap_local_page(*in);
+-              memcpy(tmp, inpage + *inputmargin, page_copycnt);
++              memcpy(src + i, inpage + *inputmargin, cnt);
+               kunmap_local(inpage);
+               inpage = NULL;
+-              tmp += page_copycnt;
+-              total -= page_copycnt;
+-              ++in;
+               *inputmargin = 0;
+       }
+       *maptype = 2;
diff --git a/queue-6.18/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch b/queue-6.18/fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch
new file mode 100644 (file)
index 0000000..c3ae48a
--- /dev/null
@@ -0,0 +1,371 @@
+From stable+bounces-243632-greg=kroah.com@vger.kernel.org Mon May  4 16:34:29 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon,  4 May 2026 10:19:38 -0400
+Subject: fbdev: defio: Disconnect deferred I/O from the lifetime of struct fb_info
+To: stable@vger.kernel.org
+Cc: Thomas Zimmermann <tzimmermann@suse.de>, Helge Deller <deller@gmx.de>, linux-fbdev@vger.kernel.org, dri-devel@lists.freedesktop.org, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260504141938.2378270-1-sashal@kernel.org>
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+[ Upstream commit 9ded47ad003f09a94b6a710b5c47f4aa5ceb7429 ]
+
+Hold state of deferred I/O in struct fb_deferred_io_state. Allocate an
+instance as part of initializing deferred I/O and remove it only after
+the final mapping has been closed. If the fb_info and the contained
+deferred I/O meanwhile goes away, clear struct fb_deferred_io_state.info
+to invalidate the mapping. Any access will then result in a SIGBUS
+signal.
+
+Fixes a long-standing problem, where a device hot-unplug happens while
+user space still has an active mapping of the graphics memory. The hot-
+unplug frees the instance of struct fb_info. Accessing the memory will
+operate on undefined state.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 60b59beafba8 ("fbdev: mm: Deferred IO support")
+Cc: Helge Deller <deller@gmx.de>
+Cc: linux-fbdev@vger.kernel.org
+Cc: dri-devel@lists.freedesktop.org
+Cc: stable@vger.kernel.org # v2.6.22+
+Signed-off-by: Helge Deller <deller@gmx.de>
+[ replaced kzalloc_obj(*fbdefio_state) with kzalloc(sizeof(*fbdefio_state), GFP_KERNEL) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/video/fbdev/core/fb_defio.c |  178 ++++++++++++++++++++++++++++--------
+ include/linux/fb.h                  |    4 
+ 2 files changed, 145 insertions(+), 37 deletions(-)
+
+--- a/drivers/video/fbdev/core/fb_defio.c
++++ b/drivers/video/fbdev/core/fb_defio.c
+@@ -24,6 +24,75 @@
+ #include <linux/rmap.h>
+ #include <linux/pagemap.h>
++/*
++ * struct fb_deferred_io_state
++ */
++
++struct fb_deferred_io_state {
++      struct kref ref;
++
++      struct mutex lock; /* mutex that protects the pageref list */
++      /* fields protected by lock */
++      struct fb_info *info;
++};
++
++static struct fb_deferred_io_state *fb_deferred_io_state_alloc(void)
++{
++      struct fb_deferred_io_state *fbdefio_state;
++
++      fbdefio_state = kzalloc(sizeof(*fbdefio_state), GFP_KERNEL);
++      if (!fbdefio_state)
++              return NULL;
++
++      kref_init(&fbdefio_state->ref);
++      mutex_init(&fbdefio_state->lock);
++
++      return fbdefio_state;
++}
++
++static void fb_deferred_io_state_release(struct fb_deferred_io_state *fbdefio_state)
++{
++      mutex_destroy(&fbdefio_state->lock);
++
++      kfree(fbdefio_state);
++}
++
++static void fb_deferred_io_state_get(struct fb_deferred_io_state *fbdefio_state)
++{
++      kref_get(&fbdefio_state->ref);
++}
++
++static void __fb_deferred_io_state_release(struct kref *ref)
++{
++      struct fb_deferred_io_state *fbdefio_state =
++              container_of(ref, struct fb_deferred_io_state, ref);
++
++      fb_deferred_io_state_release(fbdefio_state);
++}
++
++static void fb_deferred_io_state_put(struct fb_deferred_io_state *fbdefio_state)
++{
++      kref_put(&fbdefio_state->ref, __fb_deferred_io_state_release);
++}
++
++/*
++ * struct vm_operations_struct
++ */
++
++static void fb_deferred_io_vm_open(struct vm_area_struct *vma)
++{
++      struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
++
++      fb_deferred_io_state_get(fbdefio_state);
++}
++
++static void fb_deferred_io_vm_close(struct vm_area_struct *vma)
++{
++      struct fb_deferred_io_state *fbdefio_state = vma->vm_private_data;
++
++      fb_deferred_io_state_put(fbdefio_state);
++}
++
+ static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
+ {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
+@@ -121,25 +190,46 @@ static void fb_deferred_io_pageref_put(s
+ /* this is to find and return the vmalloc-ed fb pages */
+ static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
+ {
++      struct fb_info *info;
+       unsigned long offset;
+       struct page *page;
+-      struct fb_info *info = vmf->vma->vm_private_data;
++      vm_fault_t ret;
++      struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
++
++      mutex_lock(&fbdefio_state->lock);
++
++      info = fbdefio_state->info;
++      if (!info) {
++              ret = VM_FAULT_SIGBUS; /* our device is gone */
++              goto err_mutex_unlock;
++      }
+       offset = vmf->pgoff << PAGE_SHIFT;
+-      if (offset >= info->fix.smem_len)
+-              return VM_FAULT_SIGBUS;
++      if (offset >= info->fix.smem_len) {
++              ret = VM_FAULT_SIGBUS;
++              goto err_mutex_unlock;
++      }
+       page = fb_deferred_io_get_page(info, offset);
+-      if (!page)
+-              return VM_FAULT_SIGBUS;
++      if (!page) {
++              ret = VM_FAULT_SIGBUS;
++              goto err_mutex_unlock;
++      }
+       if (!vmf->vma->vm_file)
+               fb_err(info, "no mapping available\n");
+       BUG_ON(!info->fbdefio->mapping);
++      mutex_unlock(&fbdefio_state->lock);
++
+       vmf->page = page;
++
+       return 0;
++
++err_mutex_unlock:
++      mutex_unlock(&fbdefio_state->lock);
++      return ret;
+ }
+ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
+@@ -166,15 +256,24 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
+  * Adds a page to the dirty list. Call this from struct
+  * vm_operations_struct.page_mkwrite.
+  */
+-static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
+-                                          struct page *page)
++static vm_fault_t fb_deferred_io_track_page(struct fb_deferred_io_state *fbdefio_state,
++                                          unsigned long offset, struct page *page)
+ {
+-      struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_info *info;
++      struct fb_deferred_io *fbdefio;
+       struct fb_deferred_io_pageref *pageref;
+       vm_fault_t ret;
+       /* protect against the workqueue changing the page list */
+-      mutex_lock(&fbdefio->lock);
++      mutex_lock(&fbdefio_state->lock);
++
++      info = fbdefio_state->info;
++      if (!info) {
++              ret = VM_FAULT_SIGBUS; /* our device is gone */
++              goto err_mutex_unlock;
++      }
++
++      fbdefio = info->fbdefio;
+       pageref = fb_deferred_io_pageref_get(info, offset, page);
+       if (WARN_ON_ONCE(!pageref)) {
+@@ -192,50 +291,38 @@ static vm_fault_t fb_deferred_io_track_p
+        */
+       lock_page(pageref->page);
+-      mutex_unlock(&fbdefio->lock);
++      mutex_unlock(&fbdefio_state->lock);
+       /* come back after delay to process the deferred IO */
+       schedule_delayed_work(&info->deferred_work, fbdefio->delay);
+       return VM_FAULT_LOCKED;
+ err_mutex_unlock:
+-      mutex_unlock(&fbdefio->lock);
++      mutex_unlock(&fbdefio_state->lock);
+       return ret;
+ }
+-/*
+- * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
+- * @fb_info: The fbdev info structure
+- * @vmf: The VM fault
+- *
+- * This is a callback we get when userspace first tries to
+- * write to the page. We schedule a workqueue. That workqueue
+- * will eventually mkclean the touched pages and execute the
+- * deferred framebuffer IO. Then if userspace touches a page
+- * again, we repeat the same scheme.
+- *
+- * Returns:
+- * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
+- */
+-static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
++static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_deferred_io_state *fbdefio_state,
++                                            struct vm_fault *vmf)
+ {
+       unsigned long offset = vmf->pgoff << PAGE_SHIFT;
+       struct page *page = vmf->page;
+       file_update_time(vmf->vma->vm_file);
+-      return fb_deferred_io_track_page(info, offset, page);
++      return fb_deferred_io_track_page(fbdefio_state, offset, page);
+ }
+-/* vm_ops->page_mkwrite handler */
+ static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
+ {
+-      struct fb_info *info = vmf->vma->vm_private_data;
++      struct fb_deferred_io_state *fbdefio_state = vmf->vma->vm_private_data;
+-      return fb_deferred_io_page_mkwrite(info, vmf);
++      return fb_deferred_io_page_mkwrite(fbdefio_state, vmf);
+ }
+ static const struct vm_operations_struct fb_deferred_io_vm_ops = {
++      .open           = fb_deferred_io_vm_open,
++      .close          = fb_deferred_io_vm_close,
+       .fault          = fb_deferred_io_fault,
+       .page_mkwrite   = fb_deferred_io_mkwrite,
+ };
+@@ -252,7 +339,10 @@ int fb_deferred_io_mmap(struct fb_info *
+       vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
+       if (!(info->flags & FBINFO_VIRTFB))
+               vm_flags_set(vma, VM_IO);
+-      vma->vm_private_data = info;
++      vma->vm_private_data = info->fbdefio_state;
++
++      fb_deferred_io_state_get(info->fbdefio_state); /* released in vma->vm_ops->close() */
++
+       return 0;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
+@@ -263,9 +353,10 @@ static void fb_deferred_io_work(struct w
+       struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
+       struct fb_deferred_io_pageref *pageref, *next;
+       struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
+       /* here we wrprotect the page's mappings, then do all deferred IO. */
+-      mutex_lock(&fbdefio->lock);
++      mutex_lock(&fbdefio_state->lock);
+ #ifdef CONFIG_MMU
+       list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
+               struct page *page = pageref->page;
+@@ -283,12 +374,13 @@ static void fb_deferred_io_work(struct w
+       list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
+               fb_deferred_io_pageref_put(pageref, info);
+-      mutex_unlock(&fbdefio->lock);
++      mutex_unlock(&fbdefio_state->lock);
+ }
+ int fb_deferred_io_init(struct fb_info *info)
+ {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_deferred_io_state *fbdefio_state;
+       struct fb_deferred_io_pageref *pagerefs;
+       unsigned long npagerefs;
+       int ret;
+@@ -298,7 +390,11 @@ int fb_deferred_io_init(struct fb_info *
+       if (WARN_ON(!info->fix.smem_len))
+               return -EINVAL;
+-      mutex_init(&fbdefio->lock);
++      fbdefio_state = fb_deferred_io_state_alloc();
++      if (!fbdefio_state)
++              return -ENOMEM;
++      fbdefio_state->info = info;
++
+       INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
+       INIT_LIST_HEAD(&fbdefio->pagereflist);
+       if (fbdefio->delay == 0) /* set a default of 1 s */
+@@ -315,10 +411,12 @@ int fb_deferred_io_init(struct fb_info *
+       info->npagerefs = npagerefs;
+       info->pagerefs = pagerefs;
++      info->fbdefio_state = fbdefio_state;
++
+       return 0;
+ err:
+-      mutex_destroy(&fbdefio->lock);
++      fb_deferred_io_state_release(fbdefio_state);
+       return ret;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_init);
+@@ -352,11 +450,19 @@ EXPORT_SYMBOL_GPL(fb_deferred_io_release
+ void fb_deferred_io_cleanup(struct fb_info *info)
+ {
+       struct fb_deferred_io *fbdefio = info->fbdefio;
++      struct fb_deferred_io_state *fbdefio_state = info->fbdefio_state;
+       fb_deferred_io_lastclose(info);
++      info->fbdefio_state = NULL;
++
++      mutex_lock(&fbdefio_state->lock);
++      fbdefio_state->info = NULL;
++      mutex_unlock(&fbdefio_state->lock);
++
++      fb_deferred_io_state_put(fbdefio_state);
++
+       kvfree(info->pagerefs);
+-      mutex_destroy(&fbdefio->lock);
+       fbdefio->mapping = NULL;
+ }
+ EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
+--- a/include/linux/fb.h
++++ b/include/linux/fb.h
+@@ -217,13 +217,14 @@ struct fb_deferred_io {
+       unsigned long delay;
+       bool sort_pagereflist; /* sort pagelist by offset */
+       int open_count; /* number of opened files; protected by fb_info lock */
+-      struct mutex lock; /* mutex that protects the pageref list */
+       struct list_head pagereflist; /* list of pagerefs for touched pages */
+       struct address_space *mapping; /* page cache object for fb device */
+       /* callback */
+       struct page *(*get_page)(struct fb_info *info, unsigned long offset);
+       void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
+ };
++
++struct fb_deferred_io_state;
+ #endif
+ /*
+@@ -490,6 +491,7 @@ struct fb_info {
+       unsigned long npagerefs;
+       struct fb_deferred_io_pageref *pagerefs;
+       struct fb_deferred_io *fbdefio;
++      struct fb_deferred_io_state *fbdefio_state;
+ #endif
+       const struct fb_ops *fbops;
diff --git a/queue-6.18/firmware-exynos-acpm-drop-fake-const-on-handle-pointer.patch b/queue-6.18/firmware-exynos-acpm-drop-fake-const-on-handle-pointer.patch
new file mode 100644 (file)
index 0000000..0108135
--- /dev/null
@@ -0,0 +1,273 @@
+From stable+bounces-244780-greg=kroah.com@vger.kernel.org Fri May  8 16:54:46 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 10:52:59 -0400
+Subject: firmware: exynos-acpm: Drop fake 'const' on handle pointer
+To: stable@vger.kernel.org
+Cc: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>, Krzysztof Kozlowski <krzk@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260508145259.1514616-1-sashal@kernel.org>
+
+From: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>
+
+[ Upstream commit a2be37eedb52ea26938fa4cc9de1ff84963c57ad ]
+
+All the functions operating on the 'handle' pointer are claiming it is a
+pointer to const thus they should not modify the handle.  In fact that's
+a false statement, because first thing these functions do is drop the
+cast to const with container_of:
+
+  struct acpm_info *acpm = handle_to_acpm_info(handle);
+
+And with such cast the handle is easily writable with simple:
+
+  acpm->handle.ops.pmic_ops.read_reg = NULL;
+
+The code is not correct logically, either, because functions like
+acpm_get_by_node() and acpm_handle_put() are meant to modify the handle
+reference counting, thus they must modify the handle.  Modification here
+happens anyway, even if the reference counting is stored in the
+container which the handle is part of.
+
+The code does not have actual visible bug, but incorrect 'const'
+annotations could lead to incorrect compiler decisions.
+
+Fixes: a88927b534ba ("firmware: add Exynos ACPM protocol driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>
+Link: https://patch.msgid.link/20260224104203.42950-2-krzysztof.kozlowski@oss.qualcomm.com
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+[ dropped hunks for DVFS/clk-acpm files and `acpm_dvfs_ops` struct that don't exist in 6.18 ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/samsung/exynos-acpm-pmic.c           |   10 +++---
+ drivers/firmware/samsung/exynos-acpm-pmic.h           |   10 +++---
+ drivers/firmware/samsung/exynos-acpm.c                |   16 +++++----
+ drivers/firmware/samsung/exynos-acpm.h                |    2 -
+ drivers/mfd/sec-acpm.c                                |   10 +++---
+ include/linux/firmware/samsung/exynos-acpm-protocol.h |   29 +++++++-----------
+ 6 files changed, 37 insertions(+), 40 deletions(-)
+
+--- a/drivers/firmware/samsung/exynos-acpm-pmic.c
++++ b/drivers/firmware/samsung/exynos-acpm-pmic.c
+@@ -77,7 +77,7 @@ static void acpm_pmic_init_read_cmd(u32
+       cmd[3] = ktime_to_ms(ktime_get());
+ }
+-int acpm_pmic_read_reg(const struct acpm_handle *handle,
++int acpm_pmic_read_reg(struct acpm_handle *handle,
+                      unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                      u8 *buf)
+ {
+@@ -107,7 +107,7 @@ static void acpm_pmic_init_bulk_read_cmd
+                FIELD_PREP(ACPM_PMIC_VALUE, count);
+ }
+-int acpm_pmic_bulk_read(const struct acpm_handle *handle,
++int acpm_pmic_bulk_read(struct acpm_handle *handle,
+                       unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                       u8 count, u8 *buf)
+ {
+@@ -150,7 +150,7 @@ static void acpm_pmic_init_write_cmd(u32
+       cmd[3] = ktime_to_ms(ktime_get());
+ }
+-int acpm_pmic_write_reg(const struct acpm_handle *handle,
++int acpm_pmic_write_reg(struct acpm_handle *handle,
+                       unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                       u8 value)
+ {
+@@ -187,7 +187,7 @@ static void acpm_pmic_init_bulk_write_cm
+       }
+ }
+-int acpm_pmic_bulk_write(const struct acpm_handle *handle,
++int acpm_pmic_bulk_write(struct acpm_handle *handle,
+                        unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                        u8 count, const u8 *buf)
+ {
+@@ -220,7 +220,7 @@ static void acpm_pmic_init_update_cmd(u3
+       cmd[3] = ktime_to_ms(ktime_get());
+ }
+-int acpm_pmic_update_reg(const struct acpm_handle *handle,
++int acpm_pmic_update_reg(struct acpm_handle *handle,
+                        unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                        u8 value, u8 mask)
+ {
+--- a/drivers/firmware/samsung/exynos-acpm-pmic.h
++++ b/drivers/firmware/samsung/exynos-acpm-pmic.h
+@@ -11,19 +11,19 @@
+ struct acpm_handle;
+-int acpm_pmic_read_reg(const struct acpm_handle *handle,
++int acpm_pmic_read_reg(struct acpm_handle *handle,
+                      unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                      u8 *buf);
+-int acpm_pmic_bulk_read(const struct acpm_handle *handle,
++int acpm_pmic_bulk_read(struct acpm_handle *handle,
+                       unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                       u8 count, u8 *buf);
+-int acpm_pmic_write_reg(const struct acpm_handle *handle,
++int acpm_pmic_write_reg(struct acpm_handle *handle,
+                       unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                       u8 value);
+-int acpm_pmic_bulk_write(const struct acpm_handle *handle,
++int acpm_pmic_bulk_write(struct acpm_handle *handle,
+                        unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                        u8 count, const u8 *buf);
+-int acpm_pmic_update_reg(const struct acpm_handle *handle,
++int acpm_pmic_update_reg(struct acpm_handle *handle,
+                        unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+                        u8 value, u8 mask);
+ #endif /* __EXYNOS_ACPM_PMIC_H__ */
+--- a/drivers/firmware/samsung/exynos-acpm.c
++++ b/drivers/firmware/samsung/exynos-acpm.c
+@@ -409,7 +409,7 @@ static int acpm_wait_for_message_respons
+  *
+  * Return: 0 on success, -errno otherwise.
+  */
+-int acpm_do_xfer(const struct acpm_handle *handle, const struct acpm_xfer *xfer)
++int acpm_do_xfer(struct acpm_handle *handle, const struct acpm_xfer *xfer)
+ {
+       struct acpm_info *acpm = handle_to_acpm_info(handle);
+       struct exynos_mbox_msg msg;
+@@ -649,7 +649,7 @@ static int acpm_probe(struct platform_de
+  * acpm_handle_put() - release the handle acquired by acpm_get_by_phandle.
+  * @handle:   Handle acquired by acpm_get_by_phandle.
+  */
+-static void acpm_handle_put(const struct acpm_handle *handle)
++static void acpm_handle_put(struct acpm_handle *handle)
+ {
+       struct acpm_info *acpm = handle_to_acpm_info(handle);
+       struct device *dev = acpm->dev;
+@@ -675,9 +675,11 @@ static void devm_acpm_release(struct dev
+  * @np:               ACPM device tree node.
+  *
+  * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
++ *
++ * Note: handle CANNOT be pointer to const
+  */
+-static const struct acpm_handle *acpm_get_by_node(struct device *dev,
+-                                                struct device_node *np)
++static struct acpm_handle *acpm_get_by_node(struct device *dev,
++                                          struct device_node *np)
+ {
+       struct platform_device *pdev;
+       struct device_link *link;
+@@ -718,10 +720,10 @@ static const struct acpm_handle *acpm_ge
+  *
+  * Return: pointer to handle on success, ERR_PTR(-errno) otherwise.
+  */
+-const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+-                                              struct device_node *np)
++struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
++                                        struct device_node *np)
+ {
+-      const struct acpm_handle **ptr, *handle;
++      struct acpm_handle **ptr, *handle;
+       ptr = devres_alloc(devm_acpm_release, sizeof(*ptr), GFP_KERNEL);
+       if (!ptr)
+--- a/drivers/firmware/samsung/exynos-acpm.h
++++ b/drivers/firmware/samsung/exynos-acpm.h
+@@ -17,7 +17,7 @@ struct acpm_xfer {
+ struct acpm_handle;
+-int acpm_do_xfer(const struct acpm_handle *handle,
++int acpm_do_xfer(struct acpm_handle *handle,
+                const struct acpm_xfer *xfer);
+ #endif /* __EXYNOS_ACPM_H__ */
+--- a/drivers/mfd/sec-acpm.c
++++ b/drivers/mfd/sec-acpm.c
+@@ -217,7 +217,7 @@ static const struct regmap_config s2mpg1
+ };
+ struct sec_pmic_acpm_shared_bus_context {
+-      const struct acpm_handle *acpm;
++      struct acpm_handle *acpm;
+       unsigned int acpm_chan_id;
+       u8 speedy_channel;
+ };
+@@ -240,7 +240,7 @@ static int sec_pmic_acpm_bus_write(void
+                                  size_t count)
+ {
+       struct sec_pmic_acpm_bus_context *ctx = context;
+-      const struct acpm_handle *acpm = ctx->shared->acpm;
++      struct acpm_handle *acpm = ctx->shared->acpm;
+       const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+       size_t val_count = count - BITS_TO_BYTES(ACPM_ADDR_BITS);
+       const u8 *d = data;
+@@ -260,7 +260,7 @@ static int sec_pmic_acpm_bus_read(void *
+                                 void *val_buf, size_t val_size)
+ {
+       struct sec_pmic_acpm_bus_context *ctx = context;
+-      const struct acpm_handle *acpm = ctx->shared->acpm;
++      struct acpm_handle *acpm = ctx->shared->acpm;
+       const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+       const u8 *r = reg_buf;
+       u8 reg;
+@@ -279,7 +279,7 @@ static int sec_pmic_acpm_bus_reg_update_
+                                            unsigned int val)
+ {
+       struct sec_pmic_acpm_bus_context *ctx = context;
+-      const struct acpm_handle *acpm = ctx->shared->acpm;
++      struct acpm_handle *acpm = ctx->shared->acpm;
+       const struct acpm_pmic_ops *pmic_ops = &acpm->ops.pmic_ops;
+       return pmic_ops->update_reg(acpm, ctx->shared->acpm_chan_id, ctx->type, reg & 0xff,
+@@ -335,7 +335,7 @@ static int sec_pmic_acpm_probe(struct pl
+       struct regmap *regmap_common, *regmap_pmic, *regmap;
+       const struct sec_pmic_acpm_platform_data *pdata;
+       struct sec_pmic_acpm_shared_bus_context *shared_ctx;
+-      const struct acpm_handle *acpm;
++      struct acpm_handle *acpm;
+       struct device *dev = &pdev->dev;
+       int ret, irq;
+--- a/include/linux/firmware/samsung/exynos-acpm-protocol.h
++++ b/include/linux/firmware/samsung/exynos-acpm-protocol.h
+@@ -14,21 +14,16 @@ struct acpm_handle;
+ struct device_node;
+ struct acpm_pmic_ops {
+-      int (*read_reg)(const struct acpm_handle *handle,
+-                      unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+-                      u8 *buf);
+-      int (*bulk_read)(const struct acpm_handle *handle,
+-                       unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+-                       u8 count, u8 *buf);
+-      int (*write_reg)(const struct acpm_handle *handle,
+-                       unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+-                       u8 value);
+-      int (*bulk_write)(const struct acpm_handle *handle,
+-                        unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+-                        u8 count, const u8 *buf);
+-      int (*update_reg)(const struct acpm_handle *handle,
+-                        unsigned int acpm_chan_id, u8 type, u8 reg, u8 chan,
+-                        u8 value, u8 mask);
++      int (*read_reg)(struct acpm_handle *handle, unsigned int acpm_chan_id,
++                      u8 type, u8 reg, u8 chan, u8 *buf);
++      int (*bulk_read)(struct acpm_handle *handle, unsigned int acpm_chan_id,
++                       u8 type, u8 reg, u8 chan, u8 count, u8 *buf);
++      int (*write_reg)(struct acpm_handle *handle, unsigned int acpm_chan_id,
++                       u8 type, u8 reg, u8 chan, u8 value);
++      int (*bulk_write)(struct acpm_handle *handle, unsigned int acpm_chan_id,
++                        u8 type, u8 reg, u8 chan, u8 count, const u8 *buf);
++      int (*update_reg)(struct acpm_handle *handle, unsigned int acpm_chan_id,
++                        u8 type, u8 reg, u8 chan, u8 value, u8 mask);
+ };
+ struct acpm_ops {
+@@ -45,7 +40,7 @@ struct acpm_handle {
+ struct device;
+-const struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
+-                                              struct device_node *np);
++struct acpm_handle *devm_acpm_get_by_node(struct device *dev,
++                                        struct device_node *np);
+ #endif /* __EXYNOS_ACPM_PROTOCOL_H */
diff --git a/queue-6.18/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch b/queue-6.18/hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch
new file mode 100644 (file)
index 0000000..6b432fe
--- /dev/null
@@ -0,0 +1,142 @@
+From stable+bounces-244857-greg=kroah.com@vger.kernel.org Sat May  9 02:35:43 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 20:35:34 -0400
+Subject: hfsplus: fix held lock freed on hfsplus_fill_super()
+To: stable@vger.kernel.org
+Cc: Zilin Guan <zilin@seu.edu.cn>, Viacheslav Dubeyko <slava@dubeyko.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509003534.2360473-2-sashal@kernel.org>
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit 90c500e4fd83fa33c09bc7ee23b6d9cc487ac733 ]
+
+hfsplus_fill_super() calls hfs_find_init() to initialize a search
+structure, which acquires tree->tree_lock. If the subsequent call to
+hfsplus_cat_build_key() fails, the function jumps to the out_put_root
+error label without releasing the lock. The later cleanup path then
+frees the tree data structure with the lock still held, triggering a
+held lock freed warning.
+
+Fix this by adding the missing hfs_find_exit(&fd) call before jumping
+to the out_put_root error label. This ensures that tree->tree_lock is
+properly released on the error path.
+
+The bug was originally detected on v6.13-rc1 using an experimental
+static analysis tool we are developing, and we have verified that the
+issue persists in the latest mainline kernel. The tool is specifically
+designed to detect memory management issues. It is currently under active
+development and not yet publicly available.
+
+We confirmed the bug by runtime testing under QEMU with x86_64 defconfig,
+lockdep enabled, and CONFIG_HFSPLUS_FS=y. To trigger the error path, we
+used GDB to dynamically shrink the max_unistr_len parameter to 1 before
+hfsplus_asc2uni() is called. This forces hfsplus_asc2uni() to naturally
+return -ENAMETOOLONG, which propagates to hfsplus_cat_build_key() and
+exercises the faulty error path. The following warning was observed
+during mount:
+
+       =========================
+       WARNING: held lock freed!
+       7.0.0-rc3-00016-gb4f0dd314b39 #4 Not tainted
+       -------------------------
+       mount/174 is freeing memory ffff888103f92000-ffff888103f92fff, with a lock still held there!
+       ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0
+       2 locks held by mount/174:
+       #0: ffff888103f960e0 (&type->s_umount_key#42/1){+.+.}-{4:4}, at: alloc_super.constprop.0+0x167/0xa40
+       #1: ffff888103f920b0 (&tree->tree_lock){+.+.}-{4:4}, at: hfsplus_find_init+0x154/0x1e0
+
+       stack backtrace:
+       CPU: 2 UID: 0 PID: 174 Comm: mount Not tainted 7.0.0-rc3-00016-gb4f0dd314b39 #4 PREEMPT(lazy)
+       Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.15.0-1 04/01/2014
+       Call Trace:
+       <TASK>
+       dump_stack_lvl+0x82/0xd0
+       debug_check_no_locks_freed+0x13a/0x180
+       kfree+0x16b/0x510
+       ? hfsplus_fill_super+0xcb4/0x18a0
+       hfsplus_fill_super+0xcb4/0x18a0
+       ? __pfx_hfsplus_fill_super+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? bdev_open+0x65f/0xc30
+       ? srso_return_thunk+0x5/0x5f
+       ? pointer+0x4ce/0xbf0
+       ? trace_contention_end+0x11c/0x150
+       ? __pfx_pointer+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? bdev_open+0x79b/0xc30
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? vsnprintf+0x6da/0x1270
+       ? srso_return_thunk+0x5/0x5f
+       ? __mutex_unlock_slowpath+0x157/0x740
+       ? __pfx_vsnprintf+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? mark_held_locks+0x49/0x80
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? irqentry_exit+0x17b/0x5e0
+       ? trace_irq_disable.constprop.0+0x116/0x150
+       ? __pfx_hfsplus_fill_super+0x10/0x10
+       ? __pfx_hfsplus_fill_super+0x10/0x10
+       get_tree_bdev_flags+0x302/0x580
+       ? __pfx_get_tree_bdev_flags+0x10/0x10
+       ? vfs_parse_fs_qstr+0x129/0x1a0
+       ? __pfx_vfs_parse_fs_qstr+0x3/0x10
+       vfs_get_tree+0x89/0x320
+       fc_mount+0x10/0x1d0
+       path_mount+0x5c5/0x21c0
+       ? __pfx_path_mount+0x10/0x10
+       ? trace_irq_enable.constprop.0+0x116/0x150
+       ? trace_irq_enable.constprop.0+0x116/0x150
+       ? srso_return_thunk+0x5/0x5f
+       ? srso_return_thunk+0x5/0x5f
+       ? kmem_cache_free+0x307/0x540
+       ? user_path_at+0x51/0x60
+       ? __x64_sys_mount+0x212/0x280
+       ? srso_return_thunk+0x5/0x5f
+       __x64_sys_mount+0x212/0x280
+       ? __pfx___x64_sys_mount+0x10/0x10
+       ? srso_return_thunk+0x5/0x5f
+       ? trace_irq_enable.constprop.0+0x116/0x150
+       ? srso_return_thunk+0x5/0x5f
+       do_syscall_64+0x111/0x680
+       entry_SYSCALL_64_after_hwframe+0x77/0x7f
+       RIP: 0033:0x7ffacad55eae
+       Code: 48 8b 0d 85 1f 0f 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 49 89 ca b8 a5 00 00 8
+       RSP: 002b:00007fff1ab55718 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
+       RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007ffacad55eae
+       RDX: 000055740c64e5b0 RSI: 000055740c64e630 RDI: 000055740c651ab0
+       RBP: 000055740c64e380 R08: 0000000000000000 R09: 0000000000000001
+       R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+       R13: 000055740c64e5b0 R14: 000055740c651ab0 R15: 000055740c64e380
+       </TASK>
+
+After applying this patch, the warning no longer appears.
+
+Fixes: 89ac9b4d3d1a ("hfsplus: fix longname handling")
+CC: stable@vger.kernel.org
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Tested-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/super.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -569,8 +569,10 @@ static int hfsplus_fill_super(struct sup
+       if (err)
+               goto out_put_root;
+       err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+-      if (unlikely(err < 0))
++      if (unlikely(err < 0)) {
++              hfs_find_exit(&fd);
+               goto out_put_root;
++      }
+       if (!hfsplus_brec_read_cat(&fd, &entry)) {
+               hfs_find_exit(&fd);
+               if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
diff --git a/queue-6.18/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch b/queue-6.18/hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch
new file mode 100644 (file)
index 0000000..bcecf13
--- /dev/null
@@ -0,0 +1,189 @@
+From stable+bounces-244856-greg=kroah.com@vger.kernel.org Sat May  9 02:35:40 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 20:35:33 -0400
+Subject: hfsplus: fix uninit-value by validating catalog record size
+To: stable@vger.kernel.org
+Cc: Deepanshu Kartikey <kartikey406@gmail.com>, syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com, Viacheslav Dubeyko <slava@dubeyko.com>, Charalampos Mitrodimas <charmitro@posteo.net>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509003534.2360473-1-sashal@kernel.org>
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+[ Upstream commit b6b592275aeff184aa82fcf6abccd833fb71b393 ]
+
+Syzbot reported a KMSAN uninit-value issue in hfsplus_strcasecmp(). The
+root cause is that hfs_brec_read() doesn't validate that the on-disk
+record size matches the expected size for the record type being read.
+
+When mounting a corrupted filesystem, hfs_brec_read() may read less data
+than expected. For example, when reading a catalog thread record, the
+debug output showed:
+
+  HFSPLUS_BREC_READ: rec_len=520, fd->entrylength=26
+  HFSPLUS_BREC_READ: WARNING - entrylength (26) < rec_len (520) - PARTIAL READ!
+
+hfs_brec_read() only validates that entrylength is not greater than the
+buffer size, but doesn't check if it's less than expected. It successfully
+reads 26 bytes into a 520-byte structure and returns success, leaving 494
+bytes uninitialized.
+
+This uninitialized data in tmp.thread.nodeName then gets copied by
+hfsplus_cat_build_key_uni() and used by hfsplus_strcasecmp(), triggering
+the KMSAN warning when the uninitialized bytes are used as array indices
+in case_fold().
+
+Fix by introducing hfsplus_brec_read_cat() wrapper that:
+1. Calls hfs_brec_read() to read the data
+2. Validates the record size based on the type field:
+   - Fixed size for folder and file records
+   - Variable size for thread records (depends on string length)
+3. Returns -EIO if size doesn't match expected
+
+For thread records, check against HFSPLUS_MIN_THREAD_SZ before reading
+nodeName.length to avoid reading uninitialized data at call sites that
+don't zero-initialize the entry structure.
+
+Also initialize the tmp variable in hfsplus_find_cat() as defensive
+programming to ensure no uninitialized data even if validation is
+bypassed.
+
+Reported-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=d80abb5b890d39261e72
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Tested-by: syzbot+d80abb5b890d39261e72@syzkaller.appspotmail.com
+Reviewed-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Tested-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Suggested-by: Charalampos Mitrodimas <charmitro@posteo.net>
+Link: https://lore.kernel.org/all/20260120051114.1281285-1-kartikey406@gmail.com/ [v1]
+Link: https://lore.kernel.org/all/20260121063109.1830263-1-kartikey406@gmail.com/ [v2]
+Link: https://lore.kernel.org/all/20260212014233.2422046-1-kartikey406@gmail.com/ [v3]
+Link: https://lore.kernel.org/all/20260214002100.436125-1-kartikey406@gmail.com/T/ [v4]
+Link: https://lore.kernel.org/all/20260221061626.15853-1-kartikey406@gmail.com/T/ [v5]
+Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Link: https://lore.kernel.org/r/20260307010302.41547-1-kartikey406@gmail.com
+Signed-off-by: Viacheslav Dubeyko <slava@dubeyko.com>
+Stable-dep-of: 90c500e4fd83 ("hfsplus: fix held lock freed on hfsplus_fill_super()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/hfsplus/bfind.c      |   51 ++++++++++++++++++++++++++++++++++++++++++++++++
+ fs/hfsplus/catalog.c    |    4 +--
+ fs/hfsplus/dir.c        |    2 -
+ fs/hfsplus/hfsplus_fs.h |    9 ++++++++
+ fs/hfsplus/super.c      |    2 -
+ 5 files changed, 64 insertions(+), 4 deletions(-)
+
+--- a/fs/hfsplus/bfind.c
++++ b/fs/hfsplus/bfind.c
+@@ -287,3 +287,54 @@ out:
+       fd->bnode = bnode;
+       return res;
+ }
++
++/**
++ * hfsplus_brec_read_cat - read and validate a catalog record
++ * @fd: find data structure
++ * @entry: pointer to catalog entry to read into
++ *
++ * Reads a catalog record and validates its size matches the expected
++ * size based on the record type.
++ *
++ * Returns 0 on success, or negative error code on failure.
++ */
++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry)
++{
++      int res;
++      u32 expected_size;
++
++      res = hfs_brec_read(fd, entry, sizeof(hfsplus_cat_entry));
++      if (res)
++              return res;
++
++      /* Validate catalog record size based on type */
++      switch (be16_to_cpu(entry->type)) {
++      case HFSPLUS_FOLDER:
++              expected_size = sizeof(struct hfsplus_cat_folder);
++              break;
++      case HFSPLUS_FILE:
++              expected_size = sizeof(struct hfsplus_cat_file);
++              break;
++      case HFSPLUS_FOLDER_THREAD:
++      case HFSPLUS_FILE_THREAD:
++              /* Ensure we have at least the fixed fields before reading nodeName.length */
++              if (fd->entrylength < HFSPLUS_MIN_THREAD_SZ) {
++                      pr_err("thread record too short (got %u)\n", fd->entrylength);
++                      return -EIO;
++              }
++              expected_size = hfsplus_cat_thread_size(&entry->thread);
++              break;
++      default:
++              pr_err("unknown catalog record type %d\n",
++                     be16_to_cpu(entry->type));
++              return -EIO;
++      }
++
++      if (fd->entrylength != expected_size) {
++              pr_err("catalog record size mismatch (type %d, got %u, expected %u)\n",
++                     be16_to_cpu(entry->type), fd->entrylength, expected_size);
++              return -EIO;
++      }
++
++      return 0;
++}
+--- a/fs/hfsplus/catalog.c
++++ b/fs/hfsplus/catalog.c
+@@ -194,12 +194,12 @@ static int hfsplus_fill_cat_thread(struc
+ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
+                    struct hfs_find_data *fd)
+ {
+-      hfsplus_cat_entry tmp;
++      hfsplus_cat_entry tmp = {0};
+       int err;
+       u16 type;
+       hfsplus_cat_build_key_with_cnid(sb, fd->search_key, cnid);
+-      err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
++      err = hfsplus_brec_read_cat(fd, &tmp);
+       if (err)
+               return err;
+--- a/fs/hfsplus/dir.c
++++ b/fs/hfsplus/dir.c
+@@ -49,7 +49,7 @@ static struct dentry *hfsplus_lookup(str
+       if (unlikely(err < 0))
+               goto fail;
+ again:
+-      err = hfs_brec_read(&fd, &entry, sizeof(entry));
++      err = hfsplus_brec_read_cat(&fd, &entry);
+       if (err) {
+               if (err == -ENOENT) {
+                       hfs_find_exit(&fd);
+--- a/fs/hfsplus/hfsplus_fs.h
++++ b/fs/hfsplus/hfsplus_fs.h
+@@ -507,6 +507,15 @@ int hfsplus_submit_bio(struct super_bloc
+                      void **data, blk_opf_t opf);
+ int hfsplus_read_wrapper(struct super_block *sb);
++static inline u32 hfsplus_cat_thread_size(const struct hfsplus_cat_thread *thread)
++{
++      return offsetof(struct hfsplus_cat_thread, nodeName) +
++             offsetof(struct hfsplus_unistr, unicode) +
++             be16_to_cpu(thread->nodeName.length) * sizeof(hfsplus_unichr);
++}
++
++int hfsplus_brec_read_cat(struct hfs_find_data *fd, hfsplus_cat_entry *entry);
++
+ /*
+  * time helpers: convert between 1904-base and 1970-base timestamps
+  *
+--- a/fs/hfsplus/super.c
++++ b/fs/hfsplus/super.c
+@@ -571,7 +571,7 @@ static int hfsplus_fill_super(struct sup
+       err = hfsplus_cat_build_key(sb, fd.search_key, HFSPLUS_ROOT_CNID, &str);
+       if (unlikely(err < 0))
+               goto out_put_root;
+-      if (!hfs_brec_read(&fd, &entry, sizeof(entry))) {
++      if (!hfsplus_brec_read_cat(&fd, &entry)) {
+               hfs_find_exit(&fd);
+               if (entry.type != cpu_to_be16(HFSPLUS_FOLDER)) {
+                       err = -EIO;
diff --git a/queue-6.18/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch b/queue-6.18/hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch
new file mode 100644 (file)
index 0000000..a7df65d
--- /dev/null
@@ -0,0 +1,55 @@
+From stable+bounces-243939-greg=kroah.com@vger.kernel.org Tue May  5 02:15:31 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon,  4 May 2026 20:15:22 -0400
+Subject: hwmon: (powerz) Avoid cacheline sharing for DMA buffer
+To: stable@vger.kernel.org
+Cc: "Thomas Weißschuh" <linux@weissschuh.net>, "Guenter Roeck" <linux@roeck-us.net>, "Sasha Levin" <sashal@kernel.org>
+Message-ID: <20260505001522.124823-2-sashal@kernel.org>
+
+From: Thomas Weißschuh <linux@weissschuh.net>
+
+[ Upstream commit 3023c050af3600bf451153335dea5e073c9a3088 ]
+
+Depending on the architecture the transfer buffer may share a cacheline
+with the following mutex. As the buffer may be used for DMA, that is
+problematic.
+
+Use the high-level DMA helpers to make sure that cacheline sharing can
+not happen.
+
+Also drop the comment, as the helpers are documentation enough.
+
+Link: https://lore.kernel.org/all/20260408175814.934BFC19421%40smtp.kernel.org/
+
+Fixes: 4381a36abdf1c ("hwmon: add POWER-Z driver")
+Cc: stable@vger.kernel.org # ca085faabb42: dma-mapping: add __dma_from_device_group_begin()/end()
+Signed-off-by: Thomas Weißschuh <linux@weissschuh.net>
+Link: https://lore.kernel.org/r/20260408-powerz-cacheline-alias-v1-1-1254891be0dd@weissschuh.net
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hwmon/powerz.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/hwmon/powerz.c
++++ b/drivers/hwmon/powerz.c
+@@ -6,6 +6,7 @@
+ #include <linux/completion.h>
+ #include <linux/device.h>
++#include <linux/dma-mapping.h>
+ #include <linux/hwmon.h>
+ #include <linux/module.h>
+ #include <linux/mutex.h>
+@@ -33,7 +34,9 @@ struct powerz_sensor_data {
+ } __packed;
+ struct powerz_priv {
+-      char transfer_buffer[64];       /* first member to satisfy DMA alignment */
++      __dma_from_device_group_begin();
++      char transfer_buffer[64];
++      __dma_from_device_group_end();
+       struct mutex mutex;
+       struct completion completion;
+       struct urb *urb;
diff --git a/queue-6.18/mm-damon-core-disallow-non-power-of-two-min_region_sz-on-damon_start.patch b/queue-6.18/mm-damon-core-disallow-non-power-of-two-min_region_sz-on-damon_start.patch
new file mode 100644 (file)
index 0000000..d1499e0
--- /dev/null
@@ -0,0 +1,43 @@
+From 95093e5cb4c5b50a5b1a4b79f2942b62744bd66a Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sat, 11 Apr 2026 14:36:36 -0700
+Subject: mm/damon/core: disallow non-power of two min_region_sz on damon_start()
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 95093e5cb4c5b50a5b1a4b79f2942b62744bd66a upstream.
+
+Commit d8f867fa0825 ("mm/damon: add damon_ctx->min_sz_region") introduced
+a bug that allows unaligned DAMON region address ranges.  Commit
+c80f46ac228b ("mm/damon/core: disallow non-power of two min_region_sz")
+fixed it, but only for damon_commit_ctx() use case.  Still, DAMON sysfs
+interface can emit non-power of two min_region_sz via damon_start().  Fix
+the path by adding the is_power_of_2() check on damon_start().
+
+The issue was discovered in the discussion at [1].
+
+Link: https://lore.kernel.org/20260411213638.77768-1-sj@kernel.org
+Link: https://lore.kernel.org/20260403155530.64647-1-sj@kernel.org [1]
+Fixes: d8f867fa0825 ("mm/damon: add damon_ctx->min_sz_region")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> # 6.18.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1352,6 +1352,11 @@ int damon_start(struct damon_ctx **ctxs,
+       int i;
+       int err = 0;
++      for (i = 0; i < nr_ctxs; i++) {
++              if (!is_power_of_2(ctxs[i]->min_sz_region))
++                      return -EINVAL;
++      }
++
+       mutex_lock(&damon_lock);
+       if ((exclusive && nr_running_ctxs) ||
+                       (!exclusive && running_exclusive_ctxs)) {
diff --git a/queue-6.18/mm-swap-speed-up-hibernation-allocation-and-writeout.patch b/queue-6.18/mm-swap-speed-up-hibernation-allocation-and-writeout.patch
new file mode 100644 (file)
index 0000000..dee2e91
--- /dev/null
@@ -0,0 +1,84 @@
+From stable+bounces-244832-greg=kroah.com@vger.kernel.org Fri May  8 23:18:04 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri,  8 May 2026 17:17:56 -0400
+Subject: mm, swap: speed up hibernation allocation and writeout
+To: stable@vger.kernel.org
+Cc: Kairui Song <kasong@tencent.com>, Carsten Grohmann <mail@carstengrohmann.de>, Baoquan He <bhe@redhat.com>, Barry Song <baohua@kernel.org>, Chris Li <chrisl@kernel.org>, Kemeng Shi <shikemeng@huaweicloud.com>, Nhat Pham <nphamcs@gmail.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260508211756.1960595-1-sashal@kernel.org>
+
+From: Kairui Song <kasong@tencent.com>
+
+[ Upstream commit 396f57b5720024638dbb503f6a4abd988a49d815 ]
+
+Since commit 0ff67f990bd4 ("mm, swap: remove swap slot cache"),
+hibernation has been using the swap slot slow allocation path for
+simplification, which turns out might cause regression for some devices
+because the allocator now rotates clusters too often, leading to slower
+allocation and more random distribution of data.
+
+Fast allocation is not complex, so implement hibernation support as well.
+
+Test result with Samsung SSD 830 Series (SATA II, 3.0 Gbps) shows the
+performance is several times better [1]:
+6.19:               324 seconds
+After this series:  35 seconds
+
+Link: https://lkml.kernel.org/r/20260216-hibernate-perf-v4-1-1ba9f0bf1ec9@tencent.com
+Link: https://lore.kernel.org/linux-mm/8b4bdcfa-ce3f-4e23-839f-31367df7c18f@gmx.de/ [1]
+Signed-off-by: Kairui Song <kasong@tencent.com>
+Fixes: 0ff67f990bd4 ("mm, swap: remove swap slot cache")
+Reported-by: Carsten Grohmann <mail@carstengrohmann.de>
+Closes: https://lore.kernel.org/linux-mm/20260206121151.dea3633d1f0ded7bbf49c22e@linux-foundation.org/
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Chris Li <chrisl@kernel.org>
+Cc: Kemeng Shi <shikemeng@huaweicloud.com>
+Cc: Nhat Pham <nphamcs@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ adjusted helper signatures ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/swapfile.c |   21 ++++++++++++++++-----
+ 1 file changed, 16 insertions(+), 5 deletions(-)
+
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2014,8 +2014,9 @@ out:
+ swp_entry_t get_swap_page_of_type(int type)
+ {
+-      struct swap_info_struct *si = swap_type_to_info(type);
+-      unsigned long offset;
++      struct swap_info_struct *pcp_si, *si = swap_type_to_info(type);
++      unsigned long pcp_offset, offset = SWAP_ENTRY_INVALID;
++      struct swap_cluster_info *ci;
+       swp_entry_t entry = {0};
+       if (!si)
+@@ -2025,11 +2026,21 @@ swp_entry_t get_swap_page_of_type(int ty
+       if (get_swap_device_info(si)) {
+               if (si->flags & SWP_WRITEOK) {
+                       /*
+-                       * Grab the local lock to be complaint
+-                       * with swap table allocation.
++                       * Try the local cluster first if it matches the device. If
++                       * not, try grab a new cluster and override local cluster.
+                        */
+                       local_lock(&percpu_swap_cluster.lock);
+-                      offset = cluster_alloc_swap_entry(si, 0, 1);
++                      pcp_si = this_cpu_read(percpu_swap_cluster.si[0]);
++                      pcp_offset = this_cpu_read(percpu_swap_cluster.offset[0]);
++                      if (pcp_si == si && pcp_offset) {
++                              ci = swap_cluster_lock(si, pcp_offset);
++                              if (cluster_is_usable(ci, 0))
++                                      offset = alloc_swap_scan_cluster(si, ci, pcp_offset, 0, 1);
++                              else
++                                      swap_cluster_unlock(ci);
++                      }
++                      if (!offset)
++                              offset = cluster_alloc_swap_entry(si, 0, 1);
+                       local_unlock(&percpu_swap_cluster.lock);
+                       if (offset)
+                               entry = swp_entry(si->type, offset);
diff --git a/queue-6.18/mmc-core-add-quirk-for-incorrect-manufacturing-date.patch b/queue-6.18/mmc-core-add-quirk-for-incorrect-manufacturing-date.patch
new file mode 100644 (file)
index 0000000..c9c74d4
--- /dev/null
@@ -0,0 +1,97 @@
+From stable+bounces-244078-greg=kroah.com@vger.kernel.org Tue May  5 12:42:41 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:17:30 -0400
+Subject: mmc: core: Add quirk for incorrect manufacturing date
+To: stable@vger.kernel.org
+Cc: Avri Altman <avri.altman@sandisk.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505101731.582352-2-sashal@kernel.org>
+
+From: Avri Altman <avri.altman@sandisk.com>
+
+[ Upstream commit 263ff314cc5602599d481b0912a381555fcbad28 ]
+
+Some eMMC vendors need to report manufacturing dates beyond 2025 but are
+reluctant to update the EXT_CSD revision from 8 to 9. Changing the
+Updating the EXT_CSD revision may involve additional testing or
+qualification steps with customers. To ease this transition and avoid a
+full re-qualification process, a workaround is needed. This
+patch introduces a temporary quirk that re-purposes the year codes
+corresponding to 2010, 2011, and 2012 to represent the years 2026, 2027,
+and 2028, respectively. This solution is only valid for this three-year
+period.
+
+After 2028, vendors must update their firmware to set EXT_CSD_REV=9 to
+continue reporting the correct manufacturing date in compliance with the
+JEDEC standard.
+
+The `MMC_QUIRK_BROKEN_MDT` is introduced and enabled for all Sandisk
+devices to handle this behavior.
+
+Signed-off-by: Avri Altman <avri.altman@sandisk.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: d6bf2e64dec8 ("mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/card.h   |    6 ++++++
+ drivers/mmc/core/mmc.c    |    5 +++++
+ drivers/mmc/core/quirks.h |    3 +++
+ include/linux/mmc/card.h  |    1 +
+ 4 files changed, 15 insertions(+)
+
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -89,6 +89,7 @@ struct mmc_fixup {
+ #define CID_MANFID_MICRON       0x13
+ #define CID_MANFID_SAMSUNG      0x15
+ #define CID_MANFID_APACER       0x27
++#define CID_MANFID_SANDISK_MMC  0x45
+ #define CID_MANFID_SWISSBIT     0x5D
+ #define CID_MANFID_KINGSTON     0x70
+ #define CID_MANFID_HYNIX      0x90
+@@ -305,4 +306,9 @@ static inline int mmc_card_no_uhs_ddr50_
+       return c->quirks & MMC_QUIRK_NO_UHS_DDR50_TUNING;
+ }
++static inline int mmc_card_broken_mdt(const struct mmc_card *c)
++{
++      return c->quirks & MMC_QUIRK_BROKEN_MDT;
++}
++
+ #endif
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -676,6 +676,11 @@ static int mmc_decode_ext_csd(struct mmc
+                       /* Adjust production date as per JEDEC JESD84-B51B September 2025 */
+                       if (card->cid.year < 2023)
+                               card->cid.year += 16;
++              } else {
++                      /* Handle vendors with broken MDT reporting */
++                      if (mmc_card_broken_mdt(card) && card->cid.year >= 2010 &&
++                          card->cid.year <= 2012)
++                              card->cid.year += 16;
+               }
+       }
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -170,6 +170,9 @@ static const struct mmc_fixup __maybe_un
+       MMC_FIXUP_EXT_CSD_REV(CID_NAME_ANY, CID_MANFID_NUMONYX,
+                             0x014e, add_quirk, MMC_QUIRK_BROKEN_HPI, 6),
++      MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_MMC, CID_OEMID_ANY, add_quirk_mmc,
++                MMC_QUIRK_BROKEN_MDT),
++
+       END_FIXUP
+ };
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -330,6 +330,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_CACHE_FLUSH  (1<<16) /* Don't flush cache until the write has occurred */
+ #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY   (1<<17) /* Disable broken SD poweroff notify support */
+ #define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
++#define MMC_QUIRK_BROKEN_MDT    (1<<19) /* Wrong manufacturing year */
+       bool                    written_flag;   /* Indicates eMMC has been written since power on */
+       bool                    reenable_cmdq;  /* Re-enable Command Queue */
diff --git a/queue-6.18/mmc-core-adjust-mdt-beyond-2025.patch b/queue-6.18/mmc-core-adjust-mdt-beyond-2025.patch
new file mode 100644 (file)
index 0000000..b105ef7
--- /dev/null
@@ -0,0 +1,66 @@
+From stable+bounces-244077-greg=kroah.com@vger.kernel.org Tue May  5 12:45:15 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:17:29 -0400
+Subject: mmc: core: Adjust MDT beyond 2025
+To: stable@vger.kernel.org
+Cc: Avri Altman <avri.altman@sandisk.com>, Shawn Lin <shawn.lin@rock-chips.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505101731.582352-1-sashal@kernel.org>
+
+From: Avri Altman <avri.altman@sandisk.com>
+
+[ Upstream commit 3e487a634bc019166e452ea276f7522710eda9f4 ]
+
+JEDEC JESD84-B51B which was released in September 2025, increases the
+manufacturing year limit for eMMC devices. The eMMC manufacturing year
+is stored in a 4-bit field in the CID register. Originally, it covered
+1997–2012. Later, with EXT_CSD_REV=8, it was extended up to 2025. Now,
+with EXT_CSD_REV=9, the range is rolled over by another 16 years, up to
+2038.
+
+The mapping is as follows:
+cid[8..11] | rev ≤ 4 | 8 ≥ rev > 4 | rev > 8
+---------------------------------------------
+0          | 1997    | 2013        | 2029
+1          | 1998    | 2014        | 2030
+2          | 1999    | 2015        | 2031
+3          | 2000    | 2016        | 2032
+4          | 2001    | 2017        | 2033
+5          | 2002    | 2018        | 2034
+6          | 2003    | 2019        | 2035
+7          | 2004    | 2020        | 2036
+8          | 2005    | 2021        | 2037
+9          | 2006    | 2022        | 2038
+10         | 2007    | 2023        |
+11         | 2008    | 2024        |
+12         | 2009    | 2025        |
+13         | 2010    |             | 2026
+14         | 2011    |             | 2027
+15         | 2012    |             | 2028
+
+Signed-off-by: Avri Altman <avri.altman@sandisk.com>
+Reviewed-by: Shawn Lin <shawn.lin@rock-chips.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: d6bf2e64dec8 ("mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/mmc.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -671,7 +671,14 @@ static int mmc_decode_ext_csd(struct mmc
+               card->ext_csd.enhanced_rpmb_supported =
+                                       (card->ext_csd.rel_param &
+                                        EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
++
++              if (card->ext_csd.rev >= 9) {
++                      /* Adjust production date as per JEDEC JESD84-B51B September 2025 */
++                      if (card->cid.year < 2023)
++                              card->cid.year += 16;
++              }
+       }
++
+ out:
+       return err;
+ }
diff --git a/queue-6.18/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch b/queue-6.18/mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch
new file mode 100644 (file)
index 0000000..5f6563e
--- /dev/null
@@ -0,0 +1,93 @@
+From stable+bounces-244080-greg=kroah.com@vger.kernel.org Tue May  5 12:45:28 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 06:17:31 -0400
+Subject: mmc: core: Optimize time for secure erase/trim for some Kingston eMMCs
+To: stable@vger.kernel.org
+Cc: Luke Wang <ziniu.wang_1@nxp.com>, Ulf Hansson <ulf.hansson@linaro.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505101731.582352-3-sashal@kernel.org>
+
+From: Luke Wang <ziniu.wang_1@nxp.com>
+
+[ Upstream commit d6bf2e64dec87322f2b11565ddb59c0e967f96e3 ]
+
+Kingston eMMC IY2964 and IB2932 takes a fixed ~2 seconds for each secure
+erase/trim operation regardless of size - that is, a single secure
+erase/trim operation of 1MB takes the same time as 1GB. With default
+calculated 3.5MB max discard size, secure erase 1GB requires ~300 separate
+operations taking ~10 minutes total.
+
+Add a card quirk, MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME, to set maximum
+secure erase size for those devices. This allows 1GB secure erase to
+complete in a single operation, reducing time from 10 minutes to just 2
+seconds.
+
+Signed-off-by: Luke Wang <ziniu.wang_1@nxp.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/card.h   |    5 +++++
+ drivers/mmc/core/queue.c  |    9 +++++++--
+ drivers/mmc/core/quirks.h |    9 +++++++++
+ include/linux/mmc/card.h  |    1 +
+ 4 files changed, 22 insertions(+), 2 deletions(-)
+
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -311,4 +311,9 @@ static inline int mmc_card_broken_mdt(co
+       return c->quirks & MMC_QUIRK_BROKEN_MDT;
+ }
++static inline int mmc_card_fixed_secure_erase_trim_time(const struct mmc_card *c)
++{
++      return c->quirks & MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME;
++}
++
+ #endif
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -184,8 +184,13 @@ static void mmc_queue_setup_discard(stru
+               return;
+       lim->max_hw_discard_sectors = max_discard;
+-      if (mmc_card_can_secure_erase_trim(card))
+-              lim->max_secure_erase_sectors = max_discard;
++      if (mmc_card_can_secure_erase_trim(card)) {
++              if (mmc_card_fixed_secure_erase_trim_time(card))
++                      lim->max_secure_erase_sectors = UINT_MAX >> card->erase_shift;
++              else
++                      lim->max_secure_erase_sectors = max_discard;
++      }
++
+       if (mmc_card_can_trim(card) && card->erased_byte == 0)
+               lim->max_write_zeroes_sectors = max_discard;
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -153,6 +153,15 @@ static const struct mmc_fixup __maybe_un
+       MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
+                 MMC_QUIRK_TRIM_BROKEN),
++      /*
++       * On Some Kingston eMMCs, secure erase/trim time is independent
++       * of erase size, fixed at approximately 2 seconds.
++       */
++      MMC_FIXUP("IY2964", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
++                MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME),
++      MMC_FIXUP("IB2932", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc,
++                MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME),
++
+       END_FIXUP
+ };
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -331,6 +331,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_SD_POWEROFF_NOTIFY   (1<<17) /* Disable broken SD poweroff notify support */
+ #define MMC_QUIRK_NO_UHS_DDR50_TUNING (1<<18) /* Disable DDR50 tuning */
+ #define MMC_QUIRK_BROKEN_MDT    (1<<19) /* Wrong manufacturing year */
++#define MMC_QUIRK_FIXED_SECURE_ERASE_TRIM_TIME        (1<<20) /* Secure erase/trim time is fixed regardless of size */
+       bool                    written_flag;   /* Indicates eMMC has been written since power on */
+       bool                    reenable_cmdq;  /* Re-enable Command Queue */
diff --git a/queue-6.18/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch b/queue-6.18/net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch
new file mode 100644 (file)
index 0000000..89a43f3
--- /dev/null
@@ -0,0 +1,120 @@
+From stable+bounces-245024-greg=kroah.com@vger.kernel.org Sun May 10 15:54:26 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 May 2026 09:54:16 -0400
+Subject: net: stmmac: Prevent NULL deref when RX memory exhausted
+To: stable@vger.kernel.org
+Cc: Sam Edwards <cfsworks@gmail.com>, Russell King <linux@armlinux.org.uk>, Sam Edwards <CFSworks@gmail.com>, Paolo Abeni <pabeni@redhat.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260510135416.4143602-2-sashal@kernel.org>
+
+From: Sam Edwards <cfsworks@gmail.com>
+
+[ Upstream commit 0bb05e6adfa99a2ea1fee1125cc0953409f83ed8 ]
+
+The CPU receives frames from the MAC through conventional DMA: the CPU
+allocates buffers for the MAC, then the MAC fills them and returns
+ownership to the CPU. For each hardware RX queue, the CPU and MAC
+coordinate through a shared ring array of DMA descriptors: one
+descriptor per DMA buffer. Each descriptor includes the buffer's
+physical address and a status flag ("OWN") indicating which side owns
+the buffer: OWN=0 for CPU, OWN=1 for MAC. The CPU is only allowed to set
+the flag and the MAC is only allowed to clear it, and both must move
+through the ring in sequence: thus the ring is used for both
+"submissions" and "completions."
+
+In the stmmac driver, stmmac_rx() bookmarks its position in the ring
+with the `cur_rx` index. The main receive loop in that function checks
+for rx_descs[cur_rx].own=0, gives the corresponding buffer to the
+network stack (NULLing the pointer), and increments `cur_rx` modulo the
+ring size. After the loop exits, stmmac_rx_refill(), which bookmarks its
+position with `dirty_rx`, allocates fresh buffers and rearms the
+descriptors (setting OWN=1). If it fails any allocation, it simply stops
+early (leaving OWN=0) and will retry where it left off when next called.
+
+This means descriptors have a three-stage lifecycle (terms my own):
+- `empty` (OWN=1, buffer valid)
+- `full` (OWN=0, buffer valid and populated)
+- `dirty` (OWN=0, buffer NULL)
+
+But because stmmac_rx() only checks OWN, it confuses `full`/`dirty`. In
+the past (see 'Fixes:'), there was a bug where the loop could cycle
+`cur_rx` all the way back to the first descriptor it dirtied, resulting
+in a NULL dereference when mistaken for `full`. The aforementioned
+commit resolved that *specific* failure by capping the loop's iteration
+limit at `dma_rx_size - 1`, but this is only a partial fix: if the
+previous stmmac_rx_refill() didn't complete, then there are leftover
+`dirty` descriptors that the loop might encounter without needing to
+cycle fully around. The current code therefore panics (see 'Closes:')
+when stmmac_rx_refill() is memory-starved long enough for `cur_rx` to
+catch up to `dirty_rx`.
+
+Fix this by explicitly checking, before advancing `cur_rx`, if the next
+entry is dirty; exit the loop if so. This prevents processing of the
+final, used descriptor until stmmac_rx_refill() succeeds, but
+fully prevents the `cur_rx == dirty_rx` ambiguity as the previous bugfix
+intended: so remove the clamp as well. Since stmmac_rx_zc() is a
+copy-paste-and-tweak of stmmac_rx() and the code structure is identical,
+any fix to stmmac_rx() will also need a corresponding fix for
+stmmac_rx_zc(). Therefore, apply the same check there.
+
+In stmmac_rx() (not stmmac_rx_zc()), a related bug remains: after the
+MAC sets OWN=0 on the final descriptor, it will be unable to send any
+further DMA-complete IRQs until it's given more `empty` descriptors.
+Currently, the driver simply *hopes* that the next stmmac_rx_refill()
+succeeds, risking an indefinite stall of the receive process if not. But
+this is not a regression, so it can be addressed in a future change.
+
+Fixes: b6cb4541853c7 ("net: stmmac: avoid rx queue overrun")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=221010
+Cc: stable@vger.kernel.org
+Suggested-by: Russell King <linux@armlinux.org.uk>
+Signed-off-by: Sam Edwards <CFSworks@gmail.com>
+Link: https://patch.msgid.link/20260422044503.5349-1-CFSworks@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |   19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -5282,9 +5282,12 @@ read_again:
+                       break;
+               /* Prefetch the next RX descriptor */
+-              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+-                                              priv->dma_conf.dma_rx_size);
+-              next_entry = rx_q->cur_rx;
++              next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
++                                             priv->dma_conf.dma_rx_size);
++              if (unlikely(next_entry == rx_q->dirty_rx))
++                      break;
++
++              rx_q->cur_rx = next_entry;
+               if (priv->extend_desc)
+                       np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+@@ -5422,7 +5425,6 @@ static int stmmac_rx(struct stmmac_priv
+       dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+       bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+-      limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+       if (netif_msg_rx_status(priv)) {
+               void *rx_head;
+@@ -5478,9 +5480,12 @@ read_again:
+               if (unlikely(status & dma_own))
+                       break;
+-              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+-                                              priv->dma_conf.dma_rx_size);
+-              next_entry = rx_q->cur_rx;
++              next_entry = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
++                                             priv->dma_conf.dma_rx_size);
++              if (unlikely(next_entry == rx_q->dirty_rx))
++                      break;
++
++              rx_q->cur_rx = next_entry;
+               if (priv->extend_desc)
+                       np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
diff --git a/queue-6.18/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch b/queue-6.18/net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch
new file mode 100644 (file)
index 0000000..c471545
--- /dev/null
@@ -0,0 +1,181 @@
+From stable+bounces-245023-greg=kroah.com@vger.kernel.org Sun May 10 15:54:23 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 May 2026 09:54:15 -0400
+Subject: net: stmmac: rename STMMAC_GET_ENTRY() -> STMMAC_NEXT_ENTRY()
+To: stable@vger.kernel.org
+Cc: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260510135416.4143602-1-sashal@kernel.org>
+
+From: "Russell King (Oracle)" <rmk+kernel@armlinux.org.uk>
+
+[ Upstream commit 6b4286e0550814cdc4b897f881ec1fa8b0313227 ]
+
+STMMAC_GET_ENTRY() doesn't describe what this macro is doing - it is
+incrementing the provided index for the circular array of descriptors.
+Replace "GET" with "NEXT" as this better describes the action here.
+
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Link: https://patch.msgid.link/E1w2vba-0000000DbWo-1oL5@rmk-PC.armlinux.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 0bb05e6adfa9 ("net: stmmac: Prevent NULL deref when RX memory exhausted")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/chain_mode.c  |    2 -
+ drivers/net/ethernet/stmicro/stmmac/common.h      |    2 -
+ drivers/net/ethernet/stmicro/stmmac/ring_mode.c   |    2 -
+ drivers/net/ethernet/stmicro/stmmac/stmmac_main.c |   26 +++++++++++-----------
+ 4 files changed, 16 insertions(+), 16 deletions(-)
+
+--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+@@ -47,7 +47,7 @@ static int jumbo_frm(struct stmmac_tx_qu
+       while (len != 0) {
+               tx_q->tx_skbuff[entry] = NULL;
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               desc = tx_q->dma_tx + entry;
+               if (len > bmax) {
+--- a/drivers/net/ethernet/stmicro/stmmac/common.h
++++ b/drivers/net/ethernet/stmicro/stmmac/common.h
+@@ -60,7 +60,7 @@ static inline bool dwmac_is_xmac(enum dw
+ #define DMA_MIN_RX_SIZE               64
+ #define DMA_MAX_RX_SIZE               1024
+ #define DMA_DEFAULT_RX_SIZE   512
+-#define STMMAC_GET_ENTRY(x, size)     ((x + 1) & (size - 1))
++#define STMMAC_NEXT_ENTRY(x, size)    ((x + 1) & (size - 1))
+ #undef FRAME_FILTER_DEBUG
+ /* #define FRAME_FILTER_DEBUG */
+--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
++++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+@@ -51,7 +51,7 @@ static int jumbo_frm(struct stmmac_tx_qu
+               stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
+                               STMMAC_RING_MODE, 0, false, skb->len);
+               tx_q->tx_skbuff[entry] = NULL;
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               if (priv->extend_desc)
+                       desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -2609,7 +2609,7 @@ static bool stmmac_xdp_xmit_zc(struct st
+               xsk_tx_metadata_to_compl(meta,
+                                        &tx_q->tx_skbuff_dma[entry].xsk_meta);
+-              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
++              tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+               entry = tx_q->cur_tx;
+       }
+       u64_stats_update_begin(&txq_stats->napi_syncp);
+@@ -2780,7 +2780,7 @@ static int stmmac_tx_clean(struct stmmac
+               stmmac_release_tx_desc(priv, p, priv->mode);
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       }
+       tx_q->dirty_tx = entry;
+@@ -4079,7 +4079,7 @@ static bool stmmac_vlan_insert(struct st
+               return false;
+       stmmac_set_tx_owner(priv, p);
+-      tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
++      tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+       return true;
+ }
+@@ -4107,7 +4107,7 @@ static void stmmac_tso_allocator(struct
+       while (tmp_len > 0) {
+               dma_addr_t curr_addr;
+-              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
++              tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
+                                               priv->dma_conf.dma_tx_size);
+               WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+@@ -4258,7 +4258,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+               stmmac_set_mss(priv, mss_desc, mss);
+               tx_q->mss = mss;
+-              tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
++              tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
+                                               priv->dma_conf.dma_tx_size);
+               WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+       }
+@@ -4362,7 +4362,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
+-      tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
++      tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+       if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+               netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+@@ -4566,7 +4566,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+               int len = skb_frag_size(frag);
+               bool last_segment = (i == (nfrags - 1));
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+               WARN_ON(tx_q->tx_skbuff[entry]);
+               if (likely(priv->extend_desc))
+@@ -4636,7 +4636,7 @@ static netdev_tx_t stmmac_xmit(struct sk
+        * ndo_start_xmit will fill this descriptor the next time it's
+        * called and stmmac_tx_clean may clean up to this descriptor.
+        */
+-      entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++      entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       tx_q->cur_tx = entry;
+       if (netif_msg_pktdata(priv)) {
+@@ -4805,7 +4805,7 @@ static inline void stmmac_rx_refill(stru
+               dma_wmb();
+               stmmac_set_rx_owner(priv, p, use_rx_wd);
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
+       }
+       rx_q->dirty_rx = entry;
+       rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+@@ -4953,7 +4953,7 @@ static int stmmac_xdp_xmit_xdpf(struct s
+       stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
+-      entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
++      entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
+       tx_q->cur_tx = entry;
+       return STMMAC_XDP_TX;
+@@ -5187,7 +5187,7 @@ static bool stmmac_rx_refill_zc(struct s
+               dma_wmb();
+               stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
+-              entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
++              entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
+       }
+       if (rx_desc) {
+@@ -5282,7 +5282,7 @@ read_again:
+                       break;
+               /* Prefetch the next RX descriptor */
+-              rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
++              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+                                               priv->dma_conf.dma_rx_size);
+               next_entry = rx_q->cur_rx;
+@@ -5478,7 +5478,7 @@ read_again:
+               if (unlikely(status & dma_own))
+                       break;
+-              rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
++              rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
+                                               priv->dma_conf.dma_rx_size);
+               next_entry = rx_q->cur_rx;
diff --git a/queue-6.18/octeon_ep_vf-add-null-check-for-napi_build_skb.patch b/queue-6.18/octeon_ep_vf-add-null-check-for-napi_build_skb.patch
new file mode 100644 (file)
index 0000000..26f28e8
--- /dev/null
@@ -0,0 +1,86 @@
+From stable+bounces-244036-greg=kroah.com@vger.kernel.org Tue May  5 11:49:44 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue,  5 May 2026 05:49:31 -0400
+Subject: octeon_ep_vf: add NULL check for napi_build_skb()
+To: stable@vger.kernel.org
+Cc: David Carlier <devnexen@gmail.com>, Jakub Kicinski <kuba@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260505094932.505988-1-sashal@kernel.org>
+
+From: David Carlier <devnexen@gmail.com>
+
+[ Upstream commit dd66b42854705e4e4ee7f14d260f86c578bed3e3 ]
+
+napi_build_skb() can return NULL on allocation failure. In
+__octep_vf_oq_process_rx(), the result is used directly without a NULL
+check in both the single-buffer and multi-fragment paths, leading to a
+NULL pointer dereference.
+
+Add NULL checks after both napi_build_skb() calls, properly advancing
+descriptors and consuming remaining fragments on failure.
+
+Fixes: 1cd3b407977c ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Cc: stable@vger.kernel.org
+Signed-off-by: David Carlier <devnexen@gmail.com>
+Link: https://patch.msgid.link/20260409184009.930359-3-devnexen@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ inlined missing octep_vf_oq_next_idx() helper as read_idx++ with wraparound ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c |   36 +++++++++++++++-
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+@@ -409,10 +409,17 @@ static int __octep_vf_oq_process_rx(stru
+                       data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
+                       rx_ol_flags = 0;
+               }
+-              rx_bytes += buff_info->len;
+-
+               if (buff_info->len <= oq->max_single_buffer_size) {
+                       skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
++                      if (!skb) {
++                              oq->stats->alloc_failures++;
++                              desc_used++;
++                              read_idx++;
++                              if (read_idx == oq->max_count)
++                                      read_idx = 0;
++                              continue;
++                      }
++                      rx_bytes += buff_info->len;
+                       skb_reserve(skb, data_offset);
+                       skb_put(skb, buff_info->len);
+                       read_idx++;
+@@ -424,6 +431,31 @@ static int __octep_vf_oq_process_rx(stru
+                       u16 data_len;
+                       skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
++                      if (!skb) {
++                              oq->stats->alloc_failures++;
++                              desc_used++;
++                              read_idx++;
++                              if (read_idx == oq->max_count)
++                                      read_idx = 0;
++                              data_len = buff_info->len - oq->max_single_buffer_size;
++                              while (data_len) {
++                                      dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
++                                                     PAGE_SIZE, DMA_FROM_DEVICE);
++                                      buff_info = (struct octep_vf_rx_buffer *)
++                                                  &oq->buff_info[read_idx];
++                                      buff_info->page = NULL;
++                                      if (data_len < oq->buffer_size)
++                                              data_len = 0;
++                                      else
++                                              data_len -= oq->buffer_size;
++                                      desc_used++;
++                                      read_idx++;
++                                      if (read_idx == oq->max_count)
++                                              read_idx = 0;
++                              }
++                              continue;
++                      }
++                      rx_bytes += buff_info->len;
+                       skb_reserve(skb, data_offset);
+                       /* Head fragment includes response header(s);
+                        * subsequent fragments contains only data.
diff --git a/queue-6.18/printk-add-print_hex_dump_devel.patch b/queue-6.18/printk-add-print_hex_dump_devel.patch
new file mode 100644 (file)
index 0000000..9b76066
--- /dev/null
@@ -0,0 +1,49 @@
+From stable+bounces-244986-greg=kroah.com@vger.kernel.org Sat May  9 23:30:06 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat,  9 May 2026 17:29:56 -0400
+Subject: printk: add print_hex_dump_devel()
+To: stable@vger.kernel.org
+Cc: Thorsten Blum <thorsten.blum@linux.dev>, Herbert Xu <herbert@gondor.apana.org.au>, John Ogness <john.ogness@linutronix.de>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509212957.3843722-1-sashal@kernel.org>
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+[ Upstream commit d134feeb5df33fbf77f482f52a366a44642dba09 ]
+
+Add print_hex_dump_devel() as the hex dump equivalent of pr_devel(),
+which emits output only when DEBUG is enabled, but keeps call sites
+compiled otherwise.
+
+Suggested-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Reviewed-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Stable-dep-of: 177730a273b1 ("crypto: caam - guard HMAC key hex dumps in hash_digest_key")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/printk.h |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -802,6 +802,19 @@ static inline void print_hex_dump_debug(
+ }
+ #endif
++#if defined(DEBUG)
++#define print_hex_dump_devel(prefix_str, prefix_type, rowsize,                \
++                           groupsize, buf, len, ascii)                \
++      print_hex_dump(KERN_DEBUG, prefix_str, prefix_type, rowsize,    \
++                     groupsize, buf, len, ascii)
++#else
++static inline void print_hex_dump_devel(const char *prefix_str, int prefix_type,
++                                      int rowsize, int groupsize,
++                                      const void *buf, size_t len, bool ascii)
++{
++}
++#endif
++
+ /**
+  * print_hex_dump_bytes - shorthand form of print_hex_dump() with default params
+  * @prefix_str: string to prefix each line with;
diff --git a/queue-6.18/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch b/queue-6.18/rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch
new file mode 100644 (file)
index 0000000..9ecb6ea
--- /dev/null
@@ -0,0 +1,269 @@
+From gary@garyguo.net Tue May 12 17:03:21 2026
+From: Gary Guo <gary@garyguo.net>
+Date: Tue, 12 May 2026 16:02:57 +0100
+Subject: rust: pin-init: fix incorrect accessor reference lifetime
+To: gregkh@linuxfoundation.org, ojeda@kernel.org
+Cc: stable@vger.kernel.org, Gary Guo <gary@garyguo.net>
+Message-ID: <20260512150257.3240635-1-gary@garyguo.net>
+
+From: Gary Guo <gary@garyguo.net>
+
+commit 68bf102226cf2199dc609b67c1e847cad4de4b57 upstream
+
+When a field has been initialized, `init!`/`pin_init!` create a reference
+or pinned reference to the field so it can be accessed later during the
+initialization of other fields. However, the reference it created is
+incorrectly `&'static` rather than just the scope of the initializer.
+
+This means that you can do
+
+    init!(Foo {
+        a: 1,
+        _: {
+            let b: &'static u32 = a;
+        }
+    })
+
+which is unsound.
+
+This is caused by `&mut (*$slot).$ident`, which actually allows arbitrary
+lifetime, so this is effectively `'static`.
+
+Fix it by adding a `let_binding` method on `DropGuard` to shorten the lifetime.
+This results in exactly what we want for these accessors. The safety and
+invariant comments of `DropGuard` have been reworked; instead of reasoning
+about what caller can do with the guard, express it in a way that the
+ownership is transferred to the guard and `forget` takes it back, so the
+unsafe operations within the `DropGuard` can be more easily justified.
+
+Assisted-by: Claude:claude-3-opus
+Signed-off-by: Gary Guo <gary@garyguo.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/pin-init/src/__internal.rs |   28 ++++++++----
+ rust/pin-init/src/macros.rs     |   91 +++++++++++++++++++++++-----------------
+ 2 files changed, 73 insertions(+), 46 deletions(-)
+
+--- a/rust/pin-init/src/__internal.rs
++++ b/rust/pin-init/src/__internal.rs
+@@ -218,32 +218,42 @@ fn stack_init_reuse() {
+ /// When a value of this type is dropped, it drops a `T`.
+ ///
+ /// Can be forgotten to prevent the drop.
++///
++/// # Invariants
++///
++/// - `ptr` is valid and properly aligned.
++/// - `*ptr` is initialized and owned by this guard.
+ pub struct DropGuard<T: ?Sized> {
+     ptr: *mut T,
+ }
+ impl<T: ?Sized> DropGuard<T> {
+-    /// Creates a new [`DropGuard<T>`]. It will [`ptr::drop_in_place`] `ptr` when it gets dropped.
++    /// Creates a drop guard and transfers the ownership of the pointer content.
+     ///
+-    /// # Safety
++    /// The ownership is only relinquished if the guard is forgotten via [`core::mem::forget`].
+     ///
+-    /// `ptr` must be a valid pointer.
++    /// # Safety
+     ///
+-    /// It is the callers responsibility that `self` will only get dropped if the pointee of `ptr`:
+-    /// - has not been dropped,
+-    /// - is not accessible by any other means,
+-    /// - will not be dropped by any other means.
++    /// - `ptr` is valid and properly aligned.
++    /// - `*ptr` is initialized, and the ownership is transferred to this guard.
+     #[inline]
+     pub unsafe fn new(ptr: *mut T) -> Self {
++        // INVARIANT: By safety requirement.
+         Self { ptr }
+     }
++
++    /// Creates a let binding for accessor use.
++    #[inline]
++    pub fn let_binding(&mut self) -> &mut T {
++        // SAFETY: Per type invariant.
++        unsafe { &mut *self.ptr }
++    }
+ }
+ impl<T: ?Sized> Drop for DropGuard<T> {
+     #[inline]
+     fn drop(&mut self) {
+-        // SAFETY: A `DropGuard` can only be constructed using the unsafe `new` function
+-        // ensuring that this operation is safe.
++        // SAFETY: `self.ptr` is valid, properly aligned and `*self.ptr` is owned by this guard.
+         unsafe { ptr::drop_in_place(self.ptr) }
+     }
+ }
+--- a/rust/pin-init/src/macros.rs
++++ b/rust/pin-init/src/macros.rs
+@@ -1310,27 +1310,33 @@ macro_rules! __init_internal {
+         // return when an error/panic occurs.
+         // We also use the `data` to require the correct trait (`Init` or `PinInit`) for `$field`.
+         unsafe { $data.$field(::core::ptr::addr_of_mut!((*$slot).$field), init)? };
+-        // NOTE: the field accessor ensures that the initialized field is properly aligned.
++        // NOTE: this ensures that the initialized field is properly aligned.
+         // Unaligned fields will cause the compiler to emit E0793. We do not support
+         // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+         // `ptr::write` below has the same requirement.
+-        // SAFETY:
+-        // - the project function does the correct field projection,
+-        // - the field has been initialized,
+-        // - the reference is only valid until the end of the initializer.
+-        #[allow(unused_variables)]
+-        let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
++        // SAFETY: the field has been initialized.
++        let _ = unsafe { &mut (*$slot).$field };
+         // Create the drop guard:
+         //
+         // We rely on macro hygiene to make it impossible for users to access this local variable.
+         // We use `paste!` to create new hygiene for `$field`.
+         $crate::macros::paste! {
+-            // SAFETY: We forget the guard later when initialization has succeeded.
+-            let [< __ $field _guard >] = unsafe {
++            // SAFETY:
++            // - `addr_of_mut!((*$slot).$field)` is valid.
++            // - `(*$slot).$field` has been initialized above.
++            // - We only need the ownership to the pointee back when initialization has
++            //   succeeded, where we `forget` the guard.
++            let mut [< __ $field _guard >] = unsafe {
+                 $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+             };
++            // NOTE: The reference is derived from the guard so that it only lives as long as
++            // the guard does and cannot escape the scope.
++            #[allow(unused_variables)]
++            // SAFETY: the project function does the correct field projection.
++            let $field = unsafe { $data.[< __project_ $field >]([< __ $field _guard >].let_binding()) };
++
+             $crate::__init_internal!(init_slot($use_data):
+                 @data($data),
+                 @slot($slot),
+@@ -1353,27 +1359,30 @@ macro_rules! __init_internal {
+         // return when an error/panic occurs.
+         unsafe { $crate::Init::__init(init, ::core::ptr::addr_of_mut!((*$slot).$field))? };
+-        // NOTE: the field accessor ensures that the initialized field is properly aligned.
++        // NOTE: this ensures that the initialized field is properly aligned.
+         // Unaligned fields will cause the compiler to emit E0793. We do not support
+         // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+         // `ptr::write` below has the same requirement.
+-        // SAFETY:
+-        // - the field is not structurally pinned, since the line above must compile,
+-        // - the field has been initialized,
+-        // - the reference is only valid until the end of the initializer.
+-        #[allow(unused_variables)]
+-        let $field = unsafe { &mut (*$slot).$field };
++        // SAFETY: the field has been initialized.
++        let _ = unsafe { &mut (*$slot).$field };
+         // Create the drop guard:
+         //
+         // We rely on macro hygiene to make it impossible for users to access this local variable.
+         // We use `paste!` to create new hygiene for `$field`.
+         $crate::macros::paste! {
+-            // SAFETY: We forget the guard later when initialization has succeeded.
+-            let [< __ $field _guard >] = unsafe {
++            // SAFETY:
++            // - `addr_of_mut!((*$slot).$field)` is valid.
++            // - `(*$slot).$field` has been initialized above.
++            // - We only need the ownership to the pointee back when initialization has
++            //   succeeded, where we `forget` the guard.
++            let mut [< __ $field _guard >] = unsafe {
+                 $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+             };
++            #[allow(unused_variables)]
++            let $field = [< __ $field _guard >].let_binding();
++
+             $crate::__init_internal!(init_slot():
+                 @data($data),
+                 @slot($slot),
+@@ -1397,28 +1406,30 @@ macro_rules! __init_internal {
+             unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+         }
+-        // NOTE: the field accessor ensures that the initialized field is properly aligned.
++        // NOTE: this ensures that the initialized field is properly aligned.
+         // Unaligned fields will cause the compiler to emit E0793. We do not support
+         // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+         // `ptr::write` below has the same requirement.
+-        #[allow(unused_variables)]
+-        // SAFETY:
+-        // - the field is not structurally pinned, since no `use_data` was required to create this
+-        //   initializer,
+-        // - the field has been initialized,
+-        // - the reference is only valid until the end of the initializer.
+-        let $field = unsafe { &mut (*$slot).$field };
++        // SAFETY: the field has been initialized.
++        let _ = unsafe { &mut (*$slot).$field };
+         // Create the drop guard:
+         //
+         // We rely on macro hygiene to make it impossible for users to access this local variable.
+         // We use `paste!` to create new hygiene for `$field`.
+         $crate::macros::paste! {
+-            // SAFETY: We forget the guard later when initialization has succeeded.
+-            let [< __ $field _guard >] = unsafe {
++            // SAFETY:
++            // - `addr_of_mut!((*$slot).$field)` is valid.
++            // - `(*$slot).$field` has been initialized above.
++            // - We only need the ownership to the pointee back when initialization has
++            //   succeeded, where we `forget` the guard.
++            let mut [< __ $field _guard >] = unsafe {
+                 $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+             };
++            #[allow(unused_variables)]
++            let $field = [< __ $field _guard >].let_binding();
++
+             $crate::__init_internal!(init_slot():
+                 @data($data),
+                 @slot($slot),
+@@ -1441,27 +1452,33 @@ macro_rules! __init_internal {
+             // SAFETY: The memory at `slot` is uninitialized.
+             unsafe { ::core::ptr::write(::core::ptr::addr_of_mut!((*$slot).$field), $field) };
+         }
+-        // NOTE: the field accessor ensures that the initialized field is properly aligned.
++        // NOTE: this ensures that the initialized field is properly aligned.
+         // Unaligned fields will cause the compiler to emit E0793. We do not support
+         // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+         // `ptr::write` below has the same requirement.
+-        // SAFETY:
+-        // - the project function does the correct field projection,
+-        // - the field has been initialized,
+-        // - the reference is only valid until the end of the initializer.
+-        #[allow(unused_variables)]
+-        let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
++        // SAFETY: the field has been initialized.
++        let _ = unsafe { &mut (*$slot).$field };
+         // Create the drop guard:
+         //
+         // We rely on macro hygiene to make it impossible for users to access this local variable.
+         // We use `paste!` to create new hygiene for `$field`.
+         $crate::macros::paste! {
+-            // SAFETY: We forget the guard later when initialization has succeeded.
+-            let [< __ $field _guard >] = unsafe {
++            // SAFETY:
++            // - `addr_of_mut!((*$slot).$field)` is valid.
++            // - `(*$slot).$field` has been initialized above.
++            // - We only need the ownership to the pointee back when initialization has
++            //   succeeded, where we `forget` the guard.
++            let mut [< __ $field _guard >] = unsafe {
+                 $crate::__internal::DropGuard::new(::core::ptr::addr_of_mut!((*$slot).$field))
+             };
++            // NOTE: The reference is derived from the guard so that it only lives as long as
++            // the guard does and cannot escape the scope.
++            #[allow(unused_variables)]
++            // SAFETY: the project function does the correct field projection.
++            let $field = unsafe { $data.[< __project_ $field >]([< __ $field _guard >].let_binding()) };
++
+             $crate::__init_internal!(init_slot($use_data):
+                 @data($data),
+                 @slot($slot),
index a347ef1904138721611860d45d77980c2f452301..c2ec1b0bd892dbec2923194c26b1bc0ce7e38b58 100644 (file)
@@ -245,3 +245,25 @@ loongarch-kvm-use-kvm_set_pte-in-kvm_flush_pte.patch
 loongarch-use-per-root-bridge-pcih-flag-to-skip-mem-resource-fixup.patch
 io_uring-kbuf-support-min-length-left-for-incremental-buffers.patch
 io_uring-tw-serialize-ctx-retry_llist-with-uring_lock.patch
+bpf-fix-use-after-free-in-arena_vm_close-on-fork.patch
+mm-damon-core-disallow-non-power-of-two-min_region_sz-on-damon_start.patch
+fbdev-defio-disconnect-deferred-i-o-from-the-lifetime-of-struct-fb_info.patch
+dma-mapping-add-__dma_from_device_group_begin-end.patch
+hwmon-powerz-avoid-cacheline-sharing-for-dma-buffer.patch
+octeon_ep_vf-add-null-check-for-napi_build_skb.patch
+mmc-core-adjust-mdt-beyond-2025.patch
+mmc-core-add-quirk-for-incorrect-manufacturing-date.patch
+mmc-core-optimize-time-for-secure-erase-trim-for-some-kingston-emmcs.patch
+crypto-qat-fix-indentation-of-macros-in-qat_hal.c.patch
+crypto-qat-fix-firmware-loading-failure-for-gen6-devices.patch
+mm-swap-speed-up-hibernation-allocation-and-writeout.patch
+firmware-exynos-acpm-drop-fake-const-on-handle-pointer.patch
+hfsplus-fix-uninit-value-by-validating-catalog-record-size.patch
+hfsplus-fix-held-lock-freed-on-hfsplus_fill_super.patch
+erofs-tidy-up-z_erofs_lz4_handle_overlap.patch
+erofs-fix-unsigned-underflow-in-z_erofs_lz4_handle_overlap.patch
+printk-add-print_hex_dump_devel.patch
+crypto-caam-guard-hmac-key-hex-dumps-in-hash_digest_key.patch
+net-stmmac-rename-stmmac_get_entry-stmmac_next_entry.patch
+net-stmmac-prevent-null-deref-when-rx-memory-exhausted.patch
+rust-pin-init-fix-incorrect-accessor-reference-lifetime.patch