--- /dev/null
+From b2a09e4ac99fe6faa8536b44bde9366841147e76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Apr 2026 16:44:06 -0600
+Subject: io_uring/poll: fix backport of io_poll_add() changes
+
+From: Jens Axboe <axboe@kernel.dk>
+
+The 5.15/5.10 backport of 84230ad2d2af had a few issues, due to the
+older poll base. Notably the return value handling in
+__io_arm_poll_handler(), and in turn in __io_poll_add() as well. Fix
+them up.
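+
+The intended contract after the fixup, roughly (an illustrative summary
+of the diff below):
+
+  __io_poll_add(): posts the completion itself whenever the result is
+                   nonzero, and returns that result;
+  io_poll_add():   always returns 0, as any completion has already been
+                   posted by __io_poll_add().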
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Fixes: 349ef5d2e7bf ("io_uring/poll: correctly handle io_poll_add() return value on update")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/io_uring.c | 12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index 83806b3c6e3bc..8b0dfea96ee09 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -5991,19 +5991,15 @@ static int __io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
+ if (!ret && ipt.error)
+ req_set_fail(req);
+ ret = ret ?: ipt.error;
+- if (ret > 0) {
++ if (ret)
+ __io_req_complete(req, issue_flags, ret, 0);
+- return ret;
+- }
+- return 0;
++ return ret;
+ }
+
+ static int io_poll_add(struct io_kiocb *req, unsigned int issue_flags)
+ {
+- int ret;
+-
+- ret = __io_poll_add(req, issue_flags);
+- return ret < 0 ? ret : 0;
++ __io_poll_add(req, issue_flags);
++ return 0;
+ }
+
+ static int io_poll_update(struct io_kiocb *req, unsigned int issue_flags)
+--
+2.53.0
+
--- /dev/null
+From 8b163d0ce867c89f430db36a4ffbc1d2238dec3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Apr 2026 16:41:32 -0600
+Subject: io_uring/poll: fix EPOLL_URING_WAKE sometimes not being masked in
+
+From: Jens Axboe <axboe@kernel.dk>
+
+Rather than do it only when we jump straight to execution, mark it
+regardless. This ensures it doesn't get lost.
+
+Fixes: ccf06b5a981c ("io_uring: pass in EPOLL_URING_WAKE for eventfd signaling and wakeups")
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ io_uring/io_uring.c | 17 ++++++++---------
+ 1 file changed, 8 insertions(+), 9 deletions(-)
+
+diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
+index dea1fb22c0efb..83806b3c6e3bc 100644
+--- a/io_uring/io_uring.c
++++ b/io_uring/io_uring.c
+@@ -5647,17 +5647,16 @@ static int io_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
+ if (mask && !(mask & poll->events))
+ return 0;
+
+- if (io_poll_get_ownership(req)) {
+- /*
+- * If we trigger a multishot poll off our own wakeup path,
+- * disable multishot as there is a circular dependency between
+- * CQ posting and triggering the event.
+- */
+- if (mask & EPOLL_URING_WAKE)
+- poll->events |= EPOLLONESHOT;
++ /*
++ * If we trigger a multishot poll off our own wakeup path,
++ * disable multishot as there is a circular dependency between
++ * CQ posting and triggering the event.
++ */
++ if (mask & EPOLL_URING_WAKE)
++ poll->events |= EPOLLONESHOT;
+
++ if (io_poll_get_ownership(req))
+ __io_poll_execute(req, mask);
+- }
+ return 1;
+ }
+
+--
+2.53.0
+
--- /dev/null
+From 2d3da49edecb9113a19c2459812767aa19328f61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 12:07:55 -0400
+Subject: Revert "riscv: Sparse-Memory/vmemmap out-of-bounds fix"
+
+This reverts commit 8af1c121b0102041809bc137ec600d1865eaeedd.
+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/pgtable.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
+index 982745572945e..a754125ca4b8e 100644
+--- a/arch/riscv/include/asm/pgtable.h
++++ b/arch/riscv/include/asm/pgtable.h
+@@ -44,7 +44,7 @@
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+-#define vmemmap ((struct page *)VMEMMAP_START - (phys_ram_base >> PAGE_SHIFT))
++#define vmemmap ((struct page *)VMEMMAP_START)
+
+ #define PCI_IO_SIZE SZ_16M
+ #define PCI_IO_END VMEMMAP_START
+--
+2.53.0
+
ibmasm-fix-oob-reads-in-command_file_write-due-to-missing-size-checks.patch
ibmasm-fix-heap-over-read-in-ibmasm_send_i2o_message.patch
firmware-google-framebuffer-do-not-mark-framebuffer-as-busy.patch
+io_uring-poll-fix-epoll_uring_wake-sometimes-not-bei.patch
+io_uring-poll-fix-backport-of-io_poll_add-changes.patch
+revert-riscv-sparse-memory-vmemmap-out-of-bounds-fix.patch
--- /dev/null
+From 102add0272bce3bc25e639b9a721c2cfaa2160e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:28:25 +0800
+Subject: scsi: ufs: core: Fix use-after-free in init error and remove paths
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: André Draszik <andre.draszik@linaro.org>
+
+[ Upstream commit f8fb2403ddebb5eea0033d90d9daae4c88749ada ]
+
+devm_blk_crypto_profile_init() registers a cleanup handler to run when
+the associated (platform-) device is being released. For UFS, the
+crypto private data and pointers are stored as part of the ufs_hba's
+data structure 'struct ufs_hba::crypto_profile'. This structure is
+allocated as part of the underlying ufshcd and therefore Scsi_host
+allocation.
+
+During driver release or during error handling in ufshcd_pltfrm_init(),
+this structure is released as part of ufshcd_dealloc_host() before the
+(platform-) device associated with the crypto call above is released.
+Once this device is released, the crypto cleanup code will run, using
+the just-released 'struct ufs_hba::crypto_profile'. This causes a
+use-after-free situation:
+
+ Call trace:
+ kfree+0x60/0x2d8 (P)
+ kvfree+0x44/0x60
+ blk_crypto_profile_destroy_callback+0x28/0x70
+ devm_action_release+0x1c/0x30
+ release_nodes+0x6c/0x108
+ devres_release_all+0x98/0x100
+ device_unbind_cleanup+0x20/0x70
+ really_probe+0x218/0x2d0
+
+In other words, the initialisation code flow is:
+
+ platform-device probe
+ ufshcd_pltfrm_init()
+ ufshcd_alloc_host()
+ scsi_host_alloc()
+ allocation of struct ufs_hba
+ creation of scsi-host devices
+ devm_blk_crypto_profile_init()
+ devm registration of cleanup handler using platform-device
+
+and during error handling of ufshcd_pltfrm_init() or during driver
+removal:
+
+ ufshcd_dealloc_host()
+ scsi_host_put()
+ put_device(scsi-host)
+ release of struct ufs_hba
+ put_device(platform-device)
+ crypto cleanup handler
+
+To fix this use-after-free, change ufshcd_alloc_host() to register a
+devres action to automatically cleanup the underlying SCSI device on
+ufshcd destruction, without requiring explicit calls to
+ufshcd_dealloc_host(). This way (see the sketch after this list):
+
+ * the crypto profile and all other ufs_hba-owned resources are
+ destroyed before SCSI (as they've been registered after)
+ * a memleak is plugged in tc-dwc-g210-pci.c remove() as a
+ side-effect
+ * EXPORT_SYMBOL_GPL(ufshcd_dealloc_host) can be removed fully as
+ it's not needed anymore
+ * no future drivers using ufshcd_alloc_host() could ever forget
+ adding the cleanup
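+
+A minimal sketch of the devres ordering this relies on (illustrative;
+devres actions run in reverse order of registration):
+
+  ufshcd_alloc_host(dev, &hba);           <- registers scsi_host_put() action
+  ...
+  devm_blk_crypto_profile_init(dev, ...); <- registers crypto cleanup later
+
+  on release of dev, devres runs LIFO:
+    1) crypto profile cleanup (struct ufs_hba still alive)
+    2) ufshcd_devres_release() -> scsi_host_put() (ufs_hba freed last)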
+
+Fixes: cb77cb5abe1f ("blk-crypto: rename blk_keyslot_manager to blk_crypto_profile")
+Fixes: d76d9d7d1009 ("scsi: ufs: use devm_blk_ksm_init()")
+Cc: stable@vger.kernel.org
+Signed-off-by: André Draszik <andre.draszik@linaro.org>
+Link: https://lore.kernel.org/r/20250124-ufshcd-fix-v4-1-c5d0144aae59@linaro.org
+Reviewed-by: Bean Huo <beanhuo@micron.com>
+Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
+Acked-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+[ Dropped the modifications to ufshcd_parse_operating_points(), since it is
+only added by commit 72208ebe181e3 ("scsi: ufs: core: Add support for parsing
+OPP"), and those in ufshcd_pltfrm_remove(), since it is only added by commit
+897df60c16d54 ("scsi: ufs: pltfrm: Dellocate HBA during ufshcd_pltfrm_remove()"). ]
+Signed-off-by: Robert Garcia <rob_garcia@163.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd-pci.c | 2 --
+ drivers/scsi/ufs/ufshcd-pltfrm.c | 25 ++++++++-----------------
+ drivers/scsi/ufs/ufshcd.c | 31 +++++++++++++++++++++----------
+ drivers/scsi/ufs/ufshcd.h | 1 -
+ 4 files changed, 29 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
+index ec483ece09b6c..351e6915c33c4 100644
+--- a/drivers/scsi/ufs/ufshcd-pci.c
++++ b/drivers/scsi/ufs/ufshcd-pci.c
+@@ -554,7 +554,6 @@ static void ufshcd_pci_remove(struct pci_dev *pdev)
+ pm_runtime_forbid(&pdev->dev);
+ pm_runtime_get_noresume(&pdev->dev);
+ ufshcd_remove(hba);
+- ufshcd_dealloc_host(hba);
+ }
+
+ /**
+@@ -599,7 +598,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ err = ufshcd_init(hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+- ufshcd_dealloc_host(hba);
+ return err;
+ }
+
+diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
+index adc302b1a57ae..c254d5f697fc3 100644
+--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
++++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
+@@ -339,21 +339,17 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ struct device *dev = &pdev->dev;
+
+ mmio_base = devm_platform_ioremap_resource(pdev, 0);
+- if (IS_ERR(mmio_base)) {
+- err = PTR_ERR(mmio_base);
+- goto out;
+- }
++ if (IS_ERR(mmio_base))
++ return PTR_ERR(mmio_base);
+
+ irq = platform_get_irq(pdev, 0);
+- if (irq < 0) {
+- err = irq;
+- goto out;
+- }
++ if (irq < 0)
++ return irq;
+
+ err = ufshcd_alloc_host(dev, &hba);
+ if (err) {
+ dev_err(&pdev->dev, "Allocation failed\n");
+- goto out;
++ return err;
+ }
+
+ hba->vops = vops;
+@@ -362,13 +358,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ if (err) {
+ dev_err(&pdev->dev, "%s: clock parse failed %d\n",
+ __func__, err);
+- goto dealloc_host;
++ return err;
+ }
+ err = ufshcd_parse_regulator_info(hba);
+ if (err) {
+ dev_err(&pdev->dev, "%s: regulator init failed %d\n",
+ __func__, err);
+- goto dealloc_host;
++ return err;
+ }
+
+ ufshcd_init_lanes_per_dir(hba);
+@@ -376,18 +372,13 @@ int ufshcd_pltfrm_init(struct platform_device *pdev,
+ err = ufshcd_init(hba, mmio_base, irq);
+ if (err) {
+ dev_err(dev, "Initialization failed\n");
+- goto dealloc_host;
++ return err;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ return 0;
+-
+-dealloc_host:
+- ufshcd_dealloc_host(hba);
+-out:
+- return err;
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_pltfrm_init);
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 55eaf04d75932..637607868f554 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -9322,16 +9322,6 @@ void ufshcd_remove(struct ufs_hba *hba)
+ }
+ EXPORT_SYMBOL_GPL(ufshcd_remove);
+
+-/**
+- * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
+- * @hba: pointer to Host Bus Adapter (HBA)
+- */
+-void ufshcd_dealloc_host(struct ufs_hba *hba)
+-{
+- scsi_host_put(hba->host);
+-}
+-EXPORT_SYMBOL_GPL(ufshcd_dealloc_host);
+-
+ /**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+@@ -9348,11 +9338,25 @@ static int ufshcd_set_dma_mask(struct ufs_hba *hba)
+ return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
+ }
+
++/**
++ * ufshcd_devres_release - devres cleanup handler, invoked during release of
++ * hba->dev
++ * @host: pointer to SCSI host
++ */
++static void ufshcd_devres_release(void *host)
++{
++ scsi_host_put(host);
++}
++
+ /**
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * Returns 0 on success, non-zero value on failure
++ *
++ * NOTE: There is no corresponding ufshcd_dealloc_host() because this function
++ * keeps track of its allocations using devres and deallocates everything on
++ * device removal automatically.
+ */
+ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ {
+@@ -9374,6 +9378,13 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
+ err = -ENOMEM;
+ goto out_error;
+ }
++
++ err = devm_add_action_or_reset(dev, ufshcd_devres_release,
++ host);
++ if (err)
++ return dev_err_probe(dev, err,
++ "failed to add ufshcd dealloc action\n");
++
+ hba = shost_priv(host);
+ hba->host = host;
+ hba->dev = dev;
+diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
+index c8513cc6c2bdd..3ceac158c7f36 100644
+--- a/drivers/scsi/ufs/ufshcd.h
++++ b/drivers/scsi/ufs/ufshcd.h
+@@ -1001,7 +1001,6 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+ }
+
+ int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+-void ufshcd_dealloc_host(struct ufs_hba *);
+ int ufshcd_hba_enable(struct ufs_hba *hba);
+ int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
+ int ufshcd_link_recovery(struct ufs_hba *hba);
+--
+2.53.0
+
ibmasm-fix-oob-reads-in-command_file_write-due-to-missing-size-checks.patch
ibmasm-fix-heap-over-read-in-ibmasm_send_i2o_message.patch
firmware-google-framebuffer-do-not-mark-framebuffer-as-busy.patch
+scsi-ufs-core-fix-use-after-free-in-init-error-and-r.patch
--- /dev/null
+From b0522751fd5d3b50e29580953b65d9bd47da3522 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 17:22:48 +0800
+Subject: arm64: set __exception_irq_entry with __irq_entry as a default
+
+From: Youngmin Nam <youngmin.nam@samsung.com>
+
+[ Upstream commit f6794950f0e5ba37e3bbedda4d6ab0aad7395dd3 ]
+
+filter_irq_stacks() is supposed to cut irq-related entries from a call
+stack, and in_irqentry_text(), which is called by filter_irq_stacks(),
+uses the __irqentry_text_start/end symbols to find irq entries in the
+call stack.
+
+But it doesn't work correctly because, without CONFIG_FUNCTION_GRAPH_TRACER,
+the arm64 kernel doesn't place gic_handle_irq, the entry point of arm64 irq
+handling, between __irqentry_text_start and __irqentry_text_end, as we
+discussed in the link below:
+https://lore.kernel.org/all/CACT4Y+aReMGLYua2rCLHgFpS9io5cZC04Q8GLs-uNmrn1ezxYQ@mail.gmail.com/#t
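+
+For reference, the filtering helper is roughly (from lib/stackdepot.c,
+shortened here for illustration):
+
+  static bool in_irqentry_text(unsigned long ptr)
+  {
+          return (ptr >= (unsigned long)&__irqentry_text_start &&
+                  ptr < (unsigned long)&__irqentry_text_end) ||
+                 (ptr >= (unsigned long)&__softirqentry_text_start &&
+                  ptr < (unsigned long)&__softirqentry_text_end);
+  }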
+
+This problem can produce unintentionally deep call stack entries,
+especially with KASAN enabled, as below.
+
+[ 2479.383395]I[0:launcher-loader: 1719] Stack depot reached limit capacity
+[ 2479.383538]I[0:launcher-loader: 1719] WARNING: CPU: 0 PID: 1719 at lib/stackdepot.c:129 __stack_depot_save+0x464/0x46c
+[ 2479.385693]I[0:launcher-loader: 1719] pstate: 624000c5 (nZCv daIF +PAN -UAO +TCO -DIT -SSBS BTYPE=--)
+[ 2479.385724]I[0:launcher-loader: 1719] pc : __stack_depot_save+0x464/0x46c
+[ 2479.385751]I[0:launcher-loader: 1719] lr : __stack_depot_save+0x460/0x46c
+[ 2479.385774]I[0:launcher-loader: 1719] sp : ffffffc0080073c0
+[ 2479.385793]I[0:launcher-loader: 1719] x29: ffffffc0080073e0 x28: ffffffd00b78a000 x27: 0000000000000000
+[ 2479.385839]I[0:launcher-loader: 1719] x26: 000000000004d1dd x25: ffffff891474f000 x24: 00000000ca64d1dd
+[ 2479.385882]I[0:launcher-loader: 1719] x23: 0000000000000200 x22: 0000000000000220 x21: 0000000000000040
+[ 2479.385925]I[0:launcher-loader: 1719] x20: ffffffc008007440 x19: 0000000000000000 x18: 0000000000000000
+[ 2479.385969]I[0:launcher-loader: 1719] x17: 2065726568207475 x16: 000000000000005e x15: 2d2d2d2d2d2d2d20
+[ 2479.386013]I[0:launcher-loader: 1719] x14: 5d39313731203a72 x13: 00000000002f6b30 x12: 00000000002f6af8
+[ 2479.386057]I[0:launcher-loader: 1719] x11: 00000000ffffffff x10: ffffffb90aacf000 x9 : e8a74a6c16008800
+[ 2479.386101]I[0:launcher-loader: 1719] x8 : e8a74a6c16008800 x7 : 00000000002f6b30 x6 : 00000000002f6af8
+[ 2479.386145]I[0:launcher-loader: 1719] x5 : ffffffc0080070c8 x4 : ffffffd00b192380 x3 : ffffffd0092b313c
+[ 2479.386189]I[0:launcher-loader: 1719] x2 : 0000000000000001 x1 : 0000000000000004 x0 : 0000000000000022
+[ 2479.386231]I[0:launcher-loader: 1719] Call trace:
+[ 2479.386248]I[0:launcher-loader: 1719] __stack_depot_save+0x464/0x46c
+[ 2479.386273]I[0:launcher-loader: 1719] kasan_save_stack+0x58/0x70
+[ 2479.386303]I[0:launcher-loader: 1719] save_stack_info+0x34/0x138
+[ 2479.386331]I[0:launcher-loader: 1719] kasan_save_free_info+0x18/0x24
+[ 2479.386358]I[0:launcher-loader: 1719] ____kasan_slab_free+0x16c/0x170
+[ 2479.386385]I[0:launcher-loader: 1719] __kasan_slab_free+0x10/0x20
+[ 2479.386410]I[0:launcher-loader: 1719] kmem_cache_free+0x238/0x53c
+[ 2479.386435]I[0:launcher-loader: 1719] mempool_free_slab+0x1c/0x28
+[ 2479.386460]I[0:launcher-loader: 1719] mempool_free+0x7c/0x1a0
+[ 2479.386484]I[0:launcher-loader: 1719] bvec_free+0x34/0x80
+[ 2479.386514]I[0:launcher-loader: 1719] bio_free+0x60/0x98
+[ 2479.386540]I[0:launcher-loader: 1719] bio_put+0x50/0x21c
+[ 2479.386567]I[0:launcher-loader: 1719] f2fs_write_end_io+0x4ac/0x4d0
+[ 2479.386594]I[0:launcher-loader: 1719] bio_endio+0x2dc/0x300
+[ 2479.386622]I[0:launcher-loader: 1719] __dm_io_complete+0x324/0x37c
+[ 2479.386650]I[0:launcher-loader: 1719] dm_io_dec_pending+0x60/0xa4
+[ 2479.386676]I[0:launcher-loader: 1719] clone_endio+0xf8/0x2f0
+[ 2479.386700]I[0:launcher-loader: 1719] bio_endio+0x2dc/0x300
+[ 2479.386727]I[0:launcher-loader: 1719] blk_update_request+0x258/0x63c
+[ 2479.386754]I[0:launcher-loader: 1719] scsi_end_request+0x50/0x304
+[ 2479.386782]I[0:launcher-loader: 1719] scsi_io_completion+0x88/0x160
+[ 2479.386808]I[0:launcher-loader: 1719] scsi_finish_command+0x17c/0x194
+[ 2479.386833]I[0:launcher-loader: 1719] scsi_complete+0xcc/0x158
+[ 2479.386859]I[0:launcher-loader: 1719] blk_mq_complete_request+0x4c/0x5c
+[ 2479.386885]I[0:launcher-loader: 1719] scsi_done_internal+0xf4/0x1e0
+[ 2479.386910]I[0:launcher-loader: 1719] scsi_done+0x14/0x20
+[ 2479.386935]I[0:launcher-loader: 1719] ufshcd_compl_one_cqe+0x578/0x71c
+[ 2479.386963]I[0:launcher-loader: 1719] ufshcd_mcq_poll_cqe_nolock+0xc8/0x150
+[ 2479.386991]I[0:launcher-loader: 1719] ufshcd_intr+0x868/0xc0c
+[ 2479.387017]I[0:launcher-loader: 1719] __handle_irq_event_percpu+0xd0/0x348
+[ 2479.387044]I[0:launcher-loader: 1719] handle_irq_event_percpu+0x24/0x74
+[ 2479.387068]I[0:launcher-loader: 1719] handle_irq_event+0x74/0xe0
+[ 2479.387091]I[0:launcher-loader: 1719] handle_fasteoi_irq+0x174/0x240
+[ 2479.387118]I[0:launcher-loader: 1719] handle_irq_desc+0x7c/0x2c0
+[ 2479.387147]I[0:launcher-loader: 1719] generic_handle_domain_irq+0x1c/0x28
+[ 2479.387174]I[0:launcher-loader: 1719] gic_handle_irq+0x64/0x158
+[ 2479.387204]I[0:launcher-loader: 1719] call_on_irq_stack+0x2c/0x54
+[ 2479.387231]I[0:launcher-loader: 1719] do_interrupt_handler+0x70/0xa0
+[ 2479.387258]I[0:launcher-loader: 1719] el1_interrupt+0x34/0x68
+[ 2479.387283]I[0:launcher-loader: 1719] el1h_64_irq_handler+0x18/0x24
+[ 2479.387308]I[0:launcher-loader: 1719] el1h_64_irq+0x68/0x6c
+[ 2479.387332]I[0:launcher-loader: 1719] blk_attempt_bio_merge+0x8/0x170
+[ 2479.387356]I[0:launcher-loader: 1719] blk_mq_attempt_bio_merge+0x78/0x98
+[ 2479.387383]I[0:launcher-loader: 1719] blk_mq_submit_bio+0x324/0xa40
+[ 2479.387409]I[0:launcher-loader: 1719] __submit_bio+0x104/0x138
+[ 2479.387436]I[0:launcher-loader: 1719] submit_bio_noacct_nocheck+0x1d0/0x4a0
+[ 2479.387462]I[0:launcher-loader: 1719] submit_bio_noacct+0x618/0x804
+[ 2479.387487]I[0:launcher-loader: 1719] submit_bio+0x164/0x180
+[ 2479.387511]I[0:launcher-loader: 1719] f2fs_submit_read_bio+0xe4/0x1c4
+[ 2479.387537]I[0:launcher-loader: 1719] f2fs_mpage_readpages+0x888/0xa4c
+[ 2479.387563]I[0:launcher-loader: 1719] f2fs_readahead+0xd4/0x19c
+[ 2479.387587]I[0:launcher-loader: 1719] read_pages+0xb0/0x4ac
+[ 2479.387614]I[0:launcher-loader: 1719] page_cache_ra_unbounded+0x238/0x288
+[ 2479.387642]I[0:launcher-loader: 1719] do_page_cache_ra+0x60/0x6c
+[ 2479.387669]I[0:launcher-loader: 1719] page_cache_ra_order+0x318/0x364
+[ 2479.387695]I[0:launcher-loader: 1719] ondemand_readahead+0x30c/0x3d8
+[ 2479.387722]I[0:launcher-loader: 1719] page_cache_sync_ra+0xb4/0xc8
+[ 2479.387749]I[0:launcher-loader: 1719] filemap_read+0x268/0xd24
+[ 2479.387777]I[0:launcher-loader: 1719] f2fs_file_read_iter+0x1a0/0x62c
+[ 2479.387806]I[0:launcher-loader: 1719] vfs_read+0x258/0x34c
+[ 2479.387831]I[0:launcher-loader: 1719] ksys_pread64+0x8c/0xd0
+[ 2479.387857]I[0:launcher-loader: 1719] __arm64_sys_pread64+0x48/0x54
+[ 2479.387881]I[0:launcher-loader: 1719] invoke_syscall+0x58/0x158
+[ 2479.387909]I[0:launcher-loader: 1719] el0_svc_common+0xf0/0x134
+[ 2479.387935]I[0:launcher-loader: 1719] do_el0_svc+0x44/0x114
+[ 2479.387961]I[0:launcher-loader: 1719] el0_svc+0x2c/0x80
+[ 2479.387985]I[0:launcher-loader: 1719] el0t_64_sync_handler+0x48/0x114
+[ 2479.388010]I[0:launcher-loader: 1719] el0t_64_sync+0x190/0x194
+[ 2479.388038]I[0:launcher-loader: 1719] Kernel panic - not syncing: kernel: panic_on_warn set ...
+
+So let's set __exception_irq_entry with __irq_entry as a default.
+With this patch applied, we can see gic_handle_irq is included in
+System.map as below.
+
+* Before
+ffffffc008010000 T __do_softirq
+ffffffc008010000 T __irqentry_text_end
+ffffffc008010000 T __irqentry_text_start
+ffffffc008010000 T __softirqentry_text_start
+ffffffc008010000 T _stext
+ffffffc00801066c T __softirqentry_text_end
+ffffffc008010670 T __entry_text_start
+
+* After
+ffffffc008010000 T __irqentry_text_start
+ffffffc008010000 T _stext
+ffffffc008010000 t gic_handle_irq
+ffffffc00801013c t gic_handle_irq
+ffffffc008010294 T __irqentry_text_end
+ffffffc008010298 T __do_softirq
+ffffffc008010298 T __softirqentry_text_start
+ffffffc008010904 T __softirqentry_text_end
+ffffffc008010908 T __entry_text_start
+
+Signed-off-by: Youngmin Nam <youngmin.nam@samsung.com>
+Signed-off-by: SEO HOYOUNG <hy50.seo@samsung.com>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/r/20230424010436.779733-1-youngmin.nam@samsung.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Leon Chen <leonchen.oss@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/exception.h | 5 -----
+ 1 file changed, 5 deletions(-)
+
+diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
+index 19713d0f013b7..18dbb35a337f7 100644
+--- a/arch/arm64/include/asm/exception.h
++++ b/arch/arm64/include/asm/exception.h
+@@ -8,16 +8,11 @@
+ #define __ASM_EXCEPTION_H
+
+ #include <asm/esr.h>
+-#include <asm/kprobes.h>
+ #include <asm/ptrace.h>
+
+ #include <linux/interrupt.h>
+
+-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ #define __exception_irq_entry __irq_entry
+-#else
+-#define __exception_irq_entry __kprobes
+-#endif
+
+ static inline unsigned long disr_to_esr(u64 disr)
+ {
+--
+2.53.0
+
--- /dev/null
+From 6e49adb270c1f6456129f5d87b3898e6e0e1edfd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 16:20:57 +0800
+Subject: blk-mq: fix NULL dereference on q->elevator in blk_mq_elv_switch_none
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 245165658e1c9f95c0fecfe02b9b1ebd30a1198a ]
+
+After grabbing q->sysfs_lock, q->elevator may become NULL because of an
+elevator switch.
+
+Fix the NULL dereference on q->elevator by checking it with the lock held.
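+
+An illustrative interleaving of the race being closed (task A is
+blk_mq_elv_switch_none(), task B switches the elevator):
+
+  A: sees q->elevator != NULL          (unlocked check, old code)
+  B: elevator_switch(q, NULL)          (under q->sysfs_lock)
+  A: mutex_lock(&q->sysfs_lock);
+     qe->type = q->elevator->type;     (NULL pointer dereference)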
+
+Reported-by: Guangwu Zhang <guazhang@redhat.com>
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20230616132354.415109-1-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Leon Chen <leonchen.oss@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-mq.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/block/blk-mq.c b/block/blk-mq.c
+index f480b6ddba5ee..8a9d9e3db1668 100644
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -4732,9 +4732,6 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
+ {
+ struct blk_mq_qe_pair *qe;
+
+- if (!q->elevator)
+- return true;
+-
+ qe = kmalloc(sizeof(*qe), GFP_NOIO | __GFP_NOWARN | __GFP_NORETRY);
+ if (!qe)
+ return false;
+@@ -4742,6 +4739,12 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
+ /* q->elevator needs protection from ->sysfs_lock */
+ mutex_lock(&q->sysfs_lock);
+
++ /* the check has to be done with holding sysfs_lock */
++ if (!q->elevator) {
++ kfree(qe);
++ goto unlock;
++ }
++
+ INIT_LIST_HEAD(&qe->node);
+ qe->q = q;
+ qe->type = q->elevator->type;
+@@ -4756,6 +4759,7 @@ static bool blk_mq_elv_switch_none(struct list_head *head,
+ */
+ __module_get(qe->type->elevator_owner);
+ elevator_switch(q, NULL);
++unlock:
+ mutex_unlock(&q->sysfs_lock);
+
+ return true;
+--
+2.53.0
+
--- /dev/null
+From 8ef1d033e2b57b00a0bf1a8078fb69990a39e830 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:54:35 +0800
+Subject: drm/amdgpu: Limit BO list entry count to prevent resource exhaustion
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jesse.Zhang <Jesse.Zhang@amd.com>
+
+[ Upstream commit 6270b1a5dab94665d7adce3dc78bc9066ed28bdd ]
+
+Userspace can pass an arbitrary number of BO list entries via the
+bo_number field. Although the previous multiplication overflow check
+prevents out-of-bounds allocation, a large number of entries could still
+cause excessive memory allocation (up to potentially gigabytes) and
+unnecessarily long list processing times.
+
+Introduce a hard limit of 128k entries per BO list, which is more than
+sufficient for any realistic use case (e.g., a single list containing all
+buffers in a large scene). This prevents memory exhaustion attacks and
+ensures predictable performance.
+
+Return -EINVAL if the requested entry count exceeds the limit.
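+
+For scale: struct drm_amdgpu_bo_list_entry is two __u32s (8 bytes), so
+this caps the kernel-side copy at 128 * 1024 * 8 bytes = 1 MiB per list.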
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Suggested-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 688b87d39e0aa8135105b40dc167d74b5ada5332)
+Cc: stable@vger.kernel.org
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 79e43896edddb..28a5b54a3aae1 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -35,6 +35,7 @@
+
+ #define AMDGPU_BO_LIST_MAX_PRIORITY 32u
+ #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
++#define AMDGPU_BO_LIST_MAX_ENTRIES (128 * 1024)
+
+ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+ {
+@@ -232,6 +233,9 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ const uint32_t bo_number = in->bo_number;
+ struct drm_amdgpu_bo_list_entry *info;
+
++ if (bo_number > AMDGPU_BO_LIST_MAX_ENTRIES)
++ return -EINVAL;
++
+ /* copy the handle array from userspace to a kernel buffer */
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+--
+2.53.0
+
--- /dev/null
+From 366e5d08dee80c781f2aa4d35ef1a484d57c9804 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:54:25 +0800
+Subject: drm/amdgpu: Use vmemdup_array_user in
+ amdgpu_bo_create_list_entry_array
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+[ Upstream commit c4ac100e9ae252b09986766ad23b1f83ca3a369d ]
+
+Replace kvmalloc_array() + copy_from_user() with vmemdup_array_user() on
+the fast path.
+
+This shrinks the source code and improves separation between the kernel
+and userspace slabs.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 41 +++++++++------------
+ 1 file changed, 17 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index fdc302aa59e7b..79e43896edddb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -226,43 +226,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
+ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
+ {
+- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
++ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
++ const uint32_t bo_info_size = in->bo_info_size;
++ const uint32_t bo_number = in->bo_number;
+ struct drm_amdgpu_bo_list_entry *info;
+- int r;
+-
+- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
+- if (!info)
+- return -ENOMEM;
+
+ /* copy the handle array from userspace to a kernel buffer */
+- r = -EFAULT;
+- if (likely(info_size == in->bo_info_size)) {
+- unsigned long bytes = in->bo_number *
+- in->bo_info_size;
+-
+- if (copy_from_user(info, uptr, bytes))
+- goto error_free;
+-
++ if (likely(info_size == bo_info_size)) {
++ info = vmemdup_array_user(uptr, bo_number, info_size);
++ if (IS_ERR(info))
++ return PTR_ERR(info);
+ } else {
+- unsigned long bytes = min(in->bo_info_size, info_size);
++ const uint32_t bytes = min(bo_info_size, info_size);
+ unsigned i;
+
+- memset(info, 0, in->bo_number * info_size);
+- for (i = 0; i < in->bo_number; ++i) {
+- if (copy_from_user(&info[i], uptr, bytes))
+- goto error_free;
++ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
+
+- uptr += in->bo_info_size;
++ memset(info, 0, bo_number * info_size);
++ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
++ if (copy_from_user(&info[i], uptr, bytes)) {
++ kvfree(info);
++ return -EFAULT;
++ }
+ }
+ }
+
+ *info_param = info;
+ return 0;
+-
+-error_free:
+- kvfree(info);
+- return r;
+ }
+
+ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+--
+2.53.0
+
--- /dev/null
+From 7ae5ee1d41d3b4f203dddf7b145b73d97c1de435 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:53:58 +0800
+Subject: net: enetc: fix the deadlock of enetc_mdio_lock
+
+From: Jianpeng Chang <jianpeng.chang.cn@windriver.com>
+
+[ Upstream commit 50bd33f6b3922a6b760aa30d409cae891cec8fb5 ]
+
+After applying the workaround for err050089, the LS1028A platform
+experiences RCU stalls on RT kernels. This issue is caused by the
+recursive acquisition of the read lock enetc_mdio_lock. Listed below are
+some of the call stacks identified under the enetc_poll path that may
+lead to a deadlock:
+
+enetc_poll
+ -> enetc_lock_mdio
+ -> enetc_clean_rx_ring OR napi_complete_done
+ -> napi_gro_receive
+ -> enetc_start_xmit
+ -> enetc_lock_mdio
+ -> enetc_map_tx_buffs
+ -> enetc_unlock_mdio
+ -> enetc_unlock_mdio
+
+After enetc_poll acquires the read lock, a higher-priority writer attempts
+to acquire the lock, causing preemption. The writer detects that a
+read lock is already held and is scheduled out. However, readers under
+enetc_poll cannot acquire the read lock again because a writer is already
+waiting, leading to a thread hang.
+
+Currently, the deadlock is avoided by adjusting enetc_lock_mdio to prevent
+recursive lock acquisition.
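+
+An illustrative timeline of the hang (R is the NAPI poll reader, W is a
+higher-priority writer):
+
+  R: enetc_lock_mdio()       <- read lock taken in enetc_poll
+  W: write lock requested    <- blocks; new readers are now held off
+  R: enetc_lock_mdio()       <- nested read lock; queued behind W
+  -> R waits on W, W waits on R: neither can make progress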
+
+Fixes: 6d36ecdbc441 ("net: enetc: take the MDIO lock only once per NAPI poll cycle")
+Signed-off-by: Jianpeng Chang <jianpeng.chang.cn@windriver.com>
+Acked-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20251015021427.180757-1-jianpeng.chang.cn@windriver.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ adjusted context ]
+Signed-off-by: Charles Xu <charles_xu@189.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc.c | 25 ++++++++++++++++----
+ 1 file changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index bf49c07c8b513..a0177130dc37a 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -1225,6 +1225,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
++ enetc_lock_mdio();
++
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd;
+ struct sk_buff *skb;
+@@ -1260,7 +1262,9 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ rx_byte_cnt += skb->len + ETH_HLEN;
+ rx_frm_cnt++;
+
++ enetc_unlock_mdio();
+ napi_gro_receive(napi, skb);
++ enetc_lock_mdio();
+ }
+
+ rx_ring->next_to_clean = i;
+@@ -1268,6 +1272,8 @@ static int enetc_clean_rx_ring(struct enetc_bdr *rx_ring,
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
+
++ enetc_unlock_mdio();
++
+ return rx_frm_cnt;
+ }
+
+@@ -1562,6 +1568,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ /* next descriptor to process */
+ i = rx_ring->next_to_clean;
+
++ enetc_lock_mdio();
++
+ while (likely(rx_frm_cnt < work_limit)) {
+ union enetc_rx_bd *rxbd, *orig_rxbd;
+ int orig_i, orig_cleaned_cnt;
+@@ -1621,7 +1629,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ if (unlikely(!skb))
+ goto out;
+
++ enetc_unlock_mdio();
+ napi_gro_receive(napi, skb);
++ enetc_lock_mdio();
+ break;
+ case XDP_TX:
+ tx_ring = priv->xdp_tx_ring[rx_ring->index];
+@@ -1664,7 +1674,9 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ break;
+ }
+
++ enetc_unlock_mdio();
+ err = xdp_do_redirect(rx_ring->ndev, &xdp_buff, prog);
++ enetc_lock_mdio();
+ if (unlikely(err)) {
+ enetc_xdp_drop(rx_ring, orig_i, i);
+ rx_ring->stats.xdp_redirect_failures++;
+@@ -1684,8 +1696,11 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ rx_ring->stats.packets += rx_frm_cnt;
+ rx_ring->stats.bytes += rx_byte_cnt;
+
+- if (xdp_redirect_frm_cnt)
++ if (xdp_redirect_frm_cnt) {
++ enetc_unlock_mdio();
+ xdp_do_flush_map();
++ enetc_lock_mdio();
++ }
+
+ if (xdp_tx_frm_cnt)
+ enetc_update_tx_ring_tail(tx_ring);
+@@ -1694,6 +1709,8 @@ static int enetc_clean_rx_ring_xdp(struct enetc_bdr *rx_ring,
+ enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring) -
+ rx_ring->xdp.xdp_tx_in_flight);
+
++ enetc_unlock_mdio();
++
+ return rx_frm_cnt;
+ }
+
+@@ -1712,6 +1729,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
+ for (i = 0; i < v->count_tx_rings; i++)
+ if (!enetc_clean_tx_ring(&v->tx_ring[i], budget))
+ complete = false;
++ enetc_unlock_mdio();
+
+ prog = rx_ring->xdp.prog;
+ if (prog)
+@@ -1723,10 +1741,8 @@ static int enetc_poll(struct napi_struct *napi, int budget)
+ if (work_done)
+ v->rx_napi_work = true;
+
+- if (!complete) {
+- enetc_unlock_mdio();
++ if (!complete)
+ return budget;
+- }
+
+ napi_complete_done(napi, work_done);
+
+@@ -1735,6 +1751,7 @@ static int enetc_poll(struct napi_struct *napi, int budget)
+
+ v->rx_napi_work = false;
+
++ enetc_lock_mdio();
+ /* enable interrupts */
+ enetc_wr_reg_hot(v->rbier, ENETC_RBIER_RXTIE);
+
+--
+2.53.0
+
--- /dev/null
+From 657de84ac06b6b68879b6ca84288ce26b0cfa6cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 15:47:13 +0800
+Subject: padata: Fix pd UAF once and for all
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 71203f68c7749609d7fc8ae6ad054bdedeb24f91 ]
+
+There is a race condition/UAF in padata_reorder that goes back
+to the initial commit. A reference count is taken at the start
+of the process in padata_do_parallel, and released at the end in
+padata_serial_worker.
+
+This reference count is (and only is) required for padata_replace
+to function correctly. If padata_replace is never called then
+there is no issue.
+
+In the function padata_reorder which serves as the core of padata,
+as soon as padata is added to queue->serial.list, and the associated
+spin lock released, that padata may be processed and the reference
+count on pd would go away.
+
+Fix this by getting the next padata before the squeue->serial lock
+is released.
+
+In order to make this possible, simplify padata_reorder by only
+calling it once the next padata arrives.
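+
+An illustrative interleaving of the UAF being fixed:
+
+  CPU A: padata_reorder(pd)
+           list_add_tail(&padata->list, &squeue->serial.list);
+           spin_unlock(&squeue->serial.lock);
+  CPU B: padata_serial_worker() completes the queued padata and drops
+         the final reference -> pd freed
+  CPU A: continues touching pd -> use-after-free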
+
+Fixes: 16295bec6398 ("padata: Generic parallelization/serialization interface")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ Adjust context of padata_find_next(). Replace
+cpumask_next_wrap(cpu, pd->cpumask.pcpu) with
+cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false) in padata_reorder() in
+v6.1 according to dc5bb9b769c9 ("cpumask: deprecate cpumask_next_wrap()") and
+f954a2d37637 ("padata: switch padata_find_next() to using cpumask_next_wrap()")
+. ]
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/padata.h | 3 -
+ kernel/padata.c | 136 +++++++++++------------------------------
+ 2 files changed, 37 insertions(+), 102 deletions(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 495b16b6b4d72..9ca779d7e310e 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -91,7 +91,6 @@ struct padata_cpumask {
+ * @cpu: Next CPU to be processed.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
+ * @reorder_work: work struct for reordering.
+- * @lock: Reorder lock.
+ */
+ struct parallel_data {
+ struct padata_shell *ps;
+@@ -102,8 +101,6 @@ struct parallel_data {
+ unsigned int processed;
+ int cpu;
+ struct padata_cpumask cpumask;
+- struct work_struct reorder_work;
+- spinlock_t ____cacheline_aligned lock;
+ };
+
+ /**
+diff --git a/kernel/padata.c b/kernel/padata.c
+index d49f97abe086f..93e288dc373ee 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -253,20 +253,17 @@ EXPORT_SYMBOL(padata_do_parallel);
+ * be parallel processed by another cpu and is not yet present in
+ * the cpu's reorder queue.
+ */
+-static struct padata_priv *padata_find_next(struct parallel_data *pd,
+- bool remove_object)
++static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu,
++ unsigned int processed)
+ {
+ struct padata_priv *padata;
+ struct padata_list *reorder;
+- int cpu = pd->cpu;
+
+ reorder = per_cpu_ptr(pd->reorder_list, cpu);
+
+ spin_lock(&reorder->lock);
+- if (list_empty(&reorder->list)) {
+- spin_unlock(&reorder->lock);
+- return NULL;
+- }
++ if (list_empty(&reorder->list))
++ goto notfound;
+
+ padata = list_entry(reorder->list.next, struct padata_priv, list);
+
+@@ -274,101 +271,52 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
+ * Checks the rare case where two or more parallel jobs have hashed to
+ * the same CPU and one of the later ones finishes first.
+ */
+- if (padata->seq_nr != pd->processed) {
+- spin_unlock(&reorder->lock);
+- return NULL;
+- }
+-
+- if (remove_object) {
+- list_del_init(&padata->list);
+- ++pd->processed;
+- /* When sequence wraps around, reset to the first CPU. */
+- if (unlikely(pd->processed == 0))
+- pd->cpu = cpumask_first(pd->cpumask.pcpu);
+- else
+- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+- }
++ if (padata->seq_nr != processed)
++ goto notfound;
+
++ list_del_init(&padata->list);
+ spin_unlock(&reorder->lock);
+ return padata;
++
++notfound:
++ pd->processed = processed;
++ pd->cpu = cpu;
++ spin_unlock(&reorder->lock);
++ return NULL;
+ }
+
+-static void padata_reorder(struct parallel_data *pd)
++static void padata_reorder(struct padata_priv *padata)
+ {
++ struct parallel_data *pd = padata->pd;
+ struct padata_instance *pinst = pd->ps->pinst;
+- int cb_cpu;
+- struct padata_priv *padata;
+- struct padata_serial_queue *squeue;
+- struct padata_list *reorder;
++ unsigned int processed;
++ int cpu;
+
+- /*
+- * We need to ensure that only one cpu can work on dequeueing of
+- * the reorder queue the time. Calculating in which percpu reorder
+- * queue the next object will arrive takes some time. A spinlock
+- * would be highly contended. Also it is not clear in which order
+- * the objects arrive to the reorder queues. So a cpu could wait to
+- * get the lock just to notice that there is nothing to do at the
+- * moment. Therefore we use a trylock and let the holder of the lock
+- * care for all the objects enqueued during the holdtime of the lock.
+- */
+- if (!spin_trylock_bh(&pd->lock))
+- return;
++ processed = pd->processed;
++ cpu = pd->cpu;
+
+- while (1) {
+- padata = padata_find_next(pd, true);
++ do {
++ struct padata_serial_queue *squeue;
++ int cb_cpu;
+
+- /*
+- * If the next object that needs serialization is parallel
+- * processed by another cpu and is still on it's way to the
+- * cpu's reorder queue, nothing to do for now.
+- */
+- if (!padata)
+- break;
++ cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
++ processed++;
+
+ cb_cpu = padata->cb_cpu;
+ squeue = per_cpu_ptr(pd->squeue, cb_cpu);
+
+ spin_lock(&squeue->serial.lock);
+ list_add_tail(&padata->list, &squeue->serial.list);
+- spin_unlock(&squeue->serial.lock);
+-
+ queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
+- }
+
+- spin_unlock_bh(&pd->lock);
+-
+- /*
+- * The next object that needs serialization might have arrived to
+- * the reorder queues in the meantime.
+- *
+- * Ensure reorder queue is read after pd->lock is dropped so we see
+- * new objects from another task in padata_do_serial. Pairs with
+- * smp_mb in padata_do_serial.
+- */
+- smp_mb();
+-
+- reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
+- if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+ /*
+- * Other context(eg. the padata_serial_worker) can finish the request.
+- * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
++ * If the next object that needs serialization is parallel
++ * processed by another cpu and is still on it's way to the
++ * cpu's reorder queue, end the loop.
+ */
+- padata_get_pd(pd);
+- if (!queue_work(pinst->serial_wq, &pd->reorder_work))
+- padata_put_pd(pd);
+- }
+-}
+-
+-static void invoke_padata_reorder(struct work_struct *work)
+-{
+- struct parallel_data *pd;
+-
+- local_bh_disable();
+- pd = container_of(work, struct parallel_data, reorder_work);
+- padata_reorder(pd);
+- local_bh_enable();
+- /* Pairs with putting the reorder_work in the serial_wq */
+- padata_put_pd(pd);
++ padata = padata_find_next(pd, cpu, processed);
++ spin_unlock(&squeue->serial.lock);
++ } while (padata);
+ }
+
+ static void padata_serial_worker(struct work_struct *serial_work)
+@@ -419,6 +367,7 @@ void padata_do_serial(struct padata_priv *padata)
+ struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
+ struct padata_priv *cur;
+ struct list_head *pos;
++ bool gotit = true;
+
+ spin_lock(&reorder->lock);
+ /* Sort in ascending order of sequence number. */
+@@ -428,17 +377,14 @@ void padata_do_serial(struct padata_priv *padata)
+ if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
+ break;
+ }
+- list_add(&padata->list, pos);
++ if (padata->seq_nr != pd->processed) {
++ gotit = false;
++ list_add(&padata->list, pos);
++ }
+ spin_unlock(&reorder->lock);
+
+- /*
+- * Ensure the addition to the reorder list is ordered correctly
+- * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
+- * in padata_reorder.
+- */
+- smp_mb();
+-
+- padata_reorder(pd);
++ if (gotit)
++ padata_reorder(padata);
+ }
+ EXPORT_SYMBOL(padata_do_serial);
+
+@@ -625,9 +571,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
+ padata_init_squeues(pd);
+ pd->seq_nr = -1;
+ refcount_set(&pd->refcnt, 1);
+- spin_lock_init(&pd->lock);
+ pd->cpu = cpumask_first(pd->cpumask.pcpu);
+- INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
+
+ return pd;
+
+@@ -1137,12 +1081,6 @@ void padata_free_shell(struct padata_shell *ps)
+ if (!ps)
+ return;
+
+- /*
+- * Wait for all _do_serial calls to finish to avoid touching
+- * freed pd's and ps's.
+- */
+- synchronize_rcu();
+-
+ mutex_lock(&ps->pinst->lock);
+ list_del(&ps->list);
+ pd = rcu_dereference_protected(ps->pd, 1);
+--
+2.53.0
+
--- /dev/null
+From b572728411f93463d175b597460540c9e14ba71f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 15:47:14 +0800
+Subject: padata: Remove comment for reorder_work
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 82a0302e7167d0b7c6cde56613db3748f8dd806d ]
+
+Remove comment for reorder_work which no longer exists.
+
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Fixes: 71203f68c774 ("padata: Fix pd UAF once and for all")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/padata.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 9ca779d7e310e..6f07e12a43819 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -90,7 +90,6 @@ struct padata_cpumask {
+ * @processed: Number of already processed objects.
+ * @cpu: Next CPU to be processed.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
+- * @reorder_work: work struct for reordering.
+ */
+ struct parallel_data {
+ struct padata_shell *ps;
+--
+2.53.0
+
--- /dev/null
+From 2c6c201bca87537f91b4413923a9975142d8277d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 14:49:50 +0800
+Subject: regset: use kvzalloc() for regset_get_alloc()
+
+From: Douglas Anderson <dianders@chromium.org>
+
+commit 6b839b3b76cf17296ebd4a893841f32cae08229c upstream.
+
+While browsing through ChromeOS crash reports, I found one with an
+allocation failure that looked like this:
+
+ chrome: page allocation failure: order:7,
+ mode:0x40dc0(GFP_KERNEL|__GFP_COMP|__GFP_ZERO),
+ nodemask=(null),cpuset=urgent,mems_allowed=0
+ CPU: 7 PID: 3295 Comm: chrome Not tainted
+ 5.15.133-20574-g8044615ac35c #1 (HASH:1162 1)
+ Hardware name: Google Lazor (rev3 - 8) with KB Backlight (DT)
+ Call trace:
+ ...
+ warn_alloc+0x104/0x174
+ __alloc_pages+0x5f0/0x6e4
+ kmalloc_order+0x44/0x98
+ kmalloc_order_trace+0x34/0x124
+ __kmalloc+0x228/0x36c
+ __regset_get+0x68/0xcc
+ regset_get_alloc+0x1c/0x28
+ elf_core_dump+0x3d8/0xd8c
+ do_coredump+0xeb8/0x1378
+ get_signal+0x14c/0x804
+ ...
+
+An order 7 allocation is (1 << 7) contiguous pages, or 512K. It's not
+a surprise that this allocation failed on a system that's been running
+for a while.
+
+More digging showed that it was fairly easy to see the order 7
+allocation by just sending a SIGQUIT to chrome (or other processes) to
+generate a core dump. The actual amount being allocated was 279,584
+bytes and it was for "core_note_type" NT_ARM_SVE.
+
+There was quite a bit of discussion [1] on the mailing lists in
+response to my v1 patch attempting to switch to vmalloc. The overall
+conclusion was that we could likely reduce the 279,584 byte allocation
+by quite a bit and Mark Brown has sent a patch to that effect [2].
+However even with the 279,584 byte allocation gone there are still
+65,552 byte allocations. These are just barely more than the 65,536
+bytes and thus would require an order 5 allocation.
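+
+The order arithmetic, for reference (assuming 4K pages; kmalloc rounds
+large sizes up to a power-of-two number of pages):
+
+  279,584 bytes -> 68.3 pages -> rounds up to 128 pages = order 7 (512K)
+   65,552 bytes -> 16 pages + 16 bytes -> 17 pages -> 32 pages = order 5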
+
+An order 5 allocation is still something to avoid unless necessary and
+nothing needs the memory here to be contiguous. Change the allocation
+to kvzalloc() which should still be efficient for small allocations
+but doesn't force the memory subsystem to work hard (and maybe fail)
+at getting a large contiguous chunk.
+
+[1] https://lore.kernel.org/r/20240201171159.1.Id9ad163b60d21c9e56c2d686b0cc9083a8ba7924@changeid
+[2] https://lore.kernel.org/r/20240203-arm64-sve-ptrace-regset-size-v1-1-2c3ba1386b9e@kernel.org
+
+Link: https://lkml.kernel.org/r/20240205092626.v2.1.Id9ad163b60d21c9e56c2d686b0cc9083a8ba7924@changeid
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Al Viro <viro@ZenIV.linux.org.uk>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Dave Martin <Dave.Martin@arm.com>
+Cc: Eric Biederman <ebiederm@xmission.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Will Deacon <will@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Wen Yang <wen.yang@linux.dev>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/binfmt_elf.c | 2 +-
+ kernel/regset.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 762704eed9ce9..2fa739f2f7bb8 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -2014,7 +2014,7 @@ static void free_note_info(struct elf_note_info *info)
+ threads = t->next;
+ WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
+ for (i = 1; i < info->thread_notes; ++i)
+- kfree(t->notes[i].data);
++ kvfree(t->notes[i].data);
+ kfree(t);
+ }
+ kfree(info->psinfo.data);
+diff --git a/kernel/regset.c b/kernel/regset.c
+index 586823786f397..b2871fa68b2a7 100644
+--- a/kernel/regset.c
++++ b/kernel/regset.c
+@@ -16,14 +16,14 @@ static int __regset_get(struct task_struct *target,
+ if (size > regset->n * regset->size)
+ size = regset->n * regset->size;
+ if (!p) {
+- to_free = p = kzalloc(size, GFP_KERNEL);
++ to_free = p = kvzalloc(size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ }
+ res = regset->regset_get(target, regset,
+ (struct membuf){.p = p, .left = size});
+ if (res < 0) {
+- kfree(to_free);
++ kvfree(to_free);
+ return res;
+ }
+ *data = p;
+@@ -71,6 +71,6 @@ int copy_regset_to_user(struct task_struct *target,
+ ret = regset_get_alloc(target, regset, size, &buf);
+ if (ret > 0)
+ ret = copy_to_user(data, buf, ret) ? -EFAULT : 0;
+- kfree(buf);
++ kvfree(buf);
+ return ret;
+ }
+--
+2.53.0
+
ibmasm-fix-oob-reads-in-command_file_write-due-to-missing-size-checks.patch
ibmasm-fix-heap-over-read-in-ibmasm_send_i2o_message.patch
firmware-google-framebuffer-do-not-mark-framebuffer-as-busy.patch
+padata-fix-pd-uaf-once-and-for-all.patch
+padata-remove-comment-for-reorder_work.patch
+drm-amdgpu-use-vmemdup_array_user-in-amdgpu_bo_creat.patch
+drm-amdgpu-limit-bo-list-entry-count-to-prevent-reso.patch
+net-enetc-fix-the-deadlock-of-enetc_mdio_lock.patch
+blk-mq-fix-null-dereference-on-q-elevator-in-blk_mq_.patch
+arm64-set-__exception_irq_entry-with-__irq_entry-as-.patch
+regset-use-kvzalloc-for-regset_get_alloc.patch
--- /dev/null
+From 615616f8a49481419ac0af5866ea53c5117ed5b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:51:57 +0800
+Subject: drm/amdgpu: Limit BO list entry count to prevent resource exhaustion
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jesse.Zhang <Jesse.Zhang@amd.com>
+
+[ Upstream commit 6270b1a5dab94665d7adce3dc78bc9066ed28bdd ]
+
+Userspace can pass an arbitrary number of BO list entries via the
+bo_number field. Although the previous multiplication overflow check
+prevents out-of-bounds allocation, a large number of entries could still
+cause excessive memory allocation (up to potentially gigabytes) and
+unnecessarily long list processing times.
+
+Introduce a hard limit of 128k entries per BO list, which is more than
+sufficient for any realistic use case (e.g., a single list containing all
+buffers in a large scene). This prevents memory exhaustion attacks and
+ensures predictable performance.
+
+Return -EINVAL if the requested entry count exceeds the limit.
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Suggested-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 688b87d39e0aa8135105b40dc167d74b5ada5332)
+Cc: stable@vger.kernel.org
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 66fb37b643882..ded22f244adab 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -36,6 +36,7 @@
+
+ #define AMDGPU_BO_LIST_MAX_PRIORITY 32u
+ #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
++#define AMDGPU_BO_LIST_MAX_ENTRIES (128 * 1024)
+
+ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+ {
+@@ -190,6 +191,9 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ const uint32_t bo_number = in->bo_number;
+ struct drm_amdgpu_bo_list_entry *info;
+
++ if (bo_number > AMDGPU_BO_LIST_MAX_ENTRIES)
++ return -EINVAL;
++
+ /* copy the handle array from userspace to a kernel buffer */
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+--
+2.53.0
+
--- /dev/null
+From e649837b98414cbd9bd9730f5c41280c0d4863bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:51:41 +0800
+Subject: drm/amdgpu: Use vmemdup_array_user in
+ amdgpu_bo_create_list_entry_array
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+[ Upstream commit c4ac100e9ae252b09986766ad23b1f83ca3a369d ]
+
+Replace kvmalloc_array() + copy_from_user() with vmemdup_array_user() on
+the fast path.
+
+This shrinks the source code and improves separation between the kernel
+and userspace slabs.
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 41 +++++++++------------
+ 1 file changed, 17 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 702f6610d0243..66fb37b643882 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -184,43 +184,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
+ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
+ {
+- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
++ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
++ const uint32_t bo_info_size = in->bo_info_size;
++ const uint32_t bo_number = in->bo_number;
+ struct drm_amdgpu_bo_list_entry *info;
+- int r;
+-
+- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
+- if (!info)
+- return -ENOMEM;
+
+ /* copy the handle array from userspace to a kernel buffer */
+- r = -EFAULT;
+- if (likely(info_size == in->bo_info_size)) {
+- unsigned long bytes = in->bo_number *
+- in->bo_info_size;
+-
+- if (copy_from_user(info, uptr, bytes))
+- goto error_free;
+-
++ if (likely(info_size == bo_info_size)) {
++ info = vmemdup_array_user(uptr, bo_number, info_size);
++ if (IS_ERR(info))
++ return PTR_ERR(info);
+ } else {
+- unsigned long bytes = min(in->bo_info_size, info_size);
++ const uint32_t bytes = min(bo_info_size, info_size);
+ unsigned i;
+
+- memset(info, 0, in->bo_number * info_size);
+- for (i = 0; i < in->bo_number; ++i) {
+- if (copy_from_user(&info[i], uptr, bytes))
+- goto error_free;
++ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
+
+- uptr += in->bo_info_size;
++ memset(info, 0, bo_number * info_size);
++ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
++ if (copy_from_user(&info[i], uptr, bytes)) {
++ kvfree(info);
++ return -EFAULT;
++ }
+ }
+ }
+
+ *info_param = info;
+ return 0;
+-
+-error_free:
+- kvfree(info);
+- return r;
+ }
+
+ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+--
+2.53.0
+
--- /dev/null
+From 193830d22a5ec868a431ecda42002dc6c5cce09f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 16:14:29 +0800
+Subject: padata: Fix pd UAF once and for all
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 71203f68c7749609d7fc8ae6ad054bdedeb24f91 ]
+
+There is a race condition/UAF in padata_reorder that goes back
+to the initial commit. A reference count is taken at the start
+of the process in padata_do_parallel, and released at the end in
+padata_serial_worker.
+
+This reference count is (and only is) required for padata_replace
+to function correctly. If padata_replace is never called then
+there is no issue.
+
+In the function padata_reorder, which serves as the core of padata, as
+soon as a padata is added to queue->serial.list and the associated
+spin lock is released, that padata may be processed and the reference
+count on pd may go away.
+
+Fix this by getting the next padata before the squeue->serial lock
+is released.
+
+In order to make this possible, simplify padata_reorder by only
+calling it once the next padata arrives.
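+
+In simplified pseudo-C, the new loop shape looks like this (a sketch,
+not the exact code; next_cpu() and serial_queue_of() are placeholders
+for the cpumask and per-cpu lookups in the real function):
+
+  do {
+          cpu = next_cpu(cpu);
+          processed++;
+
+          squeue = serial_queue_of(padata->cb_cpu);
+          spin_lock(&squeue->serial.lock);
+          list_add_tail(&padata->list, &squeue->serial.list);
+          queue_work_on(padata->cb_cpu, wq, &squeue->work);
+
+          /* Pick the next object before dropping the lock, so pd
+           * cannot be freed once this object becomes visible. */
+          padata = padata_find_next(pd, cpu, processed);
+          spin_unlock(&squeue->serial.lock);
+  } while (padata);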
+
+Fixes: 16295bec6398 ("padata: Generic parallelization/serialization interface")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ Adjust context of padata_find_next(). Replace
+cpumask_next_wrap(cpu, pd->cpumask.pcpu) with
+cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false) in padata_reorder()
+in v6.12, according to dc5bb9b769c9 ("cpumask: deprecate
+cpumask_next_wrap()") and f954a2d37637 ("padata: switch
+padata_find_next() to using cpumask_next_wrap()"). ]
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/padata.h | 3 -
+ kernel/padata.c | 136 +++++++++++------------------------------
+ 2 files changed, 37 insertions(+), 102 deletions(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 0146daf344306..b486c7359de2b 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -91,7 +91,6 @@ struct padata_cpumask {
+ * @cpu: Next CPU to be processed.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
+ * @reorder_work: work struct for reordering.
+- * @lock: Reorder lock.
+ */
+ struct parallel_data {
+ struct padata_shell *ps;
+@@ -102,8 +101,6 @@ struct parallel_data {
+ unsigned int processed;
+ int cpu;
+ struct padata_cpumask cpumask;
+- struct work_struct reorder_work;
+- spinlock_t ____cacheline_aligned lock;
+ };
+
+ /**
+diff --git a/kernel/padata.c b/kernel/padata.c
+index c3810f5bd7156..e61bdc248551f 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -261,20 +261,17 @@ EXPORT_SYMBOL(padata_do_parallel);
+ * be parallel processed by another cpu and is not yet present in
+ * the cpu's reorder queue.
+ */
+-static struct padata_priv *padata_find_next(struct parallel_data *pd,
+- bool remove_object)
++static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu,
++ unsigned int processed)
+ {
+ struct padata_priv *padata;
+ struct padata_list *reorder;
+- int cpu = pd->cpu;
+
+ reorder = per_cpu_ptr(pd->reorder_list, cpu);
+
+ spin_lock(&reorder->lock);
+- if (list_empty(&reorder->list)) {
+- spin_unlock(&reorder->lock);
+- return NULL;
+- }
++ if (list_empty(&reorder->list))
++ goto notfound;
+
+ padata = list_entry(reorder->list.next, struct padata_priv, list);
+
+@@ -282,101 +279,52 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
+ * Checks the rare case where two or more parallel jobs have hashed to
+ * the same CPU and one of the later ones finishes first.
+ */
+- if (padata->seq_nr != pd->processed) {
+- spin_unlock(&reorder->lock);
+- return NULL;
+- }
+-
+- if (remove_object) {
+- list_del_init(&padata->list);
+- ++pd->processed;
+- /* When sequence wraps around, reset to the first CPU. */
+- if (unlikely(pd->processed == 0))
+- pd->cpu = cpumask_first(pd->cpumask.pcpu);
+- else
+- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+- }
++ if (padata->seq_nr != processed)
++ goto notfound;
+
++ list_del_init(&padata->list);
+ spin_unlock(&reorder->lock);
+ return padata;
++
++notfound:
++ pd->processed = processed;
++ pd->cpu = cpu;
++ spin_unlock(&reorder->lock);
++ return NULL;
+ }
+
+-static void padata_reorder(struct parallel_data *pd)
++static void padata_reorder(struct padata_priv *padata)
+ {
++ struct parallel_data *pd = padata->pd;
+ struct padata_instance *pinst = pd->ps->pinst;
+- int cb_cpu;
+- struct padata_priv *padata;
+- struct padata_serial_queue *squeue;
+- struct padata_list *reorder;
++ unsigned int processed;
++ int cpu;
+
+- /*
+- * We need to ensure that only one cpu can work on dequeueing of
+- * the reorder queue the time. Calculating in which percpu reorder
+- * queue the next object will arrive takes some time. A spinlock
+- * would be highly contended. Also it is not clear in which order
+- * the objects arrive to the reorder queues. So a cpu could wait to
+- * get the lock just to notice that there is nothing to do at the
+- * moment. Therefore we use a trylock and let the holder of the lock
+- * care for all the objects enqueued during the holdtime of the lock.
+- */
+- if (!spin_trylock_bh(&pd->lock))
+- return;
++ processed = pd->processed;
++ cpu = pd->cpu;
+
+- while (1) {
+- padata = padata_find_next(pd, true);
++ do {
++ struct padata_serial_queue *squeue;
++ int cb_cpu;
+
+- /*
+- * If the next object that needs serialization is parallel
+- * processed by another cpu and is still on it's way to the
+- * cpu's reorder queue, nothing to do for now.
+- */
+- if (!padata)
+- break;
++ cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
++ processed++;
+
+ cb_cpu = padata->cb_cpu;
+ squeue = per_cpu_ptr(pd->squeue, cb_cpu);
+
+ spin_lock(&squeue->serial.lock);
+ list_add_tail(&padata->list, &squeue->serial.list);
+- spin_unlock(&squeue->serial.lock);
+-
+ queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
+- }
+
+- spin_unlock_bh(&pd->lock);
+-
+- /*
+- * The next object that needs serialization might have arrived to
+- * the reorder queues in the meantime.
+- *
+- * Ensure reorder queue is read after pd->lock is dropped so we see
+- * new objects from another task in padata_do_serial. Pairs with
+- * smp_mb in padata_do_serial.
+- */
+- smp_mb();
+-
+- reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
+- if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+ /*
+- * Other context(eg. the padata_serial_worker) can finish the request.
+- * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
++ * If the next object that needs serialization is parallel
++ * processed by another cpu and is still on it's way to the
++ * cpu's reorder queue, end the loop.
+ */
+- padata_get_pd(pd);
+- if (!queue_work(pinst->serial_wq, &pd->reorder_work))
+- padata_put_pd(pd);
+- }
+-}
+-
+-static void invoke_padata_reorder(struct work_struct *work)
+-{
+- struct parallel_data *pd;
+-
+- local_bh_disable();
+- pd = container_of(work, struct parallel_data, reorder_work);
+- padata_reorder(pd);
+- local_bh_enable();
+- /* Pairs with putting the reorder_work in the serial_wq */
+- padata_put_pd(pd);
++ padata = padata_find_next(pd, cpu, processed);
++ spin_unlock(&squeue->serial.lock);
++ } while (padata);
+ }
+
+ static void padata_serial_worker(struct work_struct *serial_work)
+@@ -427,6 +375,7 @@ void padata_do_serial(struct padata_priv *padata)
+ struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
+ struct padata_priv *cur;
+ struct list_head *pos;
++ bool gotit = true;
+
+ spin_lock(&reorder->lock);
+ /* Sort in ascending order of sequence number. */
+@@ -436,17 +385,14 @@ void padata_do_serial(struct padata_priv *padata)
+ if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
+ break;
+ }
+- list_add(&padata->list, pos);
++ if (padata->seq_nr != pd->processed) {
++ gotit = false;
++ list_add(&padata->list, pos);
++ }
+ spin_unlock(&reorder->lock);
+
+- /*
+- * Ensure the addition to the reorder list is ordered correctly
+- * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
+- * in padata_reorder.
+- */
+- smp_mb();
+-
+- padata_reorder(pd);
++ if (gotit)
++ padata_reorder(padata);
+ }
+ EXPORT_SYMBOL(padata_do_serial);
+
+@@ -643,9 +589,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
+ padata_init_squeues(pd);
+ pd->seq_nr = -1;
+ refcount_set(&pd->refcnt, 1);
+- spin_lock_init(&pd->lock);
+ pd->cpu = cpumask_first(pd->cpumask.pcpu);
+- INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
+
+ return pd;
+
+@@ -1155,12 +1099,6 @@ void padata_free_shell(struct padata_shell *ps)
+ if (!ps)
+ return;
+
+- /*
+- * Wait for all _do_serial calls to finish to avoid touching
+- * freed pd's and ps's.
+- */
+- synchronize_rcu();
+-
+ mutex_lock(&ps->pinst->lock);
+ list_del(&ps->list);
+ pd = rcu_dereference_protected(ps->pd, 1);
+--
+2.53.0
+
--- /dev/null
+From 35f4e1155036287c8b5df74826c0ed56281bb006 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 16:14:30 +0800
+Subject: padata: Remove comment for reorder_work
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 82a0302e7167d0b7c6cde56613db3748f8dd806d ]
+
+Remove comment for reorder_work which no longer exists.
+
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Fixes: 71203f68c774 ("padata: Fix pd UAF once and for all")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/padata.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index b486c7359de2b..765f2778e264a 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -90,7 +90,6 @@ struct padata_cpumask {
+ * @processed: Number of already processed objects.
+ * @cpu: Next CPU to be processed.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
+- * @reorder_work: work struct for reordering.
+ */
+ struct parallel_data {
+ struct padata_shell *ps;
+--
+2.53.0
+
--- /dev/null
+From baae10961fc9741ffda4fd678283ca6c76fbbd98 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 01:21:13 +0200
+Subject: rust: init: fix `clippy::undocumented_unsafe_blocks` warnings
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+The stable backport in commit acc105db0826 ("rust: pin-init:
+add references to previously initialized fields") introduced some
+`clippy::undocumented_unsafe_blocks` warnings [1], e.g.
+
+ error: unsafe block missing a safety comment
+ --> rust/kernel/init/macros.rs:1015:25
+
+As well as:
+
+ --> rust/kernel/init/macros.rs:1243:45
+ --> rust/kernel/init/macros.rs:1286:22
+ --> rust/kernel/init/macros.rs:1374:45
+
+After discussing it with Benno and Gary, we decided to clean the build
+log by doing a minimal targeted stable commit.
+
+Thus, depending on the case:
+
+ - Reorder the attributes so that the existing `// SAFETY:` comments
+ may be seen by Clippy.
+
+ - Add a placeholder `// SAFETY: TODO.` comment.
+
+Cc: Benno Lossin <lossin@kernel.org>
+Cc: Gary Guo <gary@garyguo.net>
+Fixes: acc105db0826 ("rust: pin-init: add references to previously initialized fields")
+Link: https://lore.kernel.org/stable/20260421111111.57059-1-ojeda@kernel.org/ [1]
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/init/macros.rs | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/rust/kernel/init/macros.rs b/rust/kernel/init/macros.rs
+index e477e4de817bf..d6e27c5221155 100644
+--- a/rust/kernel/init/macros.rs
++++ b/rust/kernel/init/macros.rs
+@@ -1012,6 +1012,7 @@ macro_rules! __pin_data {
+ self,
+ slot: &'__slot mut $p_type,
+ ) -> ::core::pin::Pin<&'__slot mut $p_type> {
++ // SAFETY: TODO.
+ unsafe { ::core::pin::Pin::new_unchecked(slot) }
+ }
+ )*
+@@ -1235,11 +1236,11 @@ macro_rules! __init_internal {
+ // Unaligned fields will cause the compiler to emit E0793. We do not support
+ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+ // `ptr::write` below has the same requirement.
++ #[allow(unused_variables, unused_assignments)]
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+- #[allow(unused_variables, unused_assignments)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
+ // Create the drop guard:
+@@ -1278,11 +1279,11 @@ macro_rules! __init_internal {
+ // Unaligned fields will cause the compiler to emit E0793. We do not support
+ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+ // `ptr::write` below has the same requirement.
++ #[allow(unused_variables, unused_assignments)]
+ // SAFETY:
+ // - the field is not structurally pinned, since the line above must compile,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+- #[allow(unused_variables, unused_assignments)]
+ let $field = unsafe { &mut (*$slot).$field };
+
+ // Create the drop guard:
+@@ -1366,11 +1367,11 @@ macro_rules! __init_internal {
+ // Unaligned fields will cause the compiler to emit E0793. We do not support
+ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+ // `ptr::write` below has the same requirement.
++ #[allow(unused_variables, unused_assignments)]
+ // SAFETY:
+ // - the project function does the correct field projection,
+ // - the field has been initialized,
+ // - the reference is only valid until the end of the initializer.
+- #[allow(unused_variables, unused_assignments)]
+ let $field = $crate::macros::paste!(unsafe { $data.[< __project_ $field >](&mut (*$slot).$field) });
+
+ // Create the drop guard:
+--
+2.53.0
+
kbuild-rust-allow-clippy-uninlined_format_args.patch
firmware-google-framebuffer-do-not-mark-framebuffer-as-busy.patch
arm64-mm-enable-batched-tlb-flush-in-unmap_hotplug_range.patch
+padata-fix-pd-uaf-once-and-for-all.patch
+padata-remove-comment-for-reorder_work.patch
+rust-init-fix-clippy-undocumented_unsafe_blocks-warn.patch
+drm-amdgpu-use-vmemdup_array_user-in-amdgpu_bo_creat.patch
+drm-amdgpu-limit-bo-list-entry-count-to-prevent-reso.patch
--- /dev/null
+From 14c8d143fc4f2dce5b9f942c65f86f1a5871fd38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 11:38:16 +0800
+Subject: Bluetooth: MGMT: Fix possible UAFs
+
+From: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+
+[ Upstream commit 302a1f674c00dd5581ab8e493ef44767c5101aab ]
+
+This attempts to fix possible UAFs caused by struct mgmt_pending being
+freed while still being processed, as in the trace below. To fix this,
+mgmt_pending_valid is introduced and used to check that the
+mgmt_pending hasn't been removed from the pending list. In the
+complete callbacks it is used to both check and remove the cmd from
+the list while holding mgmt_pending_lock, avoiding TOCTOU problems:
+if the cmd were left on the list it could still be accessed and freed.
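+
+In outline, the pattern applied to the complete callbacks looks like
+this (a simplified sketch; some_complete() is a hypothetical example):
+
+  static void some_complete(struct hci_dev *hdev, void *data, int err)
+  {
+          struct mgmt_pending_cmd *cmd = data;
+
+          /* Check listing and unlink atomically under
+           * mgmt_pending_lock, so no other context can free cmd
+           * while it is still being used here. */
+          if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+                  return;
+
+          /* ... use cmd->param, cmd->sk ... */
+
+          mgmt_pending_free(cmd); /* already unlinked above */
+  }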
+
+BUG: KASAN: slab-use-after-free in mgmt_add_adv_patterns_monitor_sync+0x35/0x50 net/bluetooth/mgmt.c:5223
+Read of size 8 at addr ffff8880709d4dc0 by task kworker/u11:0/55
+
+CPU: 0 UID: 0 PID: 55 Comm: kworker/u11:0 Not tainted 6.16.4 #2 PREEMPT(full)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.10.2-1ubuntu1 04/01/2014
+Workqueue: hci0 hci_cmd_sync_work
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
+ print_address_description mm/kasan/report.c:378 [inline]
+ print_report+0xca/0x240 mm/kasan/report.c:482
+ kasan_report+0x118/0x150 mm/kasan/report.c:595
+ mgmt_add_adv_patterns_monitor_sync+0x35/0x50 net/bluetooth/mgmt.c:5223
+ hci_cmd_sync_work+0x210/0x3a0 net/bluetooth/hci_sync.c:332
+ process_one_work kernel/workqueue.c:3238 [inline]
+ process_scheduled_works+0xade/0x17b0 kernel/workqueue.c:3321
+ worker_thread+0x8a0/0xda0 kernel/workqueue.c:3402
+ kthread+0x711/0x8a0 kernel/kthread.c:464
+ ret_from_fork+0x3fc/0x770 arch/x86/kernel/process.c:148
+ ret_from_fork_asm+0x1a/0x30 home/kwqcheii/source/fuzzing/kernel/kasan/linux-6.16.4/arch/x86/entry/entry_64.S:245
+ </TASK>
+
+Allocated by task 12210:
+ kasan_save_stack mm/kasan/common.c:47 [inline]
+ kasan_save_track+0x3e/0x80 mm/kasan/common.c:68
+ poison_kmalloc_redzone mm/kasan/common.c:377 [inline]
+ __kasan_kmalloc+0x93/0xb0 mm/kasan/common.c:394
+ kasan_kmalloc include/linux/kasan.h:260 [inline]
+ __kmalloc_cache_noprof+0x230/0x3d0 mm/slub.c:4364
+ kmalloc_noprof include/linux/slab.h:905 [inline]
+ kzalloc_noprof include/linux/slab.h:1039 [inline]
+ mgmt_pending_new+0x65/0x1e0 net/bluetooth/mgmt_util.c:269
+ mgmt_pending_add+0x35/0x140 net/bluetooth/mgmt_util.c:296
+ __add_adv_patterns_monitor+0x130/0x200 net/bluetooth/mgmt.c:5247
+ add_adv_patterns_monitor+0x214/0x360 net/bluetooth/mgmt.c:5364
+ hci_mgmt_cmd+0x9c9/0xef0 net/bluetooth/hci_sock.c:1719
+ hci_sock_sendmsg+0x6ca/0xef0 net/bluetooth/hci_sock.c:1839
+ sock_sendmsg_nosec net/socket.c:714 [inline]
+ __sock_sendmsg+0x219/0x270 net/socket.c:729
+ sock_write_iter+0x258/0x330 net/socket.c:1133
+ new_sync_write fs/read_write.c:593 [inline]
+ vfs_write+0x5c9/0xb30 fs/read_write.c:686
+ ksys_write+0x145/0x250 fs/read_write.c:738
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Freed by task 12221:
+ kasan_save_stack mm/kasan/common.c:47 [inline]
+ kasan_save_track+0x3e/0x80 mm/kasan/common.c:68
+ kasan_save_free_info+0x46/0x50 mm/kasan/generic.c:576
+ poison_slab_object mm/kasan/common.c:247 [inline]
+ __kasan_slab_free+0x62/0x70 mm/kasan/common.c:264
+ kasan_slab_free include/linux/kasan.h:233 [inline]
+ slab_free_hook mm/slub.c:2381 [inline]
+ slab_free mm/slub.c:4648 [inline]
+ kfree+0x18e/0x440 mm/slub.c:4847
+ mgmt_pending_free net/bluetooth/mgmt_util.c:311 [inline]
+ mgmt_pending_foreach+0x30d/0x380 net/bluetooth/mgmt_util.c:257
+ __mgmt_power_off+0x169/0x350 net/bluetooth/mgmt.c:9444
+ hci_dev_close_sync+0x754/0x1330 net/bluetooth/hci_sync.c:5290
+ hci_dev_do_close net/bluetooth/hci_core.c:501 [inline]
+ hci_dev_close+0x108/0x200 net/bluetooth/hci_core.c:526
+ sock_do_ioctl+0xd9/0x300 net/socket.c:1192
+ sock_ioctl+0x576/0x790 net/socket.c:1313
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:907 [inline]
+ __se_sys_ioctl+0xf9/0x170 fs/ioctl.c:893
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Fixes: cf75ad8b41d2 ("Bluetooth: hci_sync: Convert MGMT_SET_POWERED")
+Fixes: 2bd1b237616b ("Bluetooth: hci_sync: Convert MGMT_OP_SET_DISCOVERABLE to use cmd_sync")
+Fixes: f056a65783cc ("Bluetooth: hci_sync: Convert MGMT_OP_SET_CONNECTABLE to use cmd_sync")
+Fixes: 3244845c6307 ("Bluetooth: hci_sync: Convert MGMT_OP_SSP")
+Fixes: d81a494c43df ("Bluetooth: hci_sync: Convert MGMT_OP_SET_LE")
+Fixes: b338d91703fa ("Bluetooth: Implement support for Mesh")
+Fixes: 6f6ff38a1e14 ("Bluetooth: hci_sync: Convert MGMT_OP_SET_LOCAL_NAME")
+Fixes: 71efbb08b538 ("Bluetooth: hci_sync: Convert MGMT_OP_SET_PHY_CONFIGURATION")
+Fixes: b747a83690c8 ("Bluetooth: hci_sync: Refactor add Adv Monitor")
+Fixes: abfeea476c68 ("Bluetooth: hci_sync: Convert MGMT_OP_START_DISCOVERY")
+Fixes: 26ac4c56f03f ("Bluetooth: hci_sync: Convert MGMT_OP_SET_ADVERTISING")
+Reported-by: cen zhang <zzzccc427@gmail.com>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Charles Xu <charles_xu@189.cn>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/mgmt.c | 259 ++++++++++++++++++++++++++------------
+ net/bluetooth/mgmt_util.c | 46 +++++++
+ net/bluetooth/mgmt_util.h | 3 +
+ 3 files changed, 231 insertions(+), 77 deletions(-)
+
+diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
+index 51a6ad6a36c8d..4bf6c0aae9673 100644
+--- a/net/bluetooth/mgmt.c
++++ b/net/bluetooth/mgmt.c
+@@ -1319,8 +1319,7 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
+ struct mgmt_mode *cp;
+
+ /* Make sure cmd still outstanding. */
+- if (err == -ECANCELED ||
+- cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
+ cp = cmd->param;
+@@ -1347,23 +1346,29 @@ static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
+ mgmt_status(err));
+ }
+
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+ }
+
+ static int set_powered_sync(struct hci_dev *hdev, void *data)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_mode *cp;
++ struct mgmt_mode cp;
++
++ mutex_lock(&hdev->mgmt_pending_lock);
+
+ /* Make sure cmd still outstanding. */
+- if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
++ if (!__mgmt_pending_listed(hdev, cmd)) {
++ mutex_unlock(&hdev->mgmt_pending_lock);
+ return -ECANCELED;
++ }
+
+- cp = cmd->param;
++ memcpy(&cp, cmd->param, sizeof(cp));
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ BT_DBG("%s", hdev->name);
+
+- return hci_set_powered_sync(hdev, cp->val);
++ return hci_set_powered_sync(hdev, cp.val);
+ }
+
+ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
+@@ -1504,8 +1509,7 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
+ bt_dev_dbg(hdev, "err %d", err);
+
+ /* Make sure cmd still outstanding. */
+- if (err == -ECANCELED ||
+- cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
+ hci_dev_lock(hdev);
+@@ -1527,12 +1531,15 @@ static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
+ new_settings(hdev, cmd->sk);
+
+ done:
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+ hci_dev_unlock(hdev);
+ }
+
+ static int set_discoverable_sync(struct hci_dev *hdev, void *data)
+ {
++ if (!mgmt_pending_listed(hdev, data))
++ return -ECANCELED;
++
+ BT_DBG("%s", hdev->name);
+
+ return hci_update_discoverable_sync(hdev);
+@@ -1679,8 +1686,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
+ bt_dev_dbg(hdev, "err %d", err);
+
+ /* Make sure cmd still outstanding. */
+- if (err == -ECANCELED ||
+- cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
+ hci_dev_lock(hdev);
+@@ -1696,7 +1702,7 @@ static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
+
+ done:
+ if (cmd)
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+
+ hci_dev_unlock(hdev);
+ }
+@@ -1732,6 +1738,9 @@ static int set_connectable_update_settings(struct hci_dev *hdev,
+
+ static int set_connectable_sync(struct hci_dev *hdev, void *data)
+ {
++ if (!mgmt_pending_listed(hdev, data))
++ return -ECANCELED;
++
+ BT_DBG("%s", hdev->name);
+
+ return hci_update_connectable_sync(hdev);
+@@ -1908,14 +1917,17 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ struct cmd_lookup match = { NULL, hdev };
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_mode *cp = cmd->param;
+- u8 enable = cp->val;
++ struct mgmt_mode *cp;
++ u8 enable;
+ bool changed;
+
+ /* Make sure cmd still outstanding. */
+- if (err == -ECANCELED || cmd != pending_find(MGMT_OP_SET_SSP, hdev))
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
++ cp = cmd->param;
++ enable = cp->val;
++
+ if (err) {
+ u8 mgmt_err = mgmt_status(err);
+
+@@ -1924,8 +1936,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ new_settings(hdev, NULL);
+ }
+
+- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true,
+- cmd_status_rsp, &mgmt_err);
++ mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err);
+ return;
+ }
+
+@@ -1935,7 +1946,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
+ }
+
+- mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, true, settings_rsp, &match);
++ settings_rsp(cmd, &match);
+
+ if (changed)
+ new_settings(hdev, match.sk);
+@@ -1949,14 +1960,25 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
+ static int set_ssp_sync(struct hci_dev *hdev, void *data)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_mode *cp = cmd->param;
++ struct mgmt_mode cp;
+ bool changed = false;
+ int err;
+
+- if (cp->val)
++ mutex_lock(&hdev->mgmt_pending_lock);
++
++ if (!__mgmt_pending_listed(hdev, cmd)) {
++ mutex_unlock(&hdev->mgmt_pending_lock);
++ return -ECANCELED;
++ }
++
++ memcpy(&cp, cmd->param, sizeof(cp));
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
++
++ if (cp.val)
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
+
+- err = hci_write_ssp_mode_sync(hdev, cp->val);
++ err = hci_write_ssp_mode_sync(hdev, cp.val);
+
+ if (!err && changed)
+ hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
+@@ -2049,32 +2071,50 @@ static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
+
+ static void set_le_complete(struct hci_dev *hdev, void *data, int err)
+ {
++ struct mgmt_pending_cmd *cmd = data;
+ struct cmd_lookup match = { NULL, hdev };
+ u8 status = mgmt_status(err);
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+- if (status) {
+- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, cmd_status_rsp,
+- &status);
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
+ return;
++
++ if (status) {
++ mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
++ goto done;
+ }
+
+- mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, true, settings_rsp, &match);
++ settings_rsp(cmd, &match);
+
+ new_settings(hdev, match.sk);
+
+ if (match.sk)
+ sock_put(match.sk);
++
++done:
++ mgmt_pending_free(cmd);
+ }
+
+ static int set_le_sync(struct hci_dev *hdev, void *data)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_mode *cp = cmd->param;
+- u8 val = !!cp->val;
++ struct mgmt_mode cp;
++ u8 val;
+ int err;
+
++ mutex_lock(&hdev->mgmt_pending_lock);
++
++ if (!__mgmt_pending_listed(hdev, cmd)) {
++ mutex_unlock(&hdev->mgmt_pending_lock);
++ return -ECANCELED;
++ }
++
++ memcpy(&cp, cmd->param, sizeof(cp));
++ val = !!cp.val;
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
++
+ if (!val) {
+ hci_clear_adv_instance_sync(hdev, NULL, 0x00, true);
+
+@@ -2116,7 +2156,12 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+ u8 status = mgmt_status(err);
+- struct sock *sk = cmd->sk;
++ struct sock *sk;
++
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
++ return;
++
++ sk = cmd->sk;
+
+ if (status) {
+ mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
+@@ -2131,24 +2176,37 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
+ static int set_mesh_sync(struct hci_dev *hdev, void *data)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_cp_set_mesh *cp = cmd->param;
+- size_t len = cmd->param_len;
++ struct mgmt_cp_set_mesh cp;
++ size_t len;
++
++ mutex_lock(&hdev->mgmt_pending_lock);
++
++ if (!__mgmt_pending_listed(hdev, cmd)) {
++ mutex_unlock(&hdev->mgmt_pending_lock);
++ return -ECANCELED;
++ }
++
++ memcpy(&cp, cmd->param, sizeof(cp));
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
++
++ len = cmd->param_len;
+
+ memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
+
+- if (cp->enable)
++ if (cp.enable)
+ hci_dev_set_flag(hdev, HCI_MESH);
+ else
+ hci_dev_clear_flag(hdev, HCI_MESH);
+
+- hdev->le_scan_interval = __le16_to_cpu(cp->period);
+- hdev->le_scan_window = __le16_to_cpu(cp->window);
++ hdev->le_scan_interval = __le16_to_cpu(cp.period);
++ hdev->le_scan_window = __le16_to_cpu(cp.window);
+
+- len -= sizeof(*cp);
++ len -= sizeof(cp);
+
+ /* If filters don't fit, forward all adv pkts */
+ if (len <= sizeof(hdev->mesh_ad_types))
+- memcpy(hdev->mesh_ad_types, cp->ad_types, len);
++ memcpy(hdev->mesh_ad_types, cp.ad_types, len);
+
+ hci_update_passive_scan_sync(hdev);
+ return 0;
+@@ -3802,15 +3860,16 @@ static int name_changed_sync(struct hci_dev *hdev, void *data)
+ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_cp_set_local_name *cp = cmd->param;
++ struct mgmt_cp_set_local_name *cp;
+ u8 status = mgmt_status(err);
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+- if (err == -ECANCELED ||
+- cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
++ cp = cmd->param;
++
+ if (status) {
+ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
+ status);
+@@ -3822,16 +3881,27 @@ static void set_name_complete(struct hci_dev *hdev, void *data, int err)
+ hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
+ }
+
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+ }
+
+ static int set_name_sync(struct hci_dev *hdev, void *data)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_cp_set_local_name *cp = cmd->param;
++ struct mgmt_cp_set_local_name cp;
++
++ mutex_lock(&hdev->mgmt_pending_lock);
++
++ if (!__mgmt_pending_listed(hdev, cmd)) {
++ mutex_unlock(&hdev->mgmt_pending_lock);
++ return -ECANCELED;
++ }
++
++ memcpy(&cp, cmd->param, sizeof(cp));
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
+
+ if (lmp_bredr_capable(hdev)) {
+- hci_update_name_sync(hdev, cp->name);
++ hci_update_name_sync(hdev, cp.name);
+ hci_update_eir_sync(hdev);
+ }
+
+@@ -3983,12 +4053,10 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
+ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct sk_buff *skb = cmd->skb;
++ struct sk_buff *skb;
+ u8 status = mgmt_status(err);
+
+- if (err == -ECANCELED ||
+- cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
+- return;
++ skb = cmd->skb;
+
+ if (!status) {
+ if (!skb)
+@@ -4015,7 +4083,7 @@ static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
+ if (skb && !IS_ERR(skb))
+ kfree_skb(skb);
+
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+ }
+
+ static int set_default_phy_sync(struct hci_dev *hdev, void *data)
+@@ -4023,7 +4091,9 @@ static int set_default_phy_sync(struct hci_dev *hdev, void *data)
+ struct mgmt_pending_cmd *cmd = data;
+ struct mgmt_cp_set_phy_configuration *cp = cmd->param;
+ struct hci_cp_le_set_default_phy cp_phy;
+- u32 selected_phys = __le32_to_cpu(cp->selected_phys);
++ u32 selected_phys;
++
++ selected_phys = __le32_to_cpu(cp->selected_phys);
+
+ memset(&cp_phy, 0, sizeof(cp_phy));
+
+@@ -4163,7 +4233,7 @@ static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
+ goto unlock;
+ }
+
+- cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
++ cmd = mgmt_pending_new(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
+ len);
+ if (!cmd)
+ err = -ENOMEM;
+@@ -5253,7 +5323,17 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
+ {
+ struct mgmt_rp_add_adv_patterns_monitor rp;
+ struct mgmt_pending_cmd *cmd = data;
+- struct adv_monitor *monitor = cmd->user_data;
++ struct adv_monitor *monitor;
++
++ /* This is likely the result of hdev being closed and mgmt_index_removed
++ * is attempting to clean up any pending command so
++ * hci_adv_monitors_clear is about to be called which will take care of
++ * freeing the adv_monitor instances.
++ */
++ if (status == -ECANCELED && !mgmt_pending_valid(hdev, cmd))
++ return;
++
++ monitor = cmd->user_data;
+
+ hci_dev_lock(hdev);
+
+@@ -5279,9 +5359,20 @@ static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev,
+ static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct adv_monitor *monitor = cmd->user_data;
++ struct adv_monitor *mon;
++
++ mutex_lock(&hdev->mgmt_pending_lock);
++
++ if (!__mgmt_pending_listed(hdev, cmd)) {
++ mutex_unlock(&hdev->mgmt_pending_lock);
++ return -ECANCELED;
++ }
++
++ mon = cmd->user_data;
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
+
+- return hci_add_adv_monitor(hdev, monitor);
++ return hci_add_adv_monitor(hdev, mon);
+ }
+
+ static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
+@@ -5548,7 +5639,8 @@ static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
+ status);
+ }
+
+-static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
++static void read_local_oob_data_complete(struct hci_dev *hdev, void *data,
++ int err)
+ {
+ struct mgmt_rp_read_local_oob_data mgmt_rp;
+ size_t rp_size = sizeof(mgmt_rp);
+@@ -5568,7 +5660,8 @@ static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int e
+ bt_dev_dbg(hdev, "status %d", status);
+
+ if (status) {
+- mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
++ mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
++ status);
+ goto remove;
+ }
+
+@@ -5873,17 +5966,12 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+- if (err == -ECANCELED)
+- return;
+-
+- if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
+- cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
+- cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
+ mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
+ cmd->param, 1);
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+
+ hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
+ DISCOVERY_FINDING);
+@@ -5891,6 +5979,9 @@ static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
+
+ static int start_discovery_sync(struct hci_dev *hdev, void *data)
+ {
++ if (!mgmt_pending_listed(hdev, data))
++ return -ECANCELED;
++
+ return hci_start_discovery_sync(hdev);
+ }
+
+@@ -6113,15 +6204,14 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+
+- if (err == -ECANCELED ||
+- cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, cmd))
+ return;
+
+ bt_dev_dbg(hdev, "err %d", err);
+
+ mgmt_cmd_complete(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_status(err),
+ cmd->param, 1);
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+
+ if (!err)
+ hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+@@ -6129,6 +6219,9 @@ static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
+
+ static int stop_discovery_sync(struct hci_dev *hdev, void *data)
+ {
++ if (!mgmt_pending_listed(hdev, data))
++ return -ECANCELED;
++
+ return hci_stop_discovery_sync(hdev);
+ }
+
+@@ -6338,14 +6431,18 @@ static void enable_advertising_instance(struct hci_dev *hdev, int err)
+
+ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
+ {
++ struct mgmt_pending_cmd *cmd = data;
+ struct cmd_lookup match = { NULL, hdev };
+ u8 instance;
+ struct adv_info *adv_instance;
+ u8 status = mgmt_status(err);
+
++ if (err == -ECANCELED || !mgmt_pending_valid(hdev, data))
++ return;
++
+ if (status) {
+- mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true,
+- cmd_status_rsp, &status);
++ mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, status);
++ mgmt_pending_free(cmd);
+ return;
+ }
+
+@@ -6354,8 +6451,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
+ else
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING);
+
+- mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, true, settings_rsp,
+- &match);
++ settings_rsp(cmd, &match);
+
+ new_settings(hdev, match.sk);
+
+@@ -6387,10 +6483,23 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
+ static int set_adv_sync(struct hci_dev *hdev, void *data)
+ {
+ struct mgmt_pending_cmd *cmd = data;
+- struct mgmt_mode *cp = cmd->param;
+- u8 val = !!cp->val;
++ struct mgmt_mode cp;
++ u8 val;
+
+- if (cp->val == 0x02)
++ mutex_lock(&hdev->mgmt_pending_lock);
++
++ if (!__mgmt_pending_listed(hdev, cmd)) {
++ mutex_unlock(&hdev->mgmt_pending_lock);
++ return -ECANCELED;
++ }
++
++ memcpy(&cp, cmd->param, sizeof(cp));
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
++
++ val = !!cp.val;
++
++ if (cp.val == 0x02)
+ hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+ else
+ hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
+@@ -8100,10 +8209,6 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
+ u8 status = mgmt_status(err);
+ u16 eir_len;
+
+- if (err == -ECANCELED ||
+- cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
+- return;
+-
+ if (!status) {
+ if (!skb)
+ status = MGMT_STATUS_FAILED;
+@@ -8210,7 +8315,7 @@ static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
+ kfree_skb(skb);
+
+ kfree(mgmt_rp);
+- mgmt_pending_remove(cmd);
++ mgmt_pending_free(cmd);
+ }
+
+ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
+@@ -8219,7 +8324,7 @@ static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
+ struct mgmt_pending_cmd *cmd;
+ int err;
+
+- cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
++ cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
+ cp, sizeof(*cp));
+ if (!cmd)
+ return -ENOMEM;
+diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c
+index 4ba500c377a4c..e612121b96d0c 100644
+--- a/net/bluetooth/mgmt_util.c
++++ b/net/bluetooth/mgmt_util.c
+@@ -320,6 +320,52 @@ void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)
+ mgmt_pending_free(cmd);
+ }
+
++bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
++{
++ struct mgmt_pending_cmd *tmp;
++
++ lockdep_assert_held(&hdev->mgmt_pending_lock);
++
++ if (!cmd)
++ return false;
++
++ list_for_each_entry(tmp, &hdev->mgmt_pending, list) {
++ if (cmd == tmp)
++ return true;
++ }
++
++ return false;
++}
++
++bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
++{
++ bool listed;
++
++ mutex_lock(&hdev->mgmt_pending_lock);
++ listed = __mgmt_pending_listed(hdev, cmd);
++ mutex_unlock(&hdev->mgmt_pending_lock);
++
++ return listed;
++}
++
++bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd)
++{
++ bool listed;
++
++ if (!cmd)
++ return false;
++
++ mutex_lock(&hdev->mgmt_pending_lock);
++
++ listed = __mgmt_pending_listed(hdev, cmd);
++ if (listed)
++ list_del(&cmd->list);
++
++ mutex_unlock(&hdev->mgmt_pending_lock);
++
++ return listed;
++}
++
+ void mgmt_mesh_foreach(struct hci_dev *hdev,
+ void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
+ void *data, struct sock *sk)
+diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h
+index 024e51dd69375..bcba8c9d89528 100644
+--- a/net/bluetooth/mgmt_util.h
++++ b/net/bluetooth/mgmt_util.h
+@@ -65,6 +65,9 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,
+ void *data, u16 len);
+ void mgmt_pending_free(struct mgmt_pending_cmd *cmd);
+ void mgmt_pending_remove(struct mgmt_pending_cmd *cmd);
++bool __mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd);
++bool mgmt_pending_listed(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd);
++bool mgmt_pending_valid(struct hci_dev *hdev, struct mgmt_pending_cmd *cmd);
+ void mgmt_mesh_foreach(struct hci_dev *hdev,
+ void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data),
+ void *data, struct sock *sk);
+--
+2.53.0
+
--- /dev/null
+From 8fa379de90ed0d8469891065935ed206cc1d8bab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:53:54 +0800
+Subject: drm/amdgpu: Limit BO list entry count to prevent resource exhaustion
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jesse.Zhang <Jesse.Zhang@amd.com>
+
+[ Upstream commit 6270b1a5dab94665d7adce3dc78bc9066ed28bdd ]
+
+Userspace can pass an arbitrary number of BO list entries via the
+bo_number field. Although the previous multiplication overflow check
+prevents out-of-bounds allocation, a large number of entries could still
+cause excessive memory allocation (up to potentially gigabytes) and
+unnecessarily long list processing times.
+
+Introduce a hard limit of 128k entries per BO list, which is more than
+sufficient for any realistic use case (e.g., a single list containing all
+buffers in a large scene). This prevents memory exhaustion attacks and
+ensures predictable performance.
+
+Return -EINVAL if the requested entry count exceeds the limit.
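+
+Schematically, the added check rejects oversized requests before any
+allocation happens (bo_number caches in->bo_number, as in the hunk
+below):
+
+  #define AMDGPU_BO_LIST_MAX_ENTRIES (128 * 1024)
+
+  if (bo_number > AMDGPU_BO_LIST_MAX_ENTRIES)
+          return -EINVAL;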
+
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Suggested-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Jesse Zhang <jesse.zhang@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 688b87d39e0aa8135105b40dc167d74b5ada5332)
+Cc: stable@vger.kernel.org
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index db0a1c828fe15..4efdc49d1015f 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -36,6 +36,7 @@
+
+ #define AMDGPU_BO_LIST_MAX_PRIORITY 32u
+ #define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
++#define AMDGPU_BO_LIST_MAX_ENTRIES (128 * 1024)
+
+ static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+ {
+@@ -201,6 +202,9 @@ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ const uint32_t bo_number = in->bo_number;
+ struct drm_amdgpu_bo_list_entry *info;
+
++ if (bo_number > AMDGPU_BO_LIST_MAX_ENTRIES)
++ return -EINVAL;
++
+ /* copy the handle array from userspace to a kernel buffer */
+ if (likely(info_size == bo_info_size)) {
+ info = vmemdup_array_user(uptr, bo_number, info_size);
+--
+2.53.0
+
--- /dev/null
+From 77374daf3024e730c058c2fdb5e62de8d407ba7e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 14:53:18 +0800
+Subject: drm/amdgpu: Use vmemdup_array_user in
+ amdgpu_bo_create_list_entry_array
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+[ Upstream commit c4ac100e9ae252b09986766ad23b1f83ca3a369d ]
+
+Replace kvmalloc_array() + copy_from_user() with vmemdup_array_user() on
+the fast path.
+
+This shrinks the source code and improves separation between the kernel
+and userspace slabs.
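+
+Only the matching-size fast path switches to the helper; the
+mismatched-size slow path keeps the open-coded loop, roughly (as in
+the hunk below, where bytes is min(bo_info_size, info_size)):
+
+  info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
+  if (!info)
+          return -ENOMEM;
+
+  memset(info, 0, bo_number * info_size);
+  for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
+          if (copy_from_user(&info[i], uptr, bytes)) {
+                  kvfree(info);
+                  return -EFAULT;
+          }
+  }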
+
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Fang Wang <32840572@qq.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 41 +++++++++------------
+ 1 file changed, 17 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+index 9a53ca555e708..db0a1c828fe15 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+@@ -195,43 +195,36 @@ void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
+ int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param)
+ {
+- const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
+ const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);
++ const void __user *uptr = u64_to_user_ptr(in->bo_info_ptr);
++ const uint32_t bo_info_size = in->bo_info_size;
++ const uint32_t bo_number = in->bo_number;
+ struct drm_amdgpu_bo_list_entry *info;
+- int r;
+-
+- info = kvmalloc_array(in->bo_number, info_size, GFP_KERNEL);
+- if (!info)
+- return -ENOMEM;
+
+ /* copy the handle array from userspace to a kernel buffer */
+- r = -EFAULT;
+- if (likely(info_size == in->bo_info_size)) {
+- unsigned long bytes = in->bo_number *
+- in->bo_info_size;
+-
+- if (copy_from_user(info, uptr, bytes))
+- goto error_free;
+-
++ if (likely(info_size == bo_info_size)) {
++ info = vmemdup_array_user(uptr, bo_number, info_size);
++ if (IS_ERR(info))
++ return PTR_ERR(info);
+ } else {
+- unsigned long bytes = min(in->bo_info_size, info_size);
++ const uint32_t bytes = min(bo_info_size, info_size);
+ unsigned i;
+
+- memset(info, 0, in->bo_number * info_size);
+- for (i = 0; i < in->bo_number; ++i) {
+- if (copy_from_user(&info[i], uptr, bytes))
+- goto error_free;
++ info = kvmalloc_array(bo_number, info_size, GFP_KERNEL);
++ if (!info)
++ return -ENOMEM;
+
+- uptr += in->bo_info_size;
++ memset(info, 0, bo_number * info_size);
++ for (i = 0; i < bo_number; ++i, uptr += bo_info_size) {
++ if (copy_from_user(&info[i], uptr, bytes)) {
++ kvfree(info);
++ return -EFAULT;
++ }
+ }
+ }
+
+ *info_param = info;
+ return 0;
+-
+-error_free:
+- kvfree(info);
+- return r;
+ }
+
+ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
+--
+2.53.0
+
--- /dev/null
+From 524de63074ab356fb3a0b14d998e69bd69f20d72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 13:46:42 +0800
+Subject: padata: Fix pd UAF once and for all
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 71203f68c7749609d7fc8ae6ad054bdedeb24f91 ]
+
+There is a race condition/UAF in padata_reorder that goes back
+to the initial commit. A reference count is taken at the start
+of the process in padata_do_parallel, and released at the end in
+padata_serial_worker.
+
+This reference count is required (and only required) for
+padata_replace to function correctly. If padata_replace is never
+called then there is no issue.
+
+In the function padata_reorder, which serves as the core of padata, as
+soon as a padata is added to queue->serial.list and the associated
+spin lock is released, that padata may be processed and the reference
+count on pd may go away.
+
+Fix this by getting the next padata before the squeue->serial lock
+is released.
+
+In order to make this possible, simplify padata_reorder by only
+calling it once the next padata arrives.
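+
+On the submission side, padata_do_serial now only parks an object that
+arrives out of order, and otherwise serializes it directly (simplified
+from the hunk below):
+
+  spin_lock(&reorder->lock);
+  if (padata->seq_nr != pd->processed) {
+          gotit = false;                 /* out of order: park it */
+          list_add(&padata->list, pos);
+  }
+  spin_unlock(&reorder->lock);
+
+  if (gotit)                             /* next expected: reorder now */
+          padata_reorder(padata);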
+
+Fixes: 16295bec6398 ("padata: Generic parallelization/serialization interface")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+[ Adjust context of padata_find_next(). Replace
+cpumask_next_wrap(cpu, pd->cpumask.pcpu) with
+cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false) in padata_reorder()
+in v6.6, according to dc5bb9b769c9 ("cpumask: deprecate
+cpumask_next_wrap()") and f954a2d37637 ("padata: switch
+padata_find_next() to using cpumask_next_wrap()"). ]
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/padata.h | 3 -
+ kernel/padata.c | 136 +++++++++++------------------------------
+ 2 files changed, 37 insertions(+), 102 deletions(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 495b16b6b4d72..9ca779d7e310e 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -91,7 +91,6 @@ struct padata_cpumask {
+ * @cpu: Next CPU to be processed.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
+ * @reorder_work: work struct for reordering.
+- * @lock: Reorder lock.
+ */
+ struct parallel_data {
+ struct padata_shell *ps;
+@@ -102,8 +101,6 @@ struct parallel_data {
+ unsigned int processed;
+ int cpu;
+ struct padata_cpumask cpumask;
+- struct work_struct reorder_work;
+- spinlock_t ____cacheline_aligned lock;
+ };
+
+ /**
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 9260ab0b39eb5..44ea75bfd8681 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -261,20 +261,17 @@ EXPORT_SYMBOL(padata_do_parallel);
+ * be parallel processed by another cpu and is not yet present in
+ * the cpu's reorder queue.
+ */
+-static struct padata_priv *padata_find_next(struct parallel_data *pd,
+- bool remove_object)
++static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu,
++ unsigned int processed)
+ {
+ struct padata_priv *padata;
+ struct padata_list *reorder;
+- int cpu = pd->cpu;
+
+ reorder = per_cpu_ptr(pd->reorder_list, cpu);
+
+ spin_lock(&reorder->lock);
+- if (list_empty(&reorder->list)) {
+- spin_unlock(&reorder->lock);
+- return NULL;
+- }
++ if (list_empty(&reorder->list))
++ goto notfound;
+
+ padata = list_entry(reorder->list.next, struct padata_priv, list);
+
+@@ -282,101 +279,52 @@ static struct padata_priv *padata_find_next(struct parallel_data *pd,
+ * Checks the rare case where two or more parallel jobs have hashed to
+ * the same CPU and one of the later ones finishes first.
+ */
+- if (padata->seq_nr != pd->processed) {
+- spin_unlock(&reorder->lock);
+- return NULL;
+- }
+-
+- if (remove_object) {
+- list_del_init(&padata->list);
+- ++pd->processed;
+- /* When sequence wraps around, reset to the first CPU. */
+- if (unlikely(pd->processed == 0))
+- pd->cpu = cpumask_first(pd->cpumask.pcpu);
+- else
+- pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
+- }
++ if (padata->seq_nr != processed)
++ goto notfound;
+
++ list_del_init(&padata->list);
+ spin_unlock(&reorder->lock);
+ return padata;
++
++notfound:
++ pd->processed = processed;
++ pd->cpu = cpu;
++ spin_unlock(&reorder->lock);
++ return NULL;
+ }
+
+-static void padata_reorder(struct parallel_data *pd)
++static void padata_reorder(struct padata_priv *padata)
+ {
++ struct parallel_data *pd = padata->pd;
+ struct padata_instance *pinst = pd->ps->pinst;
+- int cb_cpu;
+- struct padata_priv *padata;
+- struct padata_serial_queue *squeue;
+- struct padata_list *reorder;
++ unsigned int processed;
++ int cpu;
+
+- /*
+- * We need to ensure that only one cpu can work on dequeueing of
+- * the reorder queue the time. Calculating in which percpu reorder
+- * queue the next object will arrive takes some time. A spinlock
+- * would be highly contended. Also it is not clear in which order
+- * the objects arrive to the reorder queues. So a cpu could wait to
+- * get the lock just to notice that there is nothing to do at the
+- * moment. Therefore we use a trylock and let the holder of the lock
+- * care for all the objects enqueued during the holdtime of the lock.
+- */
+- if (!spin_trylock_bh(&pd->lock))
+- return;
++ processed = pd->processed;
++ cpu = pd->cpu;
+
+- while (1) {
+- padata = padata_find_next(pd, true);
++ do {
++ struct padata_serial_queue *squeue;
++ int cb_cpu;
+
+- /*
+- * If the next object that needs serialization is parallel
+- * processed by another cpu and is still on it's way to the
+- * cpu's reorder queue, nothing to do for now.
+- */
+- if (!padata)
+- break;
++ cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
++ processed++;
+
+ cb_cpu = padata->cb_cpu;
+ squeue = per_cpu_ptr(pd->squeue, cb_cpu);
+
+ spin_lock(&squeue->serial.lock);
+ list_add_tail(&padata->list, &squeue->serial.list);
+- spin_unlock(&squeue->serial.lock);
+-
+ queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
+- }
+
+- spin_unlock_bh(&pd->lock);
+-
+- /*
+- * The next object that needs serialization might have arrived to
+- * the reorder queues in the meantime.
+- *
+- * Ensure reorder queue is read after pd->lock is dropped so we see
+- * new objects from another task in padata_do_serial. Pairs with
+- * smp_mb in padata_do_serial.
+- */
+- smp_mb();
+-
+- reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
+- if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+ /*
+- * Other context(eg. the padata_serial_worker) can finish the request.
+- * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
++ * If the next object that needs serialization is parallel
++ * processed by another cpu and is still on it's way to the
++ * cpu's reorder queue, end the loop.
+ */
+- padata_get_pd(pd);
+- if (!queue_work(pinst->serial_wq, &pd->reorder_work))
+- padata_put_pd(pd);
+- }
+-}
+-
+-static void invoke_padata_reorder(struct work_struct *work)
+-{
+- struct parallel_data *pd;
+-
+- local_bh_disable();
+- pd = container_of(work, struct parallel_data, reorder_work);
+- padata_reorder(pd);
+- local_bh_enable();
+- /* Pairs with putting the reorder_work in the serial_wq */
+- padata_put_pd(pd);
++ padata = padata_find_next(pd, cpu, processed);
++ spin_unlock(&squeue->serial.lock);
++ } while (padata);
+ }
+
+ static void padata_serial_worker(struct work_struct *serial_work)
+@@ -427,6 +375,7 @@ void padata_do_serial(struct padata_priv *padata)
+ struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
+ struct padata_priv *cur;
+ struct list_head *pos;
++ bool gotit = true;
+
+ spin_lock(&reorder->lock);
+ /* Sort in ascending order of sequence number. */
+@@ -436,17 +385,14 @@ void padata_do_serial(struct padata_priv *padata)
+ if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
+ break;
+ }
+- list_add(&padata->list, pos);
++ if (padata->seq_nr != pd->processed) {
++ gotit = false;
++ list_add(&padata->list, pos);
++ }
+ spin_unlock(&reorder->lock);
+
+- /*
+- * Ensure the addition to the reorder list is ordered correctly
+- * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
+- * in padata_reorder.
+- */
+- smp_mb();
+-
+- padata_reorder(pd);
++ if (gotit)
++ padata_reorder(padata);
+ }
+ EXPORT_SYMBOL(padata_do_serial);
+
+@@ -633,9 +579,7 @@ static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
+ padata_init_squeues(pd);
+ pd->seq_nr = -1;
+ refcount_set(&pd->refcnt, 1);
+- spin_lock_init(&pd->lock);
+ pd->cpu = cpumask_first(pd->cpumask.pcpu);
+- INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
+
+ return pd;
+
+@@ -1145,12 +1089,6 @@ void padata_free_shell(struct padata_shell *ps)
+ if (!ps)
+ return;
+
+- /*
+- * Wait for all _do_serial calls to finish to avoid touching
+- * freed pd's and ps's.
+- */
+- synchronize_rcu();
+-
+ mutex_lock(&ps->pinst->lock);
+ list_del(&ps->list);
+ pd = rcu_dereference_protected(ps->pd, 1);
+--
+2.53.0
+
--- /dev/null
+From 34cdf4709277360b6d3b2b86e07bc3fe31311b30 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2026 13:46:43 +0800
+Subject: padata: Remove comment for reorder_work
+
+From: Herbert Xu <herbert@gondor.apana.org.au>
+
+[ Upstream commit 82a0302e7167d0b7c6cde56613db3748f8dd806d ]
+
+Remove comment for reorder_work which no longer exists.
+
+Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
+Fixes: 71203f68c774 ("padata: Fix pd UAF once and for all")
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/padata.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 9ca779d7e310e..6f07e12a43819 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -90,7 +90,6 @@ struct padata_cpumask {
+ * @processed: Number of already processed objects.
+ * @cpu: Next CPU to be processed.
+ * @cpumask: The cpumasks in use for parallel and serial workers.
+- * @reorder_work: work struct for reordering.
+ */
+ struct parallel_data {
+ struct padata_shell *ps;
+--
+2.53.0
+
--- /dev/null
+From 639eb566eefc602173be132d72d60fc8ea05f6e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2026 15:14:21 +0800
+Subject: regset: use kvzalloc() for regset_get_alloc()
+
+From: Douglas Anderson <dianders@chromium.org>
+
+commit 6b839b3b76cf17296ebd4a893841f32cae08229c upstream.
+
+While browsing through ChromeOS crash reports, I found one with an
+allocation failure that looked like this:
+
+ chrome: page allocation failure: order:7,
+ mode:0x40dc0(GFP_KERNEL|__GFP_COMP|__GFP_ZERO),
+ nodemask=(null),cpuset=urgent,mems_allowed=0
+ CPU: 7 PID: 3295 Comm: chrome Not tainted
+ 5.15.133-20574-g8044615ac35c #1 (HASH:1162 1)
+ Hardware name: Google Lazor (rev3 - 8) with KB Backlight (DT)
+ Call trace:
+ ...
+ warn_alloc+0x104/0x174
+ __alloc_pages+0x5f0/0x6e4
+ kmalloc_order+0x44/0x98
+ kmalloc_order_trace+0x34/0x124
+ __kmalloc+0x228/0x36c
+ __regset_get+0x68/0xcc
+ regset_get_alloc+0x1c/0x28
+ elf_core_dump+0x3d8/0xd8c
+ do_coredump+0xeb8/0x1378
+ get_signal+0x14c/0x804
+ ...
+
+An order 7 allocation is (1 << 7) contiguous pages, or 512K. It's not
+a surprise that this allocation failed on a system that's been running
+for a while.
+
+More digging showed that it was fairly easy to see the order 7
+allocation by just sending a SIGQUIT to chrome (or other processes) to
+generate a core dump. The actual amount being allocated was 279,584
+bytes and it was for "core_note_type" NT_ARM_SVE.
+
+There was quite a bit of discussion [1] on the mailing lists in
+response to my v1 patch attempting to switch to vmalloc. The overall
+conclusion was that we could likely reduce the 279,584 byte allocation
+by quite a bit and Mark Brown has sent a patch to that effect [2].
+However even with the 279,584 byte allocation gone there are still
+65,552 byte allocations. These are just barely more than the 65,536
+bytes and thus would require an order 5 allocation.
+
+An order 5 allocation is still something to avoid unless necessary and
+nothing needs the memory here to be contiguous. Change the allocation
+to kvzalloc() which should still be efficient for small allocations
+but doesn't force the memory subsystem to work hard (and maybe fail)
+at getting a large contiguous chunk.
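+
+For reference, kvzalloc() behaves like kzalloc() for small sizes but
+may fall back to vmalloc() when a physically contiguous block is not
+available (a minimal sketch; 65552 is the size from the report):
+
+  /* 65,552 bytes is just over 64K, so kmalloc() would need an
+   * order-5 (128K) contiguous block; kvzalloc() can satisfy the
+   * request from scattered pages instead. */
+  void *buf = kvzalloc(65552, GFP_KERNEL);
+
+  if (!buf)
+          return -ENOMEM;
+  /* ... fill and consume buf ... */
+  kvfree(buf); /* correct for both kmalloc and vmalloc memory */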
+
+[1] https://lore.kernel.org/r/20240201171159.1.Id9ad163b60d21c9e56c2d686b0cc9083a8ba7924@changeid
+[2] https://lore.kernel.org/r/20240203-arm64-sve-ptrace-regset-size-v1-1-2c3ba1386b9e@kernel.org
+
+Link: https://lkml.kernel.org/r/20240205092626.v2.1.Id9ad163b60d21c9e56c2d686b0cc9083a8ba7924@changeid
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Al Viro <viro@ZenIV.linux.org.uk>
+Cc: Christian Brauner <brauner@kernel.org>
+Cc: Dave Martin <Dave.Martin@arm.com>
+Cc: Eric Biederman <ebiederm@xmission.com>
+Cc: Jan Kara <jack@suse.cz>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Will Deacon <will@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Wen Yang <wen.yang@linux.dev>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/binfmt_elf.c | 2 +-
+ kernel/regset.c | 6 +++---
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
+index 3ff7d2e47c7e9..e4348dd76658e 100644
+--- a/fs/binfmt_elf.c
++++ b/fs/binfmt_elf.c
+@@ -2035,7 +2035,7 @@ static void free_note_info(struct elf_note_info *info)
+ threads = t->next;
+ WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
+ for (i = 1; i < info->thread_notes; ++i)
+- kfree(t->notes[i].data);
++ kvfree(t->notes[i].data);
+ kfree(t);
+ }
+ kfree(info->psinfo.data);
+diff --git a/kernel/regset.c b/kernel/regset.c
+index 586823786f397..b2871fa68b2a7 100644
+--- a/kernel/regset.c
++++ b/kernel/regset.c
+@@ -16,14 +16,14 @@ static int __regset_get(struct task_struct *target,
+ if (size > regset->n * regset->size)
+ size = regset->n * regset->size;
+ if (!p) {
+- to_free = p = kzalloc(size, GFP_KERNEL);
++ to_free = p = kvzalloc(size, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ }
+ res = regset->regset_get(target, regset,
+ (struct membuf){.p = p, .left = size});
+ if (res < 0) {
+- kfree(to_free);
++ kvfree(to_free);
+ return res;
+ }
+ *data = p;
+@@ -71,6 +71,6 @@ int copy_regset_to_user(struct task_struct *target,
+ ret = regset_get_alloc(target, regset, size, &buf);
+ if (ret > 0)
+ ret = copy_to_user(data, buf, ret) ? -EFAULT : 0;
+- kfree(buf);
++ kvfree(buf);
+ return ret;
+ }
+--
+2.53.0
+
ibmasm-fix-oob-reads-in-command_file_write-due-to-missing-size-checks.patch
ibmasm-fix-heap-over-read-in-ibmasm_send_i2o_message.patch
firmware-google-framebuffer-do-not-mark-framebuffer-as-busy.patch
+bluetooth-mgmt-fix-possible-uafs.patch
+padata-fix-pd-uaf-once-and-for-all.patch
+padata-remove-comment-for-reorder_work.patch
+drm-amdgpu-use-vmemdup_array_user-in-amdgpu_bo_creat.patch
+drm-amdgpu-limit-bo-list-entry-count-to-prevent-reso.patch
+regset-use-kvzalloc-for-regset_get_alloc.patch