--- /dev/null
+From 48478b9f791376b4b89018d7afdfd06865498f65 Mon Sep 17 00:00:00 2001
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+Date: Mon, 9 Mar 2026 02:57:24 +0000
+Subject: arm64/mm: Enable batched TLB flush in unmap_hotplug_range()
+
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+
+commit 48478b9f791376b4b89018d7afdfd06865498f65 upstream.
+
+During a memory hot remove operation, both linear and vmemmap mappings for
+the memory range being removed, get unmapped via unmap_hotplug_range() but
+mapped pages get freed only for vmemmap mapping. This is just a sequential
+operation where each table entry gets cleared, followed by a leaf specific
+TLB flush, and then followed by memory free operation when applicable.
+
+This approach was simple and uniform both for vmemmap and linear mappings.
+But linear mapping might contain CONT marked block memory where it becomes
+necessary to first clear out all entries in the range before a TLB flush.
+This is as per the architecture requirement. Hence batch all TLB flushes
+during the table tear down walk and finally do it in unmap_hotplug_range().
+
+Prior to this fix, it was hypothetically possible for a speculative access
+to a higher address in the contiguous block to fill the TLB with shattered
+entries for the entire contiguous range after a lower address had already
+been cleared and invalidated. Due to the table entries being shattered, the
+subsequent TLB invalidation for the higher address would not then clear the
+TLB entries for the lower address, meaning stale TLB entries could persist.
+
+Besides it also helps in improving the performance via TLBI range operation
+along with reduced synchronization instructions. The time spent executing
+unmap_hotplug_range() improved 97% measured over a 2GB memory hot removal
+in KVM guest.
+
+This scheme is not applicable during vmemmap mapping tear down where memory
+needs to be freed and hence a TLB flush is required after clearing out page
+table entry.
+
+Cc: Will Deacon <will@kernel.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-kernel@vger.kernel.org
+Closes: https://lore.kernel.org/all/aWZYXhrT6D2M-7-N@willie-the-truck/
+Fixes: bbd6ec605c0f ("arm64/mm: Enable memory hot remove")
+Cc: stable@vger.kernel.org
+Reviewed-by: David Hildenbrand (Arm) <david@kernel.org>
+Reviewed-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmu.c | 36 ++++++++++++++++++++----------------
+ 1 file changed, 20 insertions(+), 16 deletions(-)
+
+--- a/arch/arm64/mm/mmu.c
++++ b/arch/arm64/mm/mmu.c
+@@ -869,10 +869,14 @@ static void unmap_hotplug_pte_range(pmd_
+
+ WARN_ON(!pte_present(pte));
+ __pte_clear(&init_mm, addr, ptep);
+- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+- if (free_mapped)
++ if (free_mapped) {
++ /* CONT blocks are not supported in the vmemmap */
++ WARN_ON(pte_cont(pte));
++ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ free_hotplug_page_range(pte_page(pte),
+ PAGE_SIZE, altmap);
++ }
++ /* unmap_hotplug_range() flushes TLB for !free_mapped */
+ } while (addr += PAGE_SIZE, addr < end);
+ }
+
+@@ -893,15 +897,14 @@ static void unmap_hotplug_pmd_range(pud_
+ WARN_ON(!pmd_present(pmd));
+ if (pmd_sect(pmd)) {
+ pmd_clear(pmdp);
+-
+- /*
+- * One TLBI should be sufficient here as the PMD_SIZE
+- * range is mapped with a single block entry.
+- */
+- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+- if (free_mapped)
++ if (free_mapped) {
++ /* CONT blocks are not supported in the vmemmap */
++ WARN_ON(pmd_cont(pmd));
++ flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+ free_hotplug_page_range(pmd_page(pmd),
+ PMD_SIZE, altmap);
++ }
++ /* unmap_hotplug_range() flushes TLB for !free_mapped */
+ continue;
+ }
+ WARN_ON(!pmd_table(pmd));
+@@ -926,15 +929,12 @@ static void unmap_hotplug_pud_range(p4d_
+ WARN_ON(!pud_present(pud));
+ if (pud_sect(pud)) {
+ pud_clear(pudp);
+-
+- /*
+- * One TLBI should be sufficient here as the PUD_SIZE
+- * range is mapped with a single block entry.
+- */
+- flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+- if (free_mapped)
++ if (free_mapped) {
++ flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+ free_hotplug_page_range(pud_page(pud),
+ PUD_SIZE, altmap);
++ }
++ /* unmap_hotplug_range() flushes TLB for !free_mapped */
+ continue;
+ }
+ WARN_ON(!pud_table(pud));
+@@ -964,6 +964,7 @@ static void unmap_hotplug_p4d_range(pgd_
+ static void unmap_hotplug_range(unsigned long addr, unsigned long end,
+ bool free_mapped, struct vmem_altmap *altmap)
+ {
++ unsigned long start = addr;
+ unsigned long next;
+ pgd_t *pgdp, pgd;
+
+@@ -985,6 +986,9 @@ static void unmap_hotplug_range(unsigned
+ WARN_ON(!pgd_present(pgd));
+ unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap);
+ } while (addr = next, addr < end);
++
++ if (!free_mapped)
++ flush_tlb_kernel_range(start, end);
+ }
+
+ static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr,
--- /dev/null
+From a2225b6e834a838ae3c93709760edc0a169eb2f2 Mon Sep 17 00:00:00 2001
+From: Douglas Anderson <dianders@chromium.org>
+Date: Mon, 6 Apr 2026 16:22:54 -0700
+Subject: driver core: Don't let a device probe until it's ready
+
+From: Douglas Anderson <dianders@chromium.org>
+
+commit a2225b6e834a838ae3c93709760edc0a169eb2f2 upstream.
+
+The moment we link a "struct device" into the list of devices for the
+bus, it's possible probe can happen. This is because another thread
+can load the driver at any time and that can cause the device to
+probe. This has been seen in practice with a stack crawl that looks
+like this [1]:
+
+ really_probe()
+ __driver_probe_device()
+ driver_probe_device()
+ __driver_attach()
+ bus_for_each_dev()
+ driver_attach()
+ bus_add_driver()
+ driver_register()
+ __platform_driver_register()
+ init_module() [some module]
+ do_one_initcall()
+ do_init_module()
+ load_module()
+ __arm64_sys_finit_module()
+ invoke_syscall()
+
+As a result of the above, it was seen that device_links_driver_bound()
+could be called for the device before "dev->fwnode->dev" was
+assigned. This prevented __fw_devlink_pickup_dangling_consumers() from
+being called which meant that other devices waiting on our driver's
+sub-nodes were stuck deferring forever.
+
+It's believed that this problem is showing up suddenly for two
+reasons:
+1. Android has recently (last ~1 year) implemented an optimization to
+ the order it loads modules [2]. When devices opt-in to this faster
+ loading, modules are loaded one-after-the-other very quickly. This
+ is unlike how other distributions do it. The reproduction of this
+ problem has only been seen on devices that opt-in to Android's
+ "parallel module loading".
+2. Android devices typically opt-in to fw_devlink, and the most
+ noticeable issue is the NULL "dev->fwnode->dev" in
+ device_links_driver_bound(). fw_devlink is somewhat new code and
+ also not in use by all Linux devices.
+
+Even though the specific symptom where "dev->fwnode->dev" wasn't
+assigned could be fixed by moving that assignment higher in
+device_add(), other parts of device_add() (like the call to
+device_pm_add()) are also important to run before probe. Only moving
+the "dev->fwnode->dev" assignment would likely fix the current
+symptoms but lead to difficult-to-debug problems in the future.
+
+Fix the problem by preventing probe until device_add() has run far
+enough that the device is ready to probe. If somehow we end up trying
+to probe before we're allowed, __driver_probe_device() will return
+-EPROBE_DEFER which will make certain the device is noticed.
+
+In the race condition that was seen with Android's faster module
+loading, we will temporarily add the device to the deferred list and
+then take it off immediately when device_add() probes the device.
+
+Instead of adding another flag to the bitfields already in "struct
+device", add a new "flags" field and use that. This allows us
+to freely change the bit from a different thread without worrying about
+corrupting nearby bits (and means threads changing other bits won't
+corrupt us).
+
+[1] Captured on a machine running a downstream 6.6 kernel
+[2] https://cs.android.com/android/platform/superproject/main/+/main:system/core/libmodprobe/libmodprobe.cpp?q=LoadModulesParallel
+
+Cc: stable@vger.kernel.org
+Fixes: 2023c610dc54 ("Driver core: add new device to bus's list before probing")
+Reviewed-by: Alan Stern <stern@rowland.harvard.edu>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Reviewed-by: Danilo Krummrich <dakr@kernel.org>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Acked-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patch.msgid.link/20260406162231.v5.1.Id750b0fbcc94f23ed04b7aecabcead688d0d8c17@changeid
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/core.c | 15 +++++++++++++++
+ drivers/base/dd.c | 20 ++++++++++++++++++++
+ include/linux/device.h | 44 ++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 79 insertions(+)
+
+--- a/drivers/base/core.c
++++ b/drivers/base/core.c
+@@ -3691,6 +3691,21 @@ int device_add(struct device *dev)
+ fw_devlink_link_device(dev);
+ }
+
++ /*
++ * The moment the device was linked into the bus's "klist_devices" in
++ * bus_add_device() then it's possible that probe could have been
++ * attempted in a different thread via userspace loading a driver
++ * matching the device. "ready_to_probe" being unset would have
++ * blocked those attempts. Now that all of the above initialization has
++ * happened, unblock probe. If probe happens through another thread
++ * after this point but before bus_probe_device() runs then it's fine.
++ * bus_probe_device() -> device_initial_probe() -> __device_attach()
++ * will notice (under device_lock) that the device is already bound.
++ */
++ device_lock(dev);
++ dev_set_ready_to_probe(dev);
++ device_unlock(dev);
++
+ bus_probe_device(dev);
+
+ /*
+--- a/drivers/base/dd.c
++++ b/drivers/base/dd.c
+@@ -844,6 +844,26 @@ static int __driver_probe_device(const s
+ if (dev->driver)
+ return -EBUSY;
+
++ /*
++ * In device_add(), the "struct device" gets linked into the subsystem's
++ * list of devices and broadcast to userspace (via uevent) before we're
++ * quite ready to probe. Those open pathways to driver probe before
++ * we've finished enough of device_add() to reliably support probe.
++ * Detect this and tell other pathways to try again later. device_add()
++ * itself will also try to probe immediately after setting
++ * "ready_to_probe".
++ */
++ if (!dev_ready_to_probe(dev))
++ return dev_err_probe(dev, -EPROBE_DEFER, "Device not ready to probe\n");
++
++ /*
++ * Set can_match = true after calling dev_ready_to_probe(), so
++ * driver_deferred_probe_add() won't actually add the device to the
++ * deferred probe list when dev_ready_to_probe() returns false.
++ *
++ * When dev_ready_to_probe() returns false, it means that device_add()
++ * will do another probe() attempt for us.
++ */
+ dev->can_match = true;
+ dev_dbg(dev, "bus: '%s': %s: matched device with driver %s\n",
+ drv->bus->name, __func__, drv->name);
+--- a/include/linux/device.h
++++ b/include/linux/device.h
+@@ -500,6 +500,21 @@ struct device_physical_location {
+ };
+
+ /**
++ * enum struct_device_flags - Flags in struct device
++ *
++ * Each flag should have a set of accessor functions created via
++ * __create_dev_flag_accessors() for each access.
++ *
++ * @DEV_FLAG_READY_TO_PROBE: If set then device_add() has finished enough
++ * initialization that probe could be called.
++ */
++enum struct_device_flags {
++ DEV_FLAG_READY_TO_PROBE = 0,
++
++ DEV_FLAG_COUNT
++};
++
++/**
+ * struct device - The basic device structure
+ * @parent: The device's "parent" device, the device to which it is attached.
+ * In most cases, a parent device is some sort of bus or host
+@@ -594,6 +609,7 @@ struct device_physical_location {
+ * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers.
+ * @dma_iommu: Device is using default IOMMU implementation for DMA and
+ * doesn't rely on dma_ops structure.
++ * @flags: DEV_FLAG_XXX flags. Use atomic bitfield operations to modify.
+ *
+ * At the lowest level, every device in a Linux system is represented by an
+ * instance of struct device. The device structure contains the information
+@@ -716,8 +732,36 @@ struct device {
+ #ifdef CONFIG_IOMMU_DMA
+ bool dma_iommu:1;
+ #endif
++
++ DECLARE_BITMAP(flags, DEV_FLAG_COUNT);
+ };
+
++#define __create_dev_flag_accessors(accessor_name, flag_name) \
++static inline bool dev_##accessor_name(const struct device *dev) \
++{ \
++ return test_bit(flag_name, dev->flags); \
++} \
++static inline void dev_set_##accessor_name(struct device *dev) \
++{ \
++ set_bit(flag_name, dev->flags); \
++} \
++static inline void dev_clear_##accessor_name(struct device *dev) \
++{ \
++ clear_bit(flag_name, dev->flags); \
++} \
++static inline void dev_assign_##accessor_name(struct device *dev, bool value) \
++{ \
++ assign_bit(flag_name, dev->flags, value); \
++} \
++static inline bool dev_test_and_set_##accessor_name(struct device *dev) \
++{ \
++ return test_and_set_bit(flag_name, dev->flags); \
++}
++
++__create_dev_flag_accessors(ready_to_probe, DEV_FLAG_READY_TO_PROBE);
++
++#undef __create_dev_flag_accessors
++
+ /**
+ * struct device_link - Device link representation.
+ * @supplier: The device on the supplier end of the link.
--- /dev/null
+From 6597ff1d8de3f583be169587efeafd8af134e138 Mon Sep 17 00:00:00 2001
+From: David Carlier <devnexen@gmail.com>
+Date: Sat, 11 Apr 2026 07:29:38 +0100
+Subject: drm/nouveau: fix nvkm_device leak on aperture removal failure
+
+From: David Carlier <devnexen@gmail.com>
+
+commit 6597ff1d8de3f583be169587efeafd8af134e138 upstream.
+
+When aperture_remove_conflicting_pci_devices() fails during probe, the
+error path returns directly without unwinding the nvkm_device that was
+just allocated by nvkm_device_pci_new(). This leaks both the device
+wrapper and the pci_enable_device() reference taken inside it.
+
+Jump to the existing fail_nvkm label so nvkm_device_del() runs and
+balances both. The leak was introduced when the intermediate
+nvkm_device_del() between detection and aperture removal was dropped
+in favor of creating the pci device once.
+
+Fixes: c0bfe34330b5 ("drm/nouveau: create pci device once")
+Cc: stable@vger.kernel.org
+Signed-off-by: David Carlier <devnexen@gmail.com>
+Link: https://patch.msgid.link/20260411062938.22925-1-devnexen@gmail.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
++++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
+@@ -853,7 +853,7 @@ static int nouveau_drm_probe(struct pci_
+ /* Remove conflicting drivers (vesafb, efifb etc). */
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver_pci);
+ if (ret)
+- return ret;
++ goto fail_nvkm;
+
+ pci_set_master(pdev);
+
--- /dev/null
+From 2fc87d37be1b730a149b035f9375fdb8cc5333a5 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 20 Apr 2026 21:16:09 +0200
+Subject: drm/nouveau: fix u32 overflow in pushbuf reloc bounds check
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit 2fc87d37be1b730a149b035f9375fdb8cc5333a5 upstream.
+
+nouveau_gem_pushbuf_reloc_apply() validates each relocation with
+
+ if (r->reloc_bo_offset + 4 > nvbo->bo.base.size)
+
+but reloc_bo_offset is __u32 (uapi/drm/nouveau_drm.h) and the integer
+literal 4 promotes to unsigned int, so the addition is performed in 32
+bits and wraps before the comparison against the size_t bo size.
+
+Cast to u64 so the addition happens in 64-bit arithmetic.
+
+Cc: Lyude Paul <lyude@redhat.com>
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
+Cc: Maxime Ripard <mripard@kernel.org>
+Cc: Thomas Zimmermann <tzimmermann@suse.de>
+Cc: David Airlie <airlied@gmail.com>
+Cc: Simona Vetter <simona@ffwll.ch>
+Reported-by: Anthropic
+Cc: stable <stable@kernel.org>
+Assisted-by: gkh_clanker_t1000
+Fixes: a1606a9596e5 ("drm/nouveau: new gem pushbuf interface, bump to 0.0.16")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ Add Fixes: tag. - Danilo ]
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_gem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -686,7 +686,7 @@ nouveau_gem_pushbuf_reloc_apply(struct n
+ }
+ nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+- if (unlikely(r->reloc_bo_offset + 4 >
++ if (unlikely((u64)r->reloc_bo_offset + 4 >
+ nvbo->bo.base.size)) {
+ NV_PRINTK(err, cli, "reloc outside of bo\n");
+ ret = -EINVAL;
--- /dev/null
+From f3850d399de3b6142b02315227ef9e772ed0c302 Mon Sep 17 00:00:00 2001
+From: Thomas Zimmermann <tzimmermann@suse.de>
+Date: Tue, 17 Feb 2026 16:56:12 +0100
+Subject: firmware: google: framebuffer: Do not mark framebuffer as busy
+
+From: Thomas Zimmermann <tzimmermann@suse.de>
+
+commit f3850d399de3b6142b02315227ef9e772ed0c302 upstream.
+
+Remove the flag IORESOURCE_BUSY flag from coreboot's framebuffer
+resource. It prevents simpledrm from successfully requesting the
+range for its own use; resulting in errors such as
+
+[ 2.775430] simple-framebuffer simple-framebuffer.0: [drm] could not acquire memory region [mem 0x80000000-0x80407fff flags 0x80000200]
+
+As with other uses of simple-framebuffer, the simple-framebuffer
+device should only declare its I/O resources, but not actively use
+them.
+
+Signed-off-by: Thomas Zimmermann <tzimmermann@suse.de>
+Fixes: 851b4c14532d ("firmware: coreboot: Add coreboot framebuffer driver")
+Acked-by: Tzung-Bi Shih <tzungbi@kernel.org>
+Acked-by: Julius Werner <jwerner@chromium.org>
+Cc: Samuel Holland <samuel@sholland.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Tzung-Bi Shih <tzungbi@kernel.org>
+Cc: Brian Norris <briannorris@chromium.org>
+Cc: Julius Werner <jwerner@chromium.org>
+Cc: chrome-platform@lists.linux.dev
+Cc: <stable@vger.kernel.org> # v4.18+
+Link: https://patch.msgid.link/20260217155836.96267-3-tzimmermann@suse.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/firmware/google/framebuffer-coreboot.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/firmware/google/framebuffer-coreboot.c
++++ b/drivers/firmware/google/framebuffer-coreboot.c
+@@ -53,7 +53,7 @@ static int framebuffer_probe(struct core
+ return -ENODEV;
+
+ memset(&res, 0, sizeof(res));
+- res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ res.flags = IORESOURCE_MEM;
+ res.name = "Coreboot Framebuffer";
+ res.start = fb->physical_address;
+ length = PAGE_ALIGN(fb->y_resolution * fb->bytes_per_line);
--- /dev/null
+From 1214bf28965ceaf584fb20d357731264dd2e10e1 Mon Sep 17 00:00:00 2001
+From: Pengpeng Hou <pengpeng@iscas.ac.cn>
+Date: Thu, 2 Apr 2026 13:40:16 +0800
+Subject: greybus: gb-beagleplay: bound bootloader receive buffering
+
+From: Pengpeng Hou <pengpeng@iscas.ac.cn>
+
+commit 1214bf28965ceaf584fb20d357731264dd2e10e1 upstream.
+
+cc1352_bootloader_rx() appends each serdev chunk into the fixed
+rx_buffer before parsing bootloader packets. The helper can keep
+leftover bytes between callbacks and may receive multiple packets in one
+callback, so a single count value is not constrained by one packet
+length.
+
+Check that the incoming chunk fits in the remaining receive buffer space
+before memcpy(). If it does not, drop the staged data and consume the
+bytes instead of overflowing rx_buffer.
+
+Fixes: 0cf7befa3ea2 ("greybus: gb-beagleplay: Add firmware upload API")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Pengpeng Hou <pengpeng@iscas.ac.cn>
+Link: https://patch.msgid.link/20260402054016.38587-1-pengpeng@iscas.ac.cn
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/greybus/gb-beagleplay.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/greybus/gb-beagleplay.c
++++ b/drivers/greybus/gb-beagleplay.c
+@@ -535,6 +535,13 @@ static size_t cc1352_bootloader_rx(struc
+ int ret;
+ size_t off = 0;
+
++ if (count > sizeof(bg->rx_buffer) - bg->rx_buffer_len) {
++ dev_warn(&bg->sd->dev,
++ "dropping oversized bootloader receive chunk");
++ bg->rx_buffer_len = 0;
++ return count;
++ }
++
+ memcpy(bg->rx_buffer + bg->rx_buffer_len, data, count);
+ bg->rx_buffer_len += count;
+
--- /dev/null
+From 6b526dca0966f2370835765019a54319b78fca8d Mon Sep 17 00:00:00 2001
+From: Weigang He <geoffreyhe2@gmail.com>
+Date: Mon, 30 Mar 2026 12:08:00 +0000
+Subject: greybus: gb-beagleplay: fix sleep in atomic context in hdlc_tx_frames()
+
+From: Weigang He <geoffreyhe2@gmail.com>
+
+commit 6b526dca0966f2370835765019a54319b78fca8d upstream.
+
+hdlc_append() calls usleep_range() to wait for circular buffer space,
+but it is called with tx_producer_lock (a spinlock) held via
+hdlc_tx_frames() -> hdlc_append_tx_frame()/hdlc_append_tx_u8()/etc.
+Sleeping while holding a spinlock is illegal and can trigger
+"BUG: scheduling while atomic".
+
+Fix this by moving the buffer-space wait out of hdlc_append() and into
+hdlc_tx_frames(), before the spinlock is acquired. The new flow:
+
+ 1. Pre-calculate the worst-case encoded frame length.
+ 2. Wait (with sleep) outside the lock until enough space is available,
+ kicking the TX consumer work to drain the buffer.
+ 3. Acquire the spinlock, re-verify space, and write the entire frame
+ atomically.
+
+This ensures that sleeping only happens without any lock held, and
+that frames are either fully enqueued or not written at all.
+
+This bug is found by CodeQL static analysis tool (interprocedural
+sleep-in-atomic query) and my code review.
+
+Fixes: ec558bbfea67 ("greybus: Add BeaglePlay Linux Driver")
+Cc: stable <stable@kernel.org>
+Cc: Ayush Singh <ayushdevel1325@gmail.com>
+Cc: Johan Hovold <johan@kernel.org>
+Cc: Alex Elder <elder@kernel.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Weigang He <geoffreyhe2@gmail.com>
+Link: https://patch.msgid.link/20260330120801.981506-1-geoffreyhe2@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/greybus/gb-beagleplay.c | 107 +++++++++++++++++++++++++++++++++-------
+ 1 file changed, 90 insertions(+), 17 deletions(-)
+
+--- a/drivers/greybus/gb-beagleplay.c
++++ b/drivers/greybus/gb-beagleplay.c
+@@ -242,30 +242,26 @@ static void hdlc_write(struct gb_beaglep
+ }
+
+ /**
+- * hdlc_append() - Queue HDLC data for sending.
++ * hdlc_append() - Queue a single HDLC byte for sending.
+ * @bg: beagleplay greybus driver
+ * @value: hdlc byte to transmit
+ *
+- * Assumes that producer lock as been acquired.
++ * Caller must hold tx_producer_lock and must have ensured sufficient
++ * space in the circular buffer before calling (see hdlc_tx_frames()).
+ */
+ static void hdlc_append(struct gb_beagleplay *bg, u8 value)
+ {
+- int tail, head = bg->tx_circ_buf.head;
++ int head = bg->tx_circ_buf.head;
++ int tail = READ_ONCE(bg->tx_circ_buf.tail);
+
+- while (true) {
+- tail = READ_ONCE(bg->tx_circ_buf.tail);
+-
+- if (CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) >= 1) {
+- bg->tx_circ_buf.buf[head] = value;
+-
+- /* Finish producing HDLC byte */
+- smp_store_release(&bg->tx_circ_buf.head,
+- (head + 1) & (TX_CIRC_BUF_SIZE - 1));
+- return;
+- }
+- dev_warn(&bg->sd->dev, "Tx circ buf full");
+- usleep_range(3000, 5000);
+- }
++ lockdep_assert_held(&bg->tx_producer_lock);
++ if (WARN_ON_ONCE(CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) < 1))
++ return;
++
++ bg->tx_circ_buf.buf[head] = value;
++ /* Ensure buffer write is visible before advancing head. */
++ smp_store_release(&bg->tx_circ_buf.head,
++ (head + 1) & (TX_CIRC_BUF_SIZE - 1));
+ }
+
+ static void hdlc_append_escaped(struct gb_beagleplay *bg, u8 value)
+@@ -313,13 +309,90 @@ static void hdlc_transmit(struct work_st
+ spin_unlock_bh(&bg->tx_consumer_lock);
+ }
+
++/**
++ * hdlc_encoded_length() - Calculate worst-case encoded length of an HDLC frame.
++ * @payloads: array of payload buffers
++ * @count: number of payloads
++ *
++ * Returns the maximum number of bytes needed in the circular buffer.
++ */
++static size_t hdlc_encoded_length(const struct hdlc_payload payloads[],
++ size_t count)
++{
++ size_t i, payload_len = 0;
++
++ for (i = 0; i < count; i++)
++ payload_len += payloads[i].len;
++
++ /*
++ * Worst case: every data byte needs escaping (doubles in size).
++ * data bytes = address(1) + control(1) + payload + crc(2)
++ * framing = opening flag(1) + closing flag(1)
++ */
++ return 2 + (1 + 1 + payload_len + 2) * 2;
++}
++
++#define HDLC_TX_BUF_WAIT_RETRIES 500
++#define HDLC_TX_BUF_WAIT_US_MIN 3000
++#define HDLC_TX_BUF_WAIT_US_MAX 5000
++
++/**
++ * hdlc_tx_frames() - Encode and queue an HDLC frame for transmission.
++ * @bg: beagleplay greybus driver
++ * @address: HDLC address field
++ * @control: HDLC control field
++ * @payloads: array of payload buffers
++ * @count: number of payloads
++ *
++ * Sleeps outside the spinlock until enough circular-buffer space is
++ * available, then verifies space under the lock and writes the entire
++ * frame atomically. Either a complete frame is enqueued or nothing is
++ * written, avoiding both sleeping in atomic context and partial frames.
++ */
+ static void hdlc_tx_frames(struct gb_beagleplay *bg, u8 address, u8 control,
+ const struct hdlc_payload payloads[], size_t count)
+ {
++ size_t needed = hdlc_encoded_length(payloads, count);
++ int retries = HDLC_TX_BUF_WAIT_RETRIES;
+ size_t i;
++ int head, tail;
++
++ /* Wait outside the lock for sufficient buffer space. */
++ while (retries--) {
++ /* Pairs with smp_store_release() in hdlc_append(). */
++ head = smp_load_acquire(&bg->tx_circ_buf.head);
++ tail = READ_ONCE(bg->tx_circ_buf.tail);
++
++ if (CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) >= needed)
++ break;
++
++ /* Kick the consumer and sleep — no lock held. */
++ schedule_work(&bg->tx_work);
++ usleep_range(HDLC_TX_BUF_WAIT_US_MIN, HDLC_TX_BUF_WAIT_US_MAX);
++ }
++
++ if (retries < 0) {
++ dev_warn_ratelimited(&bg->sd->dev,
++ "Tx circ buf full, dropping frame\n");
++ return;
++ }
+
+ spin_lock(&bg->tx_producer_lock);
+
++ /*
++ * Re-check under the lock. Should not fail since
++ * tx_producer_lock serialises all producers and the
++ * consumer only frees space, but guard against it.
++ */
++ head = bg->tx_circ_buf.head;
++ tail = READ_ONCE(bg->tx_circ_buf.tail);
++ if (unlikely(CIRC_SPACE(head, tail, TX_CIRC_BUF_SIZE) < needed)) {
++ spin_unlock(&bg->tx_producer_lock);
++ dev_warn_ratelimited(&bg->sd->dev,
++ "Tx circ buf space lost, dropping frame\n");
++ return;
++ }
++
+ hdlc_append_tx_frame(bg);
+ hdlc_append_tx_u8(bg, address);
+ hdlc_append_tx_u8(bg, control);
--- /dev/null
+From 9aad71144fa3682cca3837a06c8623016790e7ec Mon Sep 17 00:00:00 2001
+From: Tyllis Xu <livelycarpet87@gmail.com>
+Date: Sat, 14 Mar 2026 11:58:05 -0500
+Subject: ibmasm: fix heap over-read in ibmasm_send_i2o_message()
+
+From: Tyllis Xu <livelycarpet87@gmail.com>
+
+commit 9aad71144fa3682cca3837a06c8623016790e7ec upstream.
+
+The ibmasm_send_i2o_message() function uses get_dot_command_size() to
+compute the byte count for memcpy_toio(), but this value is derived from
+user-controlled fields in the dot_command_header (command_size: u8,
+data_size: u16) and is never validated against the actual allocation size.
+A root user can write a small buffer with inflated header fields, causing
+memcpy_toio() to read up to ~65 KB past the end of the allocation into
+adjacent kernel heap, which is then forwarded to the service processor
+over MMIO.
+
+Silently clamping the copy size is not sufficient: if the header fields
+claim a larger size than the buffer, the SP receives a dot command whose
+own header is inconsistent with the I2O message length, which can cause
+the SP to desynchronize. Reject such commands outright by returning
+failure.
+
+Validate command_size before calling get_mfa_inbound() to avoid leaking
+an I2O message frame: reading INBOUND_QUEUE_PORT dequeues a hardware
+frame from the controller's free pool, and returning without a
+corresponding set_mfa_inbound() call would permanently exhaust it.
+
+Additionally, clamp command_size to I2O_COMMAND_SIZE before the
+memcpy_toio() so the MMIO write stays within the I2O message frame,
+consistent with the clamping already performed by outgoing_message_size()
+for the header field.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
+Link: https://patch.msgid.link/20260314165805.548293-1-LivelyCarpet87@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/ibmasm/lowlevel.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+--- a/drivers/misc/ibmasm/lowlevel.c
++++ b/drivers/misc/ibmasm/lowlevel.c
+@@ -19,17 +19,21 @@ static struct i2o_header header = I2O_HE
+ int ibmasm_send_i2o_message(struct service_processor *sp)
+ {
+ u32 mfa;
+- unsigned int command_size;
++ size_t command_size;
+ struct i2o_message *message;
+ struct command *command = sp->current_command;
+
++ command_size = get_dot_command_size(command->buffer);
++ if (command_size > command->buffer_size)
++ return 1;
++ if (command_size > I2O_COMMAND_SIZE)
++ command_size = I2O_COMMAND_SIZE;
++
+ mfa = get_mfa_inbound(sp->base_address);
+ if (!mfa)
+ return 1;
+
+- command_size = get_dot_command_size(command->buffer);
+- header.message_size = outgoing_message_size(command_size);
+-
++ header.message_size = outgoing_message_size((unsigned int)command_size);
+ message = get_i2o_message(sp->base_address, mfa);
+
+ memcpy_toio(&message->header, &header, sizeof(struct i2o_header));
--- /dev/null
+From 0eb09f737428e482a32a2e31e5e223f2b35a71d3 Mon Sep 17 00:00:00 2001
+From: Tyllis Xu <livelycarpet87@gmail.com>
+Date: Sat, 14 Mar 2026 11:53:54 -0500
+Subject: ibmasm: fix OOB reads in command_file_write due to missing size checks
+
+From: Tyllis Xu <livelycarpet87@gmail.com>
+
+commit 0eb09f737428e482a32a2e31e5e223f2b35a71d3 upstream.
+
+The command_file_write() handler allocates a kernel buffer of exactly
+count bytes and copies user data into it, but does not validate the
+buffer against the dot command protocol before passing it to
+get_dot_command_size() and get_dot_command_timeout().
+
+Since both the allocation size (count) and the header fields (command_size,
+data_size) are independently user-controlled, an attacker can cause
+get_dot_command_size() to return a value exceeding the allocation,
+triggering OOB reads in get_dot_command_timeout() and an out-of-bounds
+memcpy_toio() that leaks kernel heap memory to the service processor.
+
+Fix with two guards: reject writes smaller than sizeof(struct
+dot_command_header) before allocation, then after copying user data
+reject commands where the buffer is smaller than the total size declared
+by the header (sizeof(header) + command_size + data_size). This ensures
+all subsequent header and payload field accesses stay within the buffer.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
+Link: https://patch.msgid.link/20260314165355.548119-1-LivelyCarpet87@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/ibmasm/ibmasmfs.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/misc/ibmasm/ibmasmfs.c
++++ b/drivers/misc/ibmasm/ibmasmfs.c
+@@ -303,6 +303,8 @@ static ssize_t command_file_write(struct
+ return -EINVAL;
+ if (count == 0 || count > IBMASM_CMD_MAX_BUFFER_SIZE)
+ return 0;
++ if (count < sizeof(struct dot_command_header))
++ return -EINVAL;
+ if (*offset != 0)
+ return 0;
+
+@@ -319,6 +321,11 @@ static ssize_t command_file_write(struct
+ return -EFAULT;
+ }
+
++ if (count < get_dot_command_size(cmd->buffer)) {
++ command_put(cmd);
++ return -EINVAL;
++ }
++
+ spin_lock_irqsave(&command_data->sp->lock, flags);
+ if (command_data->command) {
+ spin_unlock_irqrestore(&command_data->sp->lock, flags);
--- /dev/null
+From 10eea3c147141c90cf409b8df56d245c9d7f88d9 Mon Sep 17 00:00:00 2001
+From: Miguel Ojeda <ojeda@kernel.org>
+Date: Tue, 31 Mar 2026 22:58:48 +0200
+Subject: kbuild: rust: allow `clippy::uninlined_format_args`
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+commit 10eea3c147141c90cf409b8df56d245c9d7f88d9 upstream.
+
+Clippy in Rust 1.88.0 (only) reports [1]:
+
+ warning: variables can be used directly in the `format!` string
+ --> rust/macros/module.rs:112:23
+ |
+ 112 | let content = format!("{param}:{content}", param = param, content = content);
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
+ = note: `-W clippy::uninlined-format-args` implied by `-W clippy::all`
+ = help: to override `-W clippy::all` add `#[allow(clippy::uninlined_format_args)]`
+ help: change this to
+ |
+ 112 - let content = format!("{param}:{content}", param = param, content = content);
+ 112 + let content = format!("{param}:{content}");
+
+ warning: variables can be used directly in the `format!` string
+ --> rust/macros/module.rs:198:14
+ |
+ 198 | t => panic!("Unsupported parameter type {}", t),
+ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args
+ = note: `-W clippy::uninlined-format-args` implied by `-W clippy::all`
+ = help: to override `-W clippy::all` add `#[allow(clippy::uninlined_format_args)]`
+ help: change this to
+ |
+ 198 - t => panic!("Unsupported parameter type {}", t),
+ 198 + t => panic!("Unsupported parameter type {t}"),
+ |
+
+The reason it only triggers in that version is that the lint was moved
+from `pedantic` to `style` in Rust 1.88.0 and then back to `pedantic`
+in Rust 1.89.0 [2][3].
+
+In the first case, the suggestion is fair and a pure simplification, thus
+we will clean it up separately.
+
+To keep the behavior the same across all versions, and since the lint
+does not work for all macros (e.g. custom ones like `pr_info!`), disable
+it globally.
+
+Cc: stable@vger.kernel.org # Needed in 6.12.y and later (Rust is pinned in older LTSs).
+Link: https://lore.kernel.org/rust-for-linux/CANiq72=drAtf3y_DZ-2o4jb6Az9J3Yj4QYwWnbRui4sm4AJD3Q@mail.gmail.com/ [1]
+Link: https://github.com/rust-lang/rust-clippy/pull/15287 [2]
+Link: https://github.com/rust-lang/rust-clippy/issues/15151 [3]
+Link: https://patch.msgid.link/20260331205849.498295-1-ojeda@kernel.org
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Makefile | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/Makefile
++++ b/Makefile
+@@ -459,6 +459,7 @@ export rust_common_flags := --edition=20
+ -Aclippy::needless_lifetimes \
+ -Wclippy::no_mangle_with_rust_abi \
+ -Wclippy::undocumented_unsafe_blocks \
++ -Aclippy::uninlined_format_args \
+ -Wclippy::unnecessary_safety_comment \
+ -Wclippy::unnecessary_safety_doc \
+ -Wrustdoc::missing_crate_level_docs \
--- /dev/null
+From d45963a93c1495e9f1338fde91d0ebba8fd22474 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Thu, 19 Feb 2026 15:34:35 +0100
+Subject: leds: qcom-lpg: Check for array overflow when selecting the high resolution
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit d45963a93c1495e9f1338fde91d0ebba8fd22474 upstream.
+
+When selecting the high resolution values from the array, FIELD_GET() is
+used to pull from a 3 bit register, yet the array being indexed has only
+5 values in it. Odds are the hardware is sane, but just to be safe,
+properly check before just overflowing and reading random data and then
+setting up chip values based on that.
+
+Cc: stable <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Link: https://patch.msgid.link/2026021934-nearby-playroom-036b@gregkh
+Signed-off-by: Lee Jones <lee@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/leds/rgb/leds-qcom-lpg.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/leds/rgb/leds-qcom-lpg.c
++++ b/drivers/leds/rgb/leds-qcom-lpg.c
+@@ -1272,7 +1272,12 @@ static int lpg_pwm_get_state(struct pwm_
+ return ret;
+
+ if (chan->subtype == LPG_SUBTYPE_HI_RES_PWM) {
+- refclk = lpg_clk_rates_hi_res[FIELD_GET(PWM_CLK_SELECT_HI_RES_MASK, val)];
++ unsigned int clk_idx = FIELD_GET(PWM_CLK_SELECT_HI_RES_MASK, val);
++
++ if (clk_idx >= ARRAY_SIZE(lpg_clk_rates_hi_res))
++ return -EINVAL;
++
++ refclk = lpg_clk_rates_hi_res[clk_idx];
+ resolution = lpg_pwm_resolution_hi_res[FIELD_GET(PWM_SIZE_HI_RES_MASK, val)];
+ } else {
+ refclk = lpg_clk_rates[FIELD_GET(PWM_CLK_SELECT_MASK, val)];
--- /dev/null
+From 0c965d2784fbbd7f8e3b96d875c9cfdf7c00da3d Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Wed, 22 Apr 2026 15:45:12 +0800
+Subject: LoongArch: Add spectre boundry for syscall dispatch table
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit 0c965d2784fbbd7f8e3b96d875c9cfdf7c00da3d upstream.
+
+The LoongArch syscall number is directly controlled by userspace, but
+does not have an array_index_nospec() boundary to prevent access past
+the syscall function pointer tables.
+
+Cc: stable@vger.kernel.org
+Assisted-by: gkh_clanker_2000
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/syscall.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/loongarch/kernel/syscall.c
++++ b/arch/loongarch/kernel/syscall.c
+@@ -9,6 +9,7 @@
+ #include <linux/entry-common.h>
+ #include <linux/errno.h>
+ #include <linux/linkage.h>
++#include <linux/nospec.h>
+ #include <linux/objtool.h>
+ #include <linux/randomize_kstack.h>
+ #include <linux/syscalls.h>
+@@ -61,7 +62,7 @@ void noinstr __no_stack_protector do_sys
+ add_random_kstack_offset();
+
+ if (nr < NR_syscalls) {
+- syscall_fn = sys_call_table[nr];
++ syscall_fn = sys_call_table[array_index_nospec(nr, NR_syscalls)];
+ regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
+ regs->regs[7], regs->regs[8], regs->regs[9]);
+ }
--- /dev/null
+From 4b6e6ead556734bdc14024c5f837132b1e7a4b84 Mon Sep 17 00:00:00 2001
+From: Tyllis Xu <livelycarpet87@gmail.com>
+Date: Sun, 8 Mar 2026 00:21:08 -0600
+Subject: misc: ibmasm: fix OOB MMIO read in ibmasm_handle_mouse_interrupt()
+
+From: Tyllis Xu <livelycarpet87@gmail.com>
+
+commit 4b6e6ead556734bdc14024c5f837132b1e7a4b84 upstream.
+
+ibmasm_handle_mouse_interrupt() performs an out-of-bounds MMIO read
+when the queue reader or writer index from hardware exceeds
+REMOTE_QUEUE_SIZE (60).
+
+A compromised service processor can trigger this by writing an
+out-of-range value to the reader or writer MMIO register before
+asserting an interrupt. Since writer is re-read from hardware on
+every loop iteration, it can also be set to an out-of-range value
+after the loop has already started.
+
+The root cause is that get_queue_reader() and get_queue_writer() return
+raw readl() values that are passed directly into get_queue_entry(),
+which computes:
+
+ queue_begin + reader * sizeof(struct remote_input)
+
+with no bounds check. This unchecked MMIO address is then passed to
+memcpy_fromio(), reading 8 bytes from unintended device registers.
+For sufficiently large values the address falls outside the PCI BAR
+mapping entirely, triggering a machine check exception.
+
+Fix by checking both indices against REMOTE_QUEUE_SIZE at the top of
+the loop body, before any call to get_queue_entry(). On an out-of-range
+value, reset the reader register to 0 via set_queue_reader() before
+breaking, so that normal queue operation can resume if the corrupted
+hardware state is transient.
+
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Fixes: 278d72ae8803 ("[PATCH] ibmasm driver: redesign handling of remote control events")
+Cc: stable@vger.kernel.org
+Cc: ychen@northwestern.edu
+Signed-off-by: Tyllis Xu <LivelyCarpet87@gmail.com>
+Link: https://patch.msgid.link/20260308062108.258940-1-LivelyCarpet87@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/misc/ibmasm/remote.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/misc/ibmasm/remote.c
++++ b/drivers/misc/ibmasm/remote.c
+@@ -177,6 +177,11 @@ void ibmasm_handle_mouse_interrupt(struc
+ writer = get_queue_writer(sp);
+
+ while (reader != writer) {
++ if (reader >= REMOTE_QUEUE_SIZE || writer >= REMOTE_QUEUE_SIZE) {
++ set_queue_reader(sp, 0);
++ break;
++ }
++
+ memcpy_fromio(&input, get_queue_entry(sp, reader),
+ sizeof(struct remote_input));
+
usb-chipidea-otg-not-wait-vbus-drop-if-use-role_switch.patch
usb-chipidea-core-allow-ci_irq_handler-handle-both-id-and-vbus-change.patch
alsa-usb-audio-evaluate-packsize-caps-at-the-right-place.patch
+loongarch-add-spectre-boundry-for-syscall-dispatch-table.patch
+drm-nouveau-fix-u32-overflow-in-pushbuf-reloc-bounds-check.patch
+leds-qcom-lpg-check-for-array-overflow-when-selecting-the-high-resolution.patch
+greybus-gb-beagleplay-bound-bootloader-receive-buffering.patch
+greybus-gb-beagleplay-fix-sleep-in-atomic-context-in-hdlc_tx_frames.patch
+misc-ibmasm-fix-oob-mmio-read-in-ibmasm_handle_mouse_interrupt.patch
+ibmasm-fix-oob-reads-in-command_file_write-due-to-missing-size-checks.patch
+ibmasm-fix-heap-over-read-in-ibmasm_send_i2o_message.patch
+driver-core-don-t-let-a-device-probe-until-it-s-ready.patch
+drm-nouveau-fix-nvkm_device-leak-on-aperture-removal-failure.patch
+kbuild-rust-allow-clippy-uninlined_format_args.patch
+firmware-google-framebuffer-do-not-mark-framebuffer-as-busy.patch
+arm64-mm-enable-batched-tlb-flush-in-unmap_hotplug_range.patch