]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
6.19-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Mar 2026 11:48:37 +0000 (12:48 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 17 Mar 2026 11:48:37 +0000 (12:48 +0100)
added patches:
arm64-gcs-honour-mprotect-prot_none-on-shadow-stack-mappings.patch
bpf-fix-kprobe_multi-cookies-access-in-show_fdinfo-callback.patch
btrfs-fix-chunk-map-leak-in-btrfs_map_block-after-btrfs_chunk_map_num_copies.patch
cpufreq-intel_pstate-fix-null-pointer-dereference-in-update_cpu_qos_request.patch
device-property-allow-secondary-lookup-in-fwnode_get_next_child_node.patch
drm-amd-disable-mes-lr-compute-w-a.patch
drm-amdgpu-add-upper-bound-check-on-user-inputs-in-signal-ioctl.patch
drm-amdgpu-add-upper-bound-check-on-user-inputs-in-wait-ioctl.patch
drm-amdgpu-userq-fix-reference-leak-in-amdgpu_userq_wait_ioctl.patch
drm-bridge-samsung-dsim-fix-memory-leak-in-error-path.patch
drm-bridge-ti-sn65dsi86-enable-hpd-polling-if-irq-is-not-used.patch
drm-i915-alpm-alpm-disable-fixes.patch
drm-xe-sync-cleanup-partially-initialized-sync-on-parse-failure.patch
drm-xe-sync-fix-user-fence-leak-on-alloc-failure.patch
gpiolib-normalize-the-return-value-of-gc-get-on-behalf-of-buggy-drivers.patch
ice-reintroduce-retry-mechanism-for-indirect-aq.patch
iomap-don-t-mark-folio-uptodate-if-read-io-has-bytes-pending.patch
iomap-reject-delalloc-mappings-during-writeback.patch
ipmi-si-don-t-block-module-unload-if-the-bmc-is-messed-up.patch
ipmi-si-fix-check-for-a-misbehaving-bmc.patch
ipmi-si-handle-waiting-messages-when-bmc-failure-detected.patch
ipmi-si-use-a-long-timeout-when-the-bmc-is-misbehaving.patch
irqchip-gic-v3-its-limit-number-of-per-device-msis-to-the-range-the-its-supports.patch
ixgbevf-fix-link-setup-issue.patch
kunit-irq-ensure-timer-doesn-t-fire-too-frequently.patch
mm-damon-core-clear-walk_control-on-inactive-context-in-damos_walk.patch
mm-huge_memory-fix-a-folio_split-race-condition-with-folio_try_get.patch
mm-memfd_luo-always-dirty-all-folios.patch
mm-memfd_luo-always-make-all-folios-uptodate.patch
mm-slab-fix-an-incorrect-check-in-obj_exts_alloc_size.patch
nfsd-fix-cred-ref-leak-in-nfsd_nl_listener_set_doit.patch
nouveau-gsp-drop-warn_on-in-acpi-probes.patch
nsfs-tighten-permission-checks-for-handle-opening.patch
nstree-tighten-permission-checks-for-listing.patch
s390-pfault-fix-virtual-vs-physical-address-confusion.patch
sched_ext-disable-preemption-between-scx_claim_exit-and-kicking-helper-work.patch
sched_ext-fix-starvation-of-scx_enable-under-fair-class-saturation.patch
staging-rtl8723bs-fix-potential-out-of-bounds-read-in-rtw_restruct_wmm_ie.patch
staging-rtl8723bs-properly-validate-the-data-in-rtw_get_ie_ex.patch
staging-sm750fb-add-missing-pci_release_region-on-error-and-removal.patch

41 files changed:
queue-6.19/arm64-gcs-honour-mprotect-prot_none-on-shadow-stack-mappings.patch [new file with mode: 0644]
queue-6.19/bpf-fix-kprobe_multi-cookies-access-in-show_fdinfo-callback.patch [new file with mode: 0644]
queue-6.19/btrfs-fix-chunk-map-leak-in-btrfs_map_block-after-btrfs_chunk_map_num_copies.patch [new file with mode: 0644]
queue-6.19/cpufreq-intel_pstate-fix-null-pointer-dereference-in-update_cpu_qos_request.patch [new file with mode: 0644]
queue-6.19/device-property-allow-secondary-lookup-in-fwnode_get_next_child_node.patch [new file with mode: 0644]
queue-6.19/drm-amd-disable-mes-lr-compute-w-a.patch [new file with mode: 0644]
queue-6.19/drm-amdgpu-add-upper-bound-check-on-user-inputs-in-signal-ioctl.patch [new file with mode: 0644]
queue-6.19/drm-amdgpu-add-upper-bound-check-on-user-inputs-in-wait-ioctl.patch [new file with mode: 0644]
queue-6.19/drm-amdgpu-userq-fix-reference-leak-in-amdgpu_userq_wait_ioctl.patch [new file with mode: 0644]
queue-6.19/drm-bridge-samsung-dsim-fix-memory-leak-in-error-path.patch [new file with mode: 0644]
queue-6.19/drm-bridge-ti-sn65dsi86-enable-hpd-polling-if-irq-is-not-used.patch [new file with mode: 0644]
queue-6.19/drm-i915-alpm-alpm-disable-fixes.patch [new file with mode: 0644]
queue-6.19/drm-xe-sync-cleanup-partially-initialized-sync-on-parse-failure.patch [new file with mode: 0644]
queue-6.19/drm-xe-sync-fix-user-fence-leak-on-alloc-failure.patch [new file with mode: 0644]
queue-6.19/gpiolib-normalize-the-return-value-of-gc-get-on-behalf-of-buggy-drivers.patch [new file with mode: 0644]
queue-6.19/ice-reintroduce-retry-mechanism-for-indirect-aq.patch [new file with mode: 0644]
queue-6.19/iomap-don-t-mark-folio-uptodate-if-read-io-has-bytes-pending.patch [new file with mode: 0644]
queue-6.19/iomap-reject-delalloc-mappings-during-writeback.patch [new file with mode: 0644]
queue-6.19/ipmi-si-don-t-block-module-unload-if-the-bmc-is-messed-up.patch [new file with mode: 0644]
queue-6.19/ipmi-si-fix-check-for-a-misbehaving-bmc.patch [new file with mode: 0644]
queue-6.19/ipmi-si-handle-waiting-messages-when-bmc-failure-detected.patch [new file with mode: 0644]
queue-6.19/ipmi-si-use-a-long-timeout-when-the-bmc-is-misbehaving.patch [new file with mode: 0644]
queue-6.19/irqchip-gic-v3-its-limit-number-of-per-device-msis-to-the-range-the-its-supports.patch [new file with mode: 0644]
queue-6.19/ixgbevf-fix-link-setup-issue.patch [new file with mode: 0644]
queue-6.19/kunit-irq-ensure-timer-doesn-t-fire-too-frequently.patch [new file with mode: 0644]
queue-6.19/mm-damon-core-clear-walk_control-on-inactive-context-in-damos_walk.patch [new file with mode: 0644]
queue-6.19/mm-huge_memory-fix-a-folio_split-race-condition-with-folio_try_get.patch [new file with mode: 0644]
queue-6.19/mm-memfd_luo-always-dirty-all-folios.patch [new file with mode: 0644]
queue-6.19/mm-memfd_luo-always-make-all-folios-uptodate.patch [new file with mode: 0644]
queue-6.19/mm-slab-fix-an-incorrect-check-in-obj_exts_alloc_size.patch [new file with mode: 0644]
queue-6.19/nfsd-fix-cred-ref-leak-in-nfsd_nl_listener_set_doit.patch [new file with mode: 0644]
queue-6.19/nouveau-gsp-drop-warn_on-in-acpi-probes.patch [new file with mode: 0644]
queue-6.19/nsfs-tighten-permission-checks-for-handle-opening.patch [new file with mode: 0644]
queue-6.19/nstree-tighten-permission-checks-for-listing.patch [new file with mode: 0644]
queue-6.19/s390-pfault-fix-virtual-vs-physical-address-confusion.patch [new file with mode: 0644]
queue-6.19/sched_ext-disable-preemption-between-scx_claim_exit-and-kicking-helper-work.patch [new file with mode: 0644]
queue-6.19/sched_ext-fix-starvation-of-scx_enable-under-fair-class-saturation.patch [new file with mode: 0644]
queue-6.19/series
queue-6.19/staging-rtl8723bs-fix-potential-out-of-bounds-read-in-rtw_restruct_wmm_ie.patch [new file with mode: 0644]
queue-6.19/staging-rtl8723bs-properly-validate-the-data-in-rtw_get_ie_ex.patch [new file with mode: 0644]
queue-6.19/staging-sm750fb-add-missing-pci_release_region-on-error-and-removal.patch [new file with mode: 0644]

diff --git a/queue-6.19/arm64-gcs-honour-mprotect-prot_none-on-shadow-stack-mappings.patch b/queue-6.19/arm64-gcs-honour-mprotect-prot_none-on-shadow-stack-mappings.patch
new file mode 100644 (file)
index 0000000..37f342f
--- /dev/null
@@ -0,0 +1,46 @@
+From 47a8aad135ac1aed04b7b0c0a8157fd208075827 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Mon, 23 Feb 2026 17:45:31 +0000
+Subject: arm64: gcs: Honour mprotect(PROT_NONE) on shadow stack mappings
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 47a8aad135ac1aed04b7b0c0a8157fd208075827 upstream.
+
+vm_get_page_prot() short-circuits the protection_map[] lookup for a
+VM_SHADOW_STACK mapping since it uses a different PIE index from the
+typical read/write/exec permissions. However, the side effect is that it
+also ignores mprotect(PROT_NONE) by creating an accessible PTE.
+
+Special-case the !(vm_flags & VM_ACCESS_FLAGS) flags to use the
+protection_map[VM_NONE] permissions instead. No GCS attributes are
+required for an inaccessible PTE.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: 6497b66ba694 ("arm64/mm: Map pages for guarded control stack")
+Cc: stable@vger.kernel.org
+Cc: Mark Brown <broonie@kernel.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: David Hildenbrand <david@kernel.org>
+Reviewed-by: David Hildenbrand (Arm) <david@kernel.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/mm/mmap.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/mmap.c
++++ b/arch/arm64/mm/mmap.c
+@@ -91,7 +91,11 @@ pgprot_t vm_get_page_prot(vm_flags_t vm_
+       /* Short circuit GCS to avoid bloating the table. */
+       if (system_supports_gcs() && (vm_flags & VM_SHADOW_STACK)) {
+-              prot = gcs_page_prot;
++              /* Honour mprotect(PROT_NONE) on shadow stack mappings */
++              if (vm_flags & VM_ACCESS_FLAGS)
++                      prot = gcs_page_prot;
++              else
++                      prot = pgprot_val(protection_map[VM_NONE]);
+       } else {
+               prot = pgprot_val(protection_map[vm_flags &
+                                  (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
diff --git a/queue-6.19/bpf-fix-kprobe_multi-cookies-access-in-show_fdinfo-callback.patch b/queue-6.19/bpf-fix-kprobe_multi-cookies-access-in-show_fdinfo-callback.patch
new file mode 100644 (file)
index 0000000..ff05103
--- /dev/null
@@ -0,0 +1,44 @@
+From ad6fface76da42721c15e8fb281570aaa44a2c01 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@kernel.org>
+Date: Wed, 25 Feb 2026 12:12:49 +0100
+Subject: bpf: Fix kprobe_multi cookies access in show_fdinfo callback
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+commit ad6fface76da42721c15e8fb281570aaa44a2c01 upstream.
+
+We don't check if cookies are available on the kprobe_multi link
+before accessing them in show_fdinfo callback, we should.
+
+Cc: stable@vger.kernel.org
+Fixes: da7e9c0a7fbc ("bpf: Add show_fdinfo for kprobe_multi")
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Link: https://lore.kernel.org/r/20260225111249.186230-1-jolsa@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/bpf_trace.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/bpf_trace.c
++++ b/kernel/trace/bpf_trace.c
+@@ -2441,8 +2441,10 @@ static void bpf_kprobe_multi_show_fdinfo
+                                        struct seq_file *seq)
+ {
+       struct bpf_kprobe_multi_link *kmulti_link;
++      bool has_cookies;
+       kmulti_link = container_of(link, struct bpf_kprobe_multi_link, link);
++      has_cookies = !!kmulti_link->cookies;
+       seq_printf(seq,
+                  "kprobe_cnt:\t%u\n"
+@@ -2454,7 +2456,7 @@ static void bpf_kprobe_multi_show_fdinfo
+       for (int i = 0; i < kmulti_link->cnt; i++) {
+               seq_printf(seq,
+                          "%llu\t %pS\n",
+-                         kmulti_link->cookies[i],
++                         has_cookies ? kmulti_link->cookies[i] : 0,
+                          (void *)kmulti_link->addrs[i]);
+       }
+ }
diff --git a/queue-6.19/btrfs-fix-chunk-map-leak-in-btrfs_map_block-after-btrfs_chunk_map_num_copies.patch b/queue-6.19/btrfs-fix-chunk-map-leak-in-btrfs_map_block-after-btrfs_chunk_map_num_copies.patch
new file mode 100644 (file)
index 0000000..20076f6
--- /dev/null
@@ -0,0 +1,38 @@
+From f15fb3d41543244d1179f423da4a4832a55bc050 Mon Sep 17 00:00:00 2001
+From: Mark Harmstone <mark@harmstone.com>
+Date: Fri, 20 Feb 2026 12:53:17 +0000
+Subject: btrfs: fix chunk map leak in btrfs_map_block() after btrfs_chunk_map_num_copies()
+
+From: Mark Harmstone <mark@harmstone.com>
+
+commit f15fb3d41543244d1179f423da4a4832a55bc050 upstream.
+
+Fix a chunk map leak in btrfs_map_block(): if we return early with -EINVAL,
+we're not freeing the chunk map that we've just looked up.
+
+Fixes: 0ae653fbec2b ("btrfs: reduce chunk_map lookups in btrfs_map_block()")
+CC: stable@vger.kernel.org # 6.12+
+Reviewed-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: Mark Harmstone <mark@harmstone.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/volumes.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -6707,8 +6707,10 @@ int btrfs_map_block(struct btrfs_fs_info
+               return PTR_ERR(map);
+       num_copies = btrfs_chunk_map_num_copies(map);
+-      if (io_geom.mirror_num > num_copies)
+-              return -EINVAL;
++      if (io_geom.mirror_num > num_copies) {
++              ret = -EINVAL;
++              goto out;
++      }
+       map_offset = logical - map->start;
+       io_geom.raid56_full_stripe_start = (u64)-1;
diff --git a/queue-6.19/cpufreq-intel_pstate-fix-null-pointer-dereference-in-update_cpu_qos_request.patch b/queue-6.19/cpufreq-intel_pstate-fix-null-pointer-dereference-in-update_cpu_qos_request.patch
new file mode 100644 (file)
index 0000000..7b266f1
--- /dev/null
@@ -0,0 +1,60 @@
+From ab39cc4cb8ceecdc2b61747433e7237f1ac2b789 Mon Sep 17 00:00:00 2001
+From: David Arcari <darcari@redhat.com>
+Date: Tue, 24 Feb 2026 07:21:06 -0500
+Subject: cpufreq: intel_pstate: Fix NULL pointer dereference in update_cpu_qos_request()
+
+From: David Arcari <darcari@redhat.com>
+
+commit ab39cc4cb8ceecdc2b61747433e7237f1ac2b789 upstream.
+
+The update_cpu_qos_request() function attempts to initialize the 'freq'
+variable by dereferencing 'cpudata' before verifying if the 'policy'
+is valid.
+
+This issue occurs on systems booted with the "nosmt" parameter, where
+all_cpu_data[cpu] is NULL for the SMT sibling threads. As a result,
+any call to update_qos_requests() will result in a NULL pointer
+dereference as the code will attempt to access pstate.turbo_freq using
+the NULL cpudata pointer.
+
+Also, pstate.turbo_freq may be updated by intel_pstate_get_hwp_cap()
+after initializing the 'freq' variable, so it is better to defer the
+'freq' until intel_pstate_get_hwp_cap() has been called.
+
+Fix this by deferring the 'freq' assignment until after the policy and
+driver_data have been validated.
+
+Fixes: ae1bdd23b99f ("cpufreq: intel_pstate: Adjust frequency percentage computations")
+Reported-by: Jirka Hladky <jhladky@redhat.com>
+Closes: https://lore.kernel.org/all/CAE4VaGDfiPvz3AzrwrwM4kWB3SCkMci25nPO8W1JmTBd=xHzZg@mail.gmail.com/
+Signed-off-by: David Arcari <darcari@redhat.com>
+Cc: 6.18+ <stable@vger.kernel.org> # 6.18+
+[ rjw: Added one paragraph to the changelog ]
+Link: https://patch.msgid.link/20260224122106.228116-1-darcari@redhat.com
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/cpufreq/intel_pstate.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1647,8 +1647,8 @@ static ssize_t store_no_turbo(struct kob
+ static void update_cpu_qos_request(int cpu, enum freq_qos_req_type type)
+ {
+       struct cpudata *cpudata = all_cpu_data[cpu];
+-      unsigned int freq = cpudata->pstate.turbo_freq;
+       struct freq_qos_request *req;
++      unsigned int freq;
+       struct cpufreq_policy *policy __free(put_cpufreq_policy) = cpufreq_cpu_get(cpu);
+       if (!policy)
+@@ -1661,6 +1661,8 @@ static void update_cpu_qos_request(int c
+       if (hwp_active)
+               intel_pstate_get_hwp_cap(cpudata);
++      freq = cpudata->pstate.turbo_freq;
++
+       if (type == FREQ_QOS_MIN) {
+               freq = DIV_ROUND_UP(freq * global.min_perf_pct, 100);
+       } else {
diff --git a/queue-6.19/device-property-allow-secondary-lookup-in-fwnode_get_next_child_node.patch b/queue-6.19/device-property-allow-secondary-lookup-in-fwnode_get_next_child_node.patch
new file mode 100644 (file)
index 0000000..e698f93
--- /dev/null
@@ -0,0 +1,75 @@
+From 2692c614f8f05929d692b3dbfd3faef1f00fbaf0 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Date: Tue, 10 Feb 2026 14:58:22 +0100
+Subject: device property: Allow secondary lookup in fwnode_get_next_child_node()
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+commit 2692c614f8f05929d692b3dbfd3faef1f00fbaf0 upstream.
+
+When device_get_child_node_count() got split to the fwnode and device
+respective APIs, the fwnode didn't inherit the ability to traverse over
+the secondary fwnode. Hence any user, that switches from device to fwnode
+API misses this feature. In particular, this was revealed by the commit
+1490cbb9dbfd ("device property: Split fwnode_get_child_node_count()")
+that effectively broke the GPIO enumeration on Intel Galileo boards.
+Fix this by moving the secondary lookup from device to fwnode API.
+
+Note, in general no device_*() API should go into the depth of the fwnode
+implementation.
+
+Fixes: 114dbb4fa7c4 ("drivers property: When no children in primary, try secondary")
+Cc: stable@vger.kernel.org
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Reviewed-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Link: https://patch.msgid.link/20260210135822.47335-1-andriy.shevchenko@linux.intel.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/base/property.c |   27 +++++++++++++--------------
+ 1 file changed, 13 insertions(+), 14 deletions(-)
+
+--- a/drivers/base/property.c
++++ b/drivers/base/property.c
+@@ -797,7 +797,18 @@ struct fwnode_handle *
+ fwnode_get_next_child_node(const struct fwnode_handle *fwnode,
+                          struct fwnode_handle *child)
+ {
+-      return fwnode_call_ptr_op(fwnode, get_next_child_node, child);
++      struct fwnode_handle *next;
++
++      if (IS_ERR_OR_NULL(fwnode))
++              return NULL;
++
++      /* Try to find a child in primary fwnode */
++      next = fwnode_call_ptr_op(fwnode, get_next_child_node, child);
++      if (next)
++              return next;
++
++      /* When no more children in primary, continue with secondary */
++      return fwnode_call_ptr_op(fwnode->secondary, get_next_child_node, child);
+ }
+ EXPORT_SYMBOL_GPL(fwnode_get_next_child_node);
+@@ -841,19 +852,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_availa
+ struct fwnode_handle *device_get_next_child_node(const struct device *dev,
+                                                struct fwnode_handle *child)
+ {
+-      const struct fwnode_handle *fwnode = dev_fwnode(dev);
+-      struct fwnode_handle *next;
+-
+-      if (IS_ERR_OR_NULL(fwnode))
+-              return NULL;
+-
+-      /* Try to find a child in primary fwnode */
+-      next = fwnode_get_next_child_node(fwnode, child);
+-      if (next)
+-              return next;
+-
+-      /* When no more children in primary, continue with secondary */
+-      return fwnode_get_next_child_node(fwnode->secondary, child);
++      return fwnode_get_next_child_node(dev_fwnode(dev), child);
+ }
+ EXPORT_SYMBOL_GPL(device_get_next_child_node);
diff --git a/queue-6.19/drm-amd-disable-mes-lr-compute-w-a.patch b/queue-6.19/drm-amd-disable-mes-lr-compute-w-a.patch
new file mode 100644 (file)
index 0000000..97b031c
--- /dev/null
@@ -0,0 +1,60 @@
+From 6b0d812971370c64b837a2db4275410f478272fe Mon Sep 17 00:00:00 2001
+From: Mario Limonciello <mario.limonciello@amd.com>
+Date: Wed, 25 Feb 2026 10:51:16 -0600
+Subject: drm/amd: Disable MES LR compute W/A
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+commit 6b0d812971370c64b837a2db4275410f478272fe upstream.
+
+A workaround was introduced in commit 1fb710793ce2 ("drm/amdgpu: Enable
+MES lr_compute_wa by default") to help with some hangs observed in gfx1151.
+
+This WA didn't fully fix the issue.  It was actually fixed by adjusting
+the VGPR size to the correct value that matched the hardware in commit
+b42f3bf9536c ("drm/amdkfd: bump minimum vgpr size for gfx1151").
+
+There are reports of instability on other products with newer GC microcode
+versions, and I believe they're caused by this workaround. As we don't
+need the workaround any more, remove it.
+
+Fixes: b42f3bf9536c ("drm/amdkfd: bump minimum vgpr size for gfx1151")
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 9973e64bd6ee7642860a6f3b6958cbf14e89cabd)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/mes_v11_0.c |    5 -----
+ drivers/gpu/drm/amd/amdgpu/mes_v12_0.c |    5 -----
+ 2 files changed, 10 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+@@ -718,11 +718,6 @@ static int mes_v11_0_set_hw_resources(st
+       mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+       mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+       mes_set_hw_res_pkt.oversubscription_timer = 50;
+-      if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x7f)
+-              mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+-      else
+-              dev_info_once(mes->adev->dev,
+-                            "MES FW version must be >= 0x7f to enable LR compute workaround.\n");
+       if (amdgpu_mes_log_enable) {
+               mes_set_hw_res_pkt.enable_mes_event_int_logging = 1;
+--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
++++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+@@ -779,11 +779,6 @@ static int mes_v12_0_set_hw_resources(st
+       mes_set_hw_res_pkt.use_different_vmid_compute = 1;
+       mes_set_hw_res_pkt.enable_reg_active_poll = 1;
+       mes_set_hw_res_pkt.enable_level_process_quantum_check = 1;
+-      if ((mes->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x82)
+-              mes_set_hw_res_pkt.enable_lr_compute_wa = 1;
+-      else
+-              dev_info_once(adev->dev,
+-                            "MES FW version must be >= 0x82 to enable LR compute workaround.\n");
+       /*
+        * Keep oversubscribe timer for sdma . When we have unmapped doorbell
diff --git a/queue-6.19/drm-amdgpu-add-upper-bound-check-on-user-inputs-in-signal-ioctl.patch b/queue-6.19/drm-amdgpu-add-upper-bound-check-on-user-inputs-in-signal-ioctl.patch
new file mode 100644 (file)
index 0000000..dde85de
--- /dev/null
@@ -0,0 +1,52 @@
+From ea78f8c68f4f6211c557df49174c54d167821962 Mon Sep 17 00:00:00 2001
+From: Sunil Khatri <sunil.khatri@amd.com>
+Date: Fri, 20 Feb 2026 13:47:58 +0530
+Subject: drm/amdgpu: add upper bound check on user inputs in signal ioctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sunil Khatri <sunil.khatri@amd.com>
+
+commit ea78f8c68f4f6211c557df49174c54d167821962 upstream.
+
+Huge input values in amdgpu_userq_signal_ioctl can lead to an OOM and
+could be exploited.
+
+So check these input values against AMDGPU_USERQ_MAX_HANDLES
+which is a big enough value for genuine use cases and could
+potentially avoid OOM.
+
+Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit be267e15f99bc97cbe202cd556717797cdcf79a5)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c |    7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+@@ -35,6 +35,8 @@
+ static const struct dma_fence_ops amdgpu_userq_fence_ops;
+ static struct kmem_cache *amdgpu_userq_fence_slab;
++#define AMDGPU_USERQ_MAX_HANDLES      (1U << 16)
++
+ int amdgpu_userq_fence_slab_init(void)
+ {
+       amdgpu_userq_fence_slab = kmem_cache_create("amdgpu_userq_fence",
+@@ -476,6 +478,11 @@ int amdgpu_userq_signal_ioctl(struct drm
+       if (!amdgpu_userq_enabled(dev))
+               return -ENOTSUPP;
++      if (args->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
++          args->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
++          args->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
++              return -EINVAL;
++
+       num_syncobj_handles = args->num_syncobj_handles;
+       syncobj_handles = memdup_user(u64_to_user_ptr(args->syncobj_handles),
+                                     size_mul(sizeof(u32), num_syncobj_handles));
diff --git a/queue-6.19/drm-amdgpu-add-upper-bound-check-on-user-inputs-in-wait-ioctl.patch b/queue-6.19/drm-amdgpu-add-upper-bound-check-on-user-inputs-in-wait-ioctl.patch
new file mode 100644 (file)
index 0000000..efef683
--- /dev/null
@@ -0,0 +1,45 @@
+From 64ac7c09fc44985ec9bb6a9db740899fa40ca613 Mon Sep 17 00:00:00 2001
+From: Sunil Khatri <sunil.khatri@amd.com>
+Date: Tue, 24 Feb 2026 12:13:09 +0530
+Subject: drm/amdgpu: add upper bound check on user inputs in wait ioctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sunil Khatri <sunil.khatri@amd.com>
+
+commit 64ac7c09fc44985ec9bb6a9db740899fa40ca613 upstream.
+
+Huge input values in amdgpu_userq_wait_ioctl can lead to an OOM and
+could be exploited.
+
+So check these input values against AMDGPU_USERQ_MAX_HANDLES
+which is a big enough value for genuine use cases and could
+potentially avoid OOM.
+
+v2: squash in Srini's fix
+
+Signed-off-by: Sunil Khatri <sunil.khatri@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit fcec012c664247531aed3e662f4280ff804d1476)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+@@ -668,6 +668,11 @@ int amdgpu_userq_wait_ioctl(struct drm_d
+       if (!amdgpu_userq_enabled(dev))
+               return -ENOTSUPP;
++      if (wait_info->num_syncobj_handles > AMDGPU_USERQ_MAX_HANDLES ||
++          wait_info->num_bo_write_handles > AMDGPU_USERQ_MAX_HANDLES ||
++          wait_info->num_bo_read_handles > AMDGPU_USERQ_MAX_HANDLES)
++              return -EINVAL;
++
+       num_read_bo_handles = wait_info->num_bo_read_handles;
+       bo_handles_read = memdup_user(u64_to_user_ptr(wait_info->bo_read_handles),
+                                     size_mul(sizeof(u32), num_read_bo_handles));
diff --git a/queue-6.19/drm-amdgpu-userq-fix-reference-leak-in-amdgpu_userq_wait_ioctl.patch b/queue-6.19/drm-amdgpu-userq-fix-reference-leak-in-amdgpu_userq_wait_ioctl.patch
new file mode 100644 (file)
index 0000000..948a3b9
--- /dev/null
@@ -0,0 +1,47 @@
+From 49abfa812617a7f2d0132c70d23ac98b389c6ec1 Mon Sep 17 00:00:00 2001
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Date: Mon, 23 Feb 2026 12:41:30 +0000
+Subject: drm/amdgpu/userq: Fix reference leak in amdgpu_userq_wait_ioctl
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+
+commit 49abfa812617a7f2d0132c70d23ac98b389c6ec1 upstream.
+
+Drop reference to syncobj and timeline fence when aborting the ioctl
+due to the output array being too small.
+
+Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
+Fixes: a292fdecd728 ("drm/amdgpu: Implement userqueue signal/wait IOCTL")
+Cc: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
+Cc: Christian König <christian.koenig@amd.com>
+Cc: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 68951e9c3e6bb22396bc42ef2359751c8315dd27)
+Cc: <stable@vger.kernel.org> # v6.16+
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq_fence.c
+@@ -880,6 +880,7 @@ int amdgpu_userq_wait_ioctl(struct drm_d
+                               dma_fence_unwrap_for_each(f, &iter, fence) {
+                                       if (num_fences >= wait_info->num_fences) {
+                                               r = -EINVAL;
++                                              dma_fence_put(fence);
+                                               goto free_fences;
+                                       }
+@@ -904,6 +905,7 @@ int amdgpu_userq_wait_ioctl(struct drm_d
+                       if (num_fences >= wait_info->num_fences) {
+                               r = -EINVAL;
++                              dma_fence_put(fence);
+                               goto free_fences;
+                       }
diff --git a/queue-6.19/drm-bridge-samsung-dsim-fix-memory-leak-in-error-path.patch b/queue-6.19/drm-bridge-samsung-dsim-fix-memory-leak-in-error-path.patch
new file mode 100644 (file)
index 0000000..1b2dd83
--- /dev/null
@@ -0,0 +1,87 @@
+From 803ec1faf7c1823e6e3b1f2aaa81be18528c9436 Mon Sep 17 00:00:00 2001
+From: Osama Abdelkader <osama.abdelkader@gmail.com>
+Date: Mon, 9 Feb 2026 19:41:14 +0100
+Subject: drm/bridge: samsung-dsim: Fix memory leak in error path
+
+From: Osama Abdelkader <osama.abdelkader@gmail.com>
+
+commit 803ec1faf7c1823e6e3b1f2aaa81be18528c9436 upstream.
+
+In samsung_dsim_host_attach(), drm_bridge_add() is called to add the
+bridge. However, if samsung_dsim_register_te_irq() or
+pdata->host_ops->attach() fails afterwards, the function returns
+without removing the bridge, causing a memory leak.
+
+Fix this by adding proper error handling with goto labels to ensure
+drm_bridge_remove() is called in all error paths. Also ensure that
+samsung_dsim_unregister_te_irq() is called if the attach operation
+fails after the TE IRQ has been registered.
+
+samsung_dsim_unregister_te_irq() function is moved without changes
+to be before samsung_dsim_host_attach() to avoid forward declaration.
+
+Fixes: e7447128ca4a ("drm: bridge: Generalize Exynos-DSI driver into a Samsung DSIM bridge")
+Cc: stable@vger.kernel.org
+Signed-off-by: Osama Abdelkader <osama.abdelkader@gmail.com>
+Reviewed-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Link: https://patch.msgid.link/20260209184115.10937-1-osama.abdelkader@gmail.com
+Signed-off-by: Luca Ceresoli <luca.ceresoli@bootlin.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/bridge/samsung-dsim.c |   25 ++++++++++++++++---------
+ 1 file changed, 16 insertions(+), 9 deletions(-)
+
+--- a/drivers/gpu/drm/bridge/samsung-dsim.c
++++ b/drivers/gpu/drm/bridge/samsung-dsim.c
+@@ -1881,6 +1881,14 @@ static int samsung_dsim_register_te_irq(
+       return 0;
+ }
++static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi)
++{
++      if (dsi->te_gpio) {
++              free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
++              gpiod_put(dsi->te_gpio);
++      }
++}
++
+ static int samsung_dsim_host_attach(struct mipi_dsi_host *host,
+                                   struct mipi_dsi_device *device)
+ {
+@@ -1955,13 +1963,13 @@ of_find_panel_or_bridge:
+       if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+               ret = samsung_dsim_register_te_irq(dsi, &device->dev);
+               if (ret)
+-                      return ret;
++                      goto err_remove_bridge;
+       }
+       if (pdata->host_ops && pdata->host_ops->attach) {
+               ret = pdata->host_ops->attach(dsi, device);
+               if (ret)
+-                      return ret;
++                      goto err_unregister_te_irq;
+       }
+       dsi->lanes = device->lanes;
+@@ -1969,14 +1977,13 @@ of_find_panel_or_bridge:
+       dsi->mode_flags = device->mode_flags;
+       return 0;
+-}
+-static void samsung_dsim_unregister_te_irq(struct samsung_dsim *dsi)
+-{
+-      if (dsi->te_gpio) {
+-              free_irq(gpiod_to_irq(dsi->te_gpio), dsi);
+-              gpiod_put(dsi->te_gpio);
+-      }
++err_unregister_te_irq:
++      if (!(device->mode_flags & MIPI_DSI_MODE_VIDEO))
++              samsung_dsim_unregister_te_irq(dsi);
++err_remove_bridge:
++      drm_bridge_remove(&dsi->bridge);
++      return ret;
+ }
+ static int samsung_dsim_host_detach(struct mipi_dsi_host *host,
diff --git a/queue-6.19/drm-bridge-ti-sn65dsi86-enable-hpd-polling-if-irq-is-not-used.patch b/queue-6.19/drm-bridge-ti-sn65dsi86-enable-hpd-polling-if-irq-is-not-used.patch
new file mode 100644 (file)
index 0000000..9c612e0
--- /dev/null
@@ -0,0 +1,51 @@
+From 0b87d51690dd5131cbe9fbd23746b037aab89815 Mon Sep 17 00:00:00 2001
+From: Franz Schnyder <franz.schnyder@toradex.com>
+Date: Fri, 6 Feb 2026 13:37:36 +0100
+Subject: drm/bridge: ti-sn65dsi86: Enable HPD polling if IRQ is not used
+
+From: Franz Schnyder <franz.schnyder@toradex.com>
+
+commit 0b87d51690dd5131cbe9fbd23746b037aab89815 upstream.
+
+Fallback to polling to detect hotplug events on systems without
+interrupts.
+
+On systems where the interrupt line of the bridge is not connected,
+the bridge cannot notify hotplug events. Only add the
+DRM_BRIDGE_OP_HPD flag if an interrupt has been registered
+otherwise remain in polling mode.
+
+Fixes: 55e8ff842051 ("drm/bridge: ti-sn65dsi86: Add HPD for DisplayPort connector type")
+Cc: stable@vger.kernel.org # 6.16: 9133bc3f0564: drm/bridge: ti-sn65dsi86: Add
+Signed-off-by: Franz Schnyder <franz.schnyder@toradex.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+[dianders: Adjusted Fixes/stable line based on discussion]
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patch.msgid.link/20260206123758.374555-1-fra.schnyder@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/bridge/ti-sn65dsi86.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -1415,6 +1415,7 @@ static int ti_sn_bridge_probe(struct aux
+ {
+       struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
+       struct device_node *np = pdata->dev->of_node;
++      const struct i2c_client *client = to_i2c_client(pdata->dev);
+       int ret;
+       pdata->next_bridge = devm_drm_of_get_bridge(&adev->dev, np, 1, 0);
+@@ -1433,8 +1434,9 @@ static int ti_sn_bridge_probe(struct aux
+                          ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
+       if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
+-              pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
+-                                  DRM_BRIDGE_OP_HPD;
++              pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
++              if (client->irq)
++                      pdata->bridge.ops |= DRM_BRIDGE_OP_HPD;
+               /*
+                * If comms were already enabled they would have been enabled
+                * with the wrong value of HPD_DISABLE. Update it now. Comms
diff --git a/queue-6.19/drm-i915-alpm-alpm-disable-fixes.patch b/queue-6.19/drm-i915-alpm-alpm-disable-fixes.patch
new file mode 100644 (file)
index 0000000..ea3c893
--- /dev/null
@@ -0,0 +1,49 @@
+From eb4a7139e97374f42b7242cc754e77f1623fbcd5 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Jouni=20H=C3=B6gander?= <jouni.hogander@intel.com>
+Date: Thu, 12 Feb 2026 08:27:31 +0200
+Subject: drm/i915/alpm: ALPM disable fixes
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jouni Högander <jouni.hogander@intel.com>
+
+commit eb4a7139e97374f42b7242cc754e77f1623fbcd5 upstream.
+
+PORT_ALPM_CTL is supposed to be written only before link training. Remove
+writing it from ALPM disable.
+
+Also, clearing ALPM_CTL_ALPM_AUX_LESS_ENABLE is not about disabling ALPM
+but switching to AUX-Wake ALPM. Stop touching this bit on ALPM disable.
+
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/7153
+Fixes: 1ccbf135862b ("drm/i915/psr: Enable ALPM on source side for eDP Panel replay")
+Cc: Animesh Manna <animesh.manna@intel.com>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: <stable@vger.kernel.org> # v6.10+
+Signed-off-by: Jouni Högander <jouni.hogander@intel.com>
+Reviewed-by: MichaÅ‚ Grzelak <michal.grzelak@intel.com>
+Link: https://patch.msgid.link/20260212062731.397801-1-jouni.hogander@intel.com
+(cherry picked from commit 008304c9ae75c772d3460040de56e12112cdf5e6)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/i915/display/intel_alpm.c |    7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/display/intel_alpm.c
++++ b/drivers/gpu/drm/i915/display/intel_alpm.c
+@@ -558,12 +558,7 @@ void intel_alpm_disable(struct intel_dp
+       mutex_lock(&intel_dp->alpm.lock);
+       intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
+-                   ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE |
+-                   ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
+-
+-      intel_de_rmw(display,
+-                   PORT_ALPM_CTL(cpu_transcoder),
+-                   PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
++                   ALPM_CTL_ALPM_ENABLE | ALPM_CTL_LOBF_ENABLE, 0);
+       drm_dbg_kms(display->drm, "Disabling ALPM\n");
+       mutex_unlock(&intel_dp->alpm.lock);
diff --git a/queue-6.19/drm-xe-sync-cleanup-partially-initialized-sync-on-parse-failure.patch b/queue-6.19/drm-xe-sync-cleanup-partially-initialized-sync-on-parse-failure.patch
new file mode 100644 (file)
index 0000000..1cc1dc0
--- /dev/null
@@ -0,0 +1,83 @@
+From 1bfd7575092420ba5a0b944953c95b74a5646ff8 Mon Sep 17 00:00:00 2001
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+Date: Thu, 19 Feb 2026 23:35:18 +0000
+Subject: drm/xe/sync: Cleanup partially initialized sync on parse failure
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+commit 1bfd7575092420ba5a0b944953c95b74a5646ff8 upstream.
+
+xe_sync_entry_parse() can allocate references (syncobj, fence, chain fence,
+or user fence) before hitting a later failure path. Several of those paths
+returned directly, leaving partially initialized state and leaking refs.
+
+Route these error paths through a common free_sync label and call
+xe_sync_entry_cleanup(sync) before returning the error.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20260219233516.2938172-5-shuicheng.lin@intel.com
+(cherry picked from commit f939bdd9207a5d1fc55cced5459858480686ce22)
+Cc: stable@vger.kernel.org
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_sync.c |   24 +++++++++++++++++-------
+ 1 file changed, 17 insertions(+), 7 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_sync.c
++++ b/drivers/gpu/drm/xe/xe_sync.c
+@@ -146,8 +146,10 @@ int xe_sync_entry_parse(struct xe_device
+               if (!signal) {
+                       sync->fence = drm_syncobj_fence_get(sync->syncobj);
+-                      if (XE_IOCTL_DBG(xe, !sync->fence))
+-                              return -EINVAL;
++                      if (XE_IOCTL_DBG(xe, !sync->fence)) {
++                              err = -EINVAL;
++                              goto free_sync;
++                      }
+               }
+               break;
+@@ -167,17 +169,21 @@ int xe_sync_entry_parse(struct xe_device
+               if (signal) {
+                       sync->chain_fence = dma_fence_chain_alloc();
+-                      if (!sync->chain_fence)
+-                              return -ENOMEM;
++                      if (!sync->chain_fence) {
++                              err = -ENOMEM;
++                              goto free_sync;
++                      }
+               } else {
+                       sync->fence = drm_syncobj_fence_get(sync->syncobj);
+-                      if (XE_IOCTL_DBG(xe, !sync->fence))
+-                              return -EINVAL;
++                      if (XE_IOCTL_DBG(xe, !sync->fence)) {
++                              err = -EINVAL;
++                              goto free_sync;
++                      }
+                       err = dma_fence_chain_find_seqno(&sync->fence,
+                                                        sync_in.timeline_value);
+                       if (err)
+-                              return err;
++                              goto free_sync;
+               }
+               break;
+@@ -218,6 +224,10 @@ int xe_sync_entry_parse(struct xe_device
+       sync->timeline_value = sync_in.timeline_value;
+       return 0;
++
++free_sync:
++      xe_sync_entry_cleanup(sync);
++      return err;
+ }
+ ALLOW_ERROR_INJECTION(xe_sync_entry_parse, ERRNO);
diff --git a/queue-6.19/drm-xe-sync-fix-user-fence-leak-on-alloc-failure.patch b/queue-6.19/drm-xe-sync-fix-user-fence-leak-on-alloc-failure.patch
new file mode 100644 (file)
index 0000000..6bd6b70
--- /dev/null
@@ -0,0 +1,41 @@
+From 0879c3f04f67e2a1677c25dcc24669ce21eb6a6c Mon Sep 17 00:00:00 2001
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+Date: Thu, 19 Feb 2026 23:35:19 +0000
+Subject: drm/xe/sync: Fix user fence leak on alloc failure
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+commit 0879c3f04f67e2a1677c25dcc24669ce21eb6a6c upstream.
+
+When dma_fence_chain_alloc() fails, properly release the user fence
+reference to prevent a memory leak.
+
+Fixes: 0995c2fc39b0 ("drm/xe: Enforce correct user fence signaling order using")
+Cc: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Link: https://patch.msgid.link/20260219233516.2938172-6-shuicheng.lin@intel.com
+(cherry picked from commit a5d5634cde48a9fcd68c8504aa07f89f175074a0)
+Cc: stable@vger.kernel.org
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/xe/xe_sync.c |    6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpu/drm/xe/xe_sync.c
++++ b/drivers/gpu/drm/xe/xe_sync.c
+@@ -200,8 +200,10 @@ int xe_sync_entry_parse(struct xe_device
+                       if (XE_IOCTL_DBG(xe, IS_ERR(sync->ufence)))
+                               return PTR_ERR(sync->ufence);
+                       sync->ufence_chain_fence = dma_fence_chain_alloc();
+-                      if (!sync->ufence_chain_fence)
+-                              return -ENOMEM;
++                      if (!sync->ufence_chain_fence) {
++                              err = -ENOMEM;
++                              goto free_sync;
++                      }
+                       sync->ufence_syncobj = ufence_syncobj;
+               }
diff --git a/queue-6.19/gpiolib-normalize-the-return-value-of-gc-get-on-behalf-of-buggy-drivers.patch b/queue-6.19/gpiolib-normalize-the-return-value-of-gc-get-on-behalf-of-buggy-drivers.patch
new file mode 100644 (file)
index 0000000..20b143e
--- /dev/null
@@ -0,0 +1,46 @@
+From ec2cceadfae72304ca19650f9cac4b2a97b8a2fc Mon Sep 17 00:00:00 2001
+From: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+Date: Thu, 19 Feb 2026 10:51:33 +0100
+Subject: gpiolib: normalize the return value of gc->get() on behalf of buggy drivers
+
+From: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+
+commit ec2cceadfae72304ca19650f9cac4b2a97b8a2fc upstream.
+
+Commit 86ef402d805d ("gpiolib: sanitize the return value of
+gpio_chip::get()") started checking the return value of the .get()
+callback in struct gpio_chip. Now - almost a year later - it turns out
+that there are quite a few drivers in tree that can break with this
+change. Partially revert it: normalize the return value in GPIO core but
+also emit a warning.
+
+Cc: stable@vger.kernel.org
+Fixes: 86ef402d805d ("gpiolib: sanitize the return value of gpio_chip::get()")
+Reported-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Closes: https://lore.kernel.org/all/aZSkqGTqMp_57qC7@google.com/
+Reviewed-by: Linus Walleij <linusw@kernel.org>
+Reviewed-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Link: https://patch.msgid.link/20260219-gpiolib-set-normalize-v2-1-f84630e45796@oss.qualcomm.com
+Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpio/gpiolib.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/gpio/gpiolib.c
++++ b/drivers/gpio/gpiolib.c
+@@ -3268,8 +3268,12 @@ static int gpiochip_get(struct gpio_chip
+       /* Make sure this is called after checking for gc->get(). */
+       ret = gc->get(gc, offset);
+-      if (ret > 1)
+-              ret = -EBADE;
++      if (ret > 1) {
++              gpiochip_warn(gc,
++                      "invalid return value from gc->get(): %d, consider fixing the driver\n",
++                      ret);
++              ret = !!ret;
++      }
+       return ret;
+ }
diff --git a/queue-6.19/ice-reintroduce-retry-mechanism-for-indirect-aq.patch b/queue-6.19/ice-reintroduce-retry-mechanism-for-indirect-aq.patch
new file mode 100644 (file)
index 0000000..69f08cd
--- /dev/null
@@ -0,0 +1,74 @@
+From 326256c0a72d4877cec1d4df85357da106233128 Mon Sep 17 00:00:00 2001
+From: Jakub Staniszewski <jakub.staniszewski@linux.intel.com>
+Date: Tue, 13 Jan 2026 20:38:16 +0100
+Subject: ice: reintroduce retry mechanism for indirect AQ
+
+From: Jakub Staniszewski <jakub.staniszewski@linux.intel.com>
+
+commit 326256c0a72d4877cec1d4df85357da106233128 upstream.
+
+Add retry mechanism for indirect Admin Queue (AQ) commands. To do so we
+need to keep the command buffer.
+
+This technically reverts commit 43a630e37e25
+("ice: remove unused buffer copy code in ice_sq_send_cmd_retry()"),
+but combines it with a fix in the logic by using a kmemdup() call,
+making it more robust and less likely to break in the future due to
+programmer error.
+
+Cc: Michal Schmidt <mschmidt@redhat.com>
+Cc: stable@vger.kernel.org
+Fixes: 3056df93f7a8 ("ice: Re-send some AQ commands, as result of EBUSY AQ error")
+Signed-off-by: Jakub Staniszewski <jakub.staniszewski@linux.intel.com>
+Co-developed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Signed-off-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ice/ice_common.c |   12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -1879,6 +1879,7 @@ ice_sq_send_cmd_retry(struct ice_hw *hw,
+ {
+       struct libie_aq_desc desc_cpy;
+       bool is_cmd_for_retry;
++      u8 *buf_cpy = NULL;
+       u8 idx = 0;
+       u16 opcode;
+       int status;
+@@ -1888,8 +1889,11 @@ ice_sq_send_cmd_retry(struct ice_hw *hw,
+       memset(&desc_cpy, 0, sizeof(desc_cpy));
+       if (is_cmd_for_retry) {
+-              /* All retryable cmds are direct, without buf. */
+-              WARN_ON(buf);
++              if (buf) {
++                      buf_cpy = kmemdup(buf, buf_size, GFP_KERNEL);
++                      if (!buf_cpy)
++                              return -ENOMEM;
++              }
+               memcpy(&desc_cpy, desc, sizeof(desc_cpy));
+       }
+@@ -1901,12 +1905,14 @@ ice_sq_send_cmd_retry(struct ice_hw *hw,
+                   hw->adminq.sq_last_status != LIBIE_AQ_RC_EBUSY)
+                       break;
++              if (buf_cpy)
++                      memcpy(buf, buf_cpy, buf_size);
+               memcpy(desc, &desc_cpy, sizeof(desc_cpy));
+-
+               msleep(ICE_SQ_SEND_DELAY_TIME_MS);
+       } while (++idx < ICE_SQ_SEND_MAX_EXECUTE);
++      kfree(buf_cpy);
+       return status;
+ }
diff --git a/queue-6.19/iomap-don-t-mark-folio-uptodate-if-read-io-has-bytes-pending.patch b/queue-6.19/iomap-don-t-mark-folio-uptodate-if-read-io-has-bytes-pending.patch
new file mode 100644 (file)
index 0000000..fbda89b
--- /dev/null
@@ -0,0 +1,72 @@
+From debc1a492b2695d05973994fb0f796dbd9ceaae6 Mon Sep 17 00:00:00 2001
+From: Joanne Koong <joannelkoong@gmail.com>
+Date: Tue, 3 Mar 2026 15:34:20 -0800
+Subject: iomap: don't mark folio uptodate if read IO has bytes pending
+
+From: Joanne Koong <joannelkoong@gmail.com>
+
+commit debc1a492b2695d05973994fb0f796dbd9ceaae6 upstream.
+
+If a folio has ifs metadata attached to it and the folio is partially
+read in through an async IO helper with the rest of it then being read
+in through post-EOF zeroing or as inline data, and the helper
+successfully finishes the read first, then post-EOF zeroing / reading
+inline will mark the folio as uptodate in iomap_set_range_uptodate().
+
+This is a problem because when the read completion path later calls
+iomap_read_end(), it will call folio_end_read(), which sets the uptodate
+bit using XOR semantics. Calling folio_end_read() on a folio that was
+already marked uptodate clears the uptodate bit.
+
+Fix this by not marking the folio as uptodate if the read IO has bytes
+pending. The folio uptodate state will be set in the read completion
+path through iomap_end_read() -> folio_end_read().
+
+Reported-by: Wei Gao <wegao@suse.com>
+Suggested-by: Sasha Levin <sashal@kernel.org>
+Tested-by: Wei Gao <wegao@suse.com>
+Reviewed-by: Darrick J. Wong <djwong@kernel.org>
+Cc: stable@vger.kernel.org # v6.19
+Link: https://lore.kernel.org/linux-fsdevel/aYbmy8JdgXwsGaPP@autotest-wegao.qe.prg2.suse.org/
+Fixes: b2f35ac4146d ("iomap: add caller-provided callbacks for read and readahead")
+Signed-off-by: Joanne Koong <joannelkoong@gmail.com>
+Link: https://patch.msgid.link/20260303233420.874231-2-joannelkoong@gmail.com
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/iomap/buffered-io.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/fs/iomap/buffered-io.c
++++ b/fs/iomap/buffered-io.c
+@@ -79,18 +79,27 @@ static void iomap_set_range_uptodate(str
+ {
+       struct iomap_folio_state *ifs = folio->private;
+       unsigned long flags;
+-      bool uptodate = true;
++      bool mark_uptodate = true;
+       if (folio_test_uptodate(folio))
+               return;
+       if (ifs) {
+               spin_lock_irqsave(&ifs->state_lock, flags);
+-              uptodate = ifs_set_range_uptodate(folio, ifs, off, len);
++              /*
++               * If a read with bytes pending is in progress, we must not call
++               * folio_mark_uptodate(). The read completion path
++               * (iomap_read_end()) will call folio_end_read(), which uses XOR
++               * semantics to set the uptodate bit. If we set it here, the XOR
++               * in folio_end_read() will clear it, leaving the folio not
++               * uptodate.
++               */
++              mark_uptodate = ifs_set_range_uptodate(folio, ifs, off, len) &&
++                              !ifs->read_bytes_pending;
+               spin_unlock_irqrestore(&ifs->state_lock, flags);
+       }
+-      if (uptodate)
++      if (mark_uptodate)
+               folio_mark_uptodate(folio);
+ }
diff --git a/queue-6.19/iomap-reject-delalloc-mappings-during-writeback.patch b/queue-6.19/iomap-reject-delalloc-mappings-during-writeback.patch
new file mode 100644 (file)
index 0000000..1166997
--- /dev/null
@@ -0,0 +1,58 @@
+From d320f160aa5ff36cdf83c645cca52b615e866e32 Mon Sep 17 00:00:00 2001
+From: "Darrick J. Wong" <djwong@kernel.org>
+Date: Mon, 2 Mar 2026 09:30:02 -0800
+Subject: iomap: reject delalloc mappings during writeback
+
+From: Darrick J. Wong <djwong@kernel.org>
+
+commit d320f160aa5ff36cdf83c645cca52b615e866e32 upstream.
+
+Filesystems should never provide a delayed allocation mapping to
+writeback; they're supposed to allocate the space before replying.
+This can lead to weird IO errors and crashes in the block layer if the
+filesystem is being malicious, or if it hadn't set iomap->dev because
+it's a delalloc mapping.
+
+Fix this by failing writeback on delalloc mappings.  Currently no
+filesystems actually misbehave in this manner, but we ought to be
+stricter about things like that.
+
+Cc: stable@vger.kernel.org # v5.5
+Fixes: 598ecfbaa742ac ("iomap: lift the xfs writeback code to iomap")
+Signed-off-by: Darrick J. Wong <djwong@kernel.org>
+Link: https://patch.msgid.link/20260302173002.GL13829@frogsfrogsfrogs
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Carlos Maiolino <cmaiolino@redhat.com>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/iomap/ioend.c |   13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/fs/iomap/ioend.c
++++ b/fs/iomap/ioend.c
+@@ -163,17 +163,18 @@ ssize_t iomap_add_to_ioend(struct iomap_
+       WARN_ON_ONCE(!folio->private && map_len < dirty_len);
+       switch (wpc->iomap.type) {
+-      case IOMAP_INLINE:
+-              WARN_ON_ONCE(1);
+-              return -EIO;
++      case IOMAP_UNWRITTEN:
++              ioend_flags |= IOMAP_IOEND_UNWRITTEN;
++              break;
++      case IOMAP_MAPPED:
++              break;
+       case IOMAP_HOLE:
+               return map_len;
+       default:
+-              break;
++              WARN_ON_ONCE(1);
++              return -EIO;
+       }
+-      if (wpc->iomap.type == IOMAP_UNWRITTEN)
+-              ioend_flags |= IOMAP_IOEND_UNWRITTEN;
+       if (wpc->iomap.flags & IOMAP_F_SHARED)
+               ioend_flags |= IOMAP_IOEND_SHARED;
+       if (folio_test_dropbehind(folio))
diff --git a/queue-6.19/ipmi-si-don-t-block-module-unload-if-the-bmc-is-messed-up.patch b/queue-6.19/ipmi-si-don-t-block-module-unload-if-the-bmc-is-messed-up.patch
new file mode 100644 (file)
index 0000000..5e5d7dc
--- /dev/null
@@ -0,0 +1,35 @@
+From f895e5df80316a308c2f7d64d13a78494630ea05 Mon Sep 17 00:00:00 2001
+From: Corey Minyard <corey@minyard.net>
+Date: Thu, 12 Feb 2026 21:52:48 -0600
+Subject: ipmi:si: Don't block module unload if the BMC is messed up
+
+From: Corey Minyard <corey@minyard.net>
+
+commit f895e5df80316a308c2f7d64d13a78494630ea05 upstream.
+
+If the BMC is in a bad state, don't bother waiting for queued messages
+since there can't be any.  Otherwise the unload is blocked until the
+BMC is back in a good state.
+
+Reported-by: Rafael J. Wysocki <rafael@kernel.org>
+Fixes: bc3a9d217755 ("ipmi:si: Gracefully handle if the BMC is non-functional")
+Cc: stable@vger.kernel.org # 4.18
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_si_intf.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -2226,7 +2226,8 @@ static void wait_msg_processed(struct sm
+       unsigned long jiffies_now;
+       long time_diff;
+-      while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
++      while (smi_info->si_state != SI_HOSED &&
++                  (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL))) {
+               jiffies_now = jiffies;
+               time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
+                    * SI_USEC_PER_JIFFY);
diff --git a/queue-6.19/ipmi-si-fix-check-for-a-misbehaving-bmc.patch b/queue-6.19/ipmi-si-fix-check-for-a-misbehaving-bmc.patch
new file mode 100644 (file)
index 0000000..4d87c60
--- /dev/null
@@ -0,0 +1,69 @@
+From cae66f1a1dcd23e17da5a015ef9d731129f9d2dd Mon Sep 17 00:00:00 2001
+From: Corey Minyard <corey@minyard.net>
+Date: Fri, 13 Feb 2026 00:15:04 -0600
+Subject: ipmi:si: Fix check for a misbehaving BMC
+
+From: Corey Minyard <corey@minyard.net>
+
+commit cae66f1a1dcd23e17da5a015ef9d731129f9d2dd upstream.
+
+There is a race on checking the state in the sender, it needs to be
+checked under a lock.  But you also need a check to avoid issues with
+a misbehaving BMC for run to completion mode.  So leave the check at
+the beginning for run to completion, and add a check under the lock
+to avoid the race.
+
+Reported-by: Rafael J. Wysocki <rafael@kernel.org>
+Fixes: bc3a9d217755 ("ipmi:si: Gracefully handle if the BMC is non-functional")
+Cc: stable@vger.kernel.org # 4.18
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Reviewed-by: Rafael J. Wysocki (Intel) <rafael@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_si_intf.c |   24 +++++++++++++-----------
+ 1 file changed, 13 insertions(+), 11 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -924,9 +924,14 @@ static int sender(void *send_info, struc
+ {
+       struct smi_info   *smi_info = send_info;
+       unsigned long     flags;
++      int rv = IPMI_CC_NO_ERROR;
+       debug_timestamp(smi_info, "Enqueue");
++      /*
++       * Check here for run to completion mode.  A check under lock is
++       * later.
++       */
+       if (smi_info->si_state == SI_HOSED)
+               return IPMI_BUS_ERR;
+@@ -940,18 +945,15 @@ static int sender(void *send_info, struc
+       }
+       spin_lock_irqsave(&smi_info->si_lock, flags);
+-      /*
+-       * The following two lines don't need to be under the lock for
+-       * the lock's sake, but they do need SMP memory barriers to
+-       * avoid getting things out of order.  We are already claiming
+-       * the lock, anyway, so just do it under the lock to avoid the
+-       * ordering problem.
+-       */
+-      BUG_ON(smi_info->waiting_msg);
+-      smi_info->waiting_msg = msg;
+-      check_start_timer_thread(smi_info);
++      if (smi_info->si_state == SI_HOSED) {
++              rv = IPMI_BUS_ERR;
++      } else {
++              BUG_ON(smi_info->waiting_msg);
++              smi_info->waiting_msg = msg;
++              check_start_timer_thread(smi_info);
++      }
+       spin_unlock_irqrestore(&smi_info->si_lock, flags);
+-      return IPMI_CC_NO_ERROR;
++      return rv;
+ }
+ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
diff --git a/queue-6.19/ipmi-si-handle-waiting-messages-when-bmc-failure-detected.patch b/queue-6.19/ipmi-si-handle-waiting-messages-when-bmc-failure-detected.patch
new file mode 100644 (file)
index 0000000..fd1967d
--- /dev/null
@@ -0,0 +1,41 @@
+From 52c9ee202edd21d0599ac3b5a6fe1da2a2f053e5 Mon Sep 17 00:00:00 2001
+From: Corey Minyard <corey@minyard.net>
+Date: Fri, 6 Feb 2026 09:59:32 -0600
+Subject: ipmi:si: Handle waiting messages when BMC failure detected
+
+From: Corey Minyard <corey@minyard.net>
+
+commit 52c9ee202edd21d0599ac3b5a6fe1da2a2f053e5 upstream.
+
+If a BMC failure is detected, the current message is returned with an
+error.  However, if there was a waiting message, it would not be
+handled.
+
+Add a check for the waiting message after handling the current message.
+
+Suggested-by: Guenter Roeck <linux@roeck-us.net>
+Reported-by: Rafael J. Wysocki <rafael@kernel.org>
+Closes: https://lore.kernel.org/linux-acpi/CAK8fFZ58fidGUCHi5WFX0uoTPzveUUDzT=k=AAm4yWo3bAuCFg@mail.gmail.com/
+Fixes: bc3a9d217755 ("ipmi:si: Gracefully handle if the BMC is non-functional")
+Cc: stable@vger.kernel.org # 4.18
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_si_intf.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -809,6 +809,12 @@ restart:
+                        */
+                       return_hosed_msg(smi_info, IPMI_BUS_ERR);
+               }
++              if (smi_info->waiting_msg != NULL) {
++                      /* Also handle if there was a message waiting. */
++                      smi_info->curr_msg = smi_info->waiting_msg;
++                      smi_info->waiting_msg = NULL;
++                      return_hosed_msg(smi_info, IPMI_BUS_ERR);
++              }
+               smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
+               goto out;
+       }
diff --git a/queue-6.19/ipmi-si-use-a-long-timeout-when-the-bmc-is-misbehaving.patch b/queue-6.19/ipmi-si-use-a-long-timeout-when-the-bmc-is-misbehaving.patch
new file mode 100644 (file)
index 0000000..7f938f5
--- /dev/null
@@ -0,0 +1,35 @@
+From c3bb3295637cc9bf514f690941ca9a385bf30113 Mon Sep 17 00:00:00 2001
+From: Corey Minyard <corey@minyard.net>
+Date: Fri, 6 Feb 2026 10:33:52 -0600
+Subject: ipmi:si: Use a long timeout when the BMC is misbehaving
+
+From: Corey Minyard <corey@minyard.net>
+
+commit c3bb3295637cc9bf514f690941ca9a385bf30113 upstream.
+
+If the driver goes into HOSED state, don't reset the timeout to the
+short timeout in the timeout handler.
+
+Reported-by: Igor Raits <igor@gooddata.com>
+Closes: https://lore.kernel.org/linux-acpi/CAK8fFZ58fidGUCHi5WFX0uoTPzveUUDzT=k=AAm4yWo3bAuCFg@mail.gmail.com/
+Fixes: bc3a9d217755 ("ipmi:si: Gracefully handle if the BMC is non-functional")
+Cc: stable@vger.kernel.org # 4.18
+Signed-off-by: Corey Minyard <corey@minyard.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/char/ipmi/ipmi_si_intf.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1113,7 +1113,9 @@ static void smi_timeout(struct timer_lis
+                    * SI_USEC_PER_JIFFY);
+       smi_result = smi_event_handler(smi_info, time_diff);
+-      if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
++      if (smi_info->si_state == SI_HOSED) {
++              timeout = jiffies + SI_TIMEOUT_HOSED;
++      } else if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
+               /* Running with interrupts, only do long timeouts. */
+               timeout = jiffies + SI_TIMEOUT_JIFFIES;
+               smi_inc_stat(smi_info, long_timeouts);
diff --git a/queue-6.19/irqchip-gic-v3-its-limit-number-of-per-device-msis-to-the-range-the-its-supports.patch b/queue-6.19/irqchip-gic-v3-its-limit-number-of-per-device-msis-to-the-range-the-its-supports.patch
new file mode 100644 (file)
index 0000000..f020a14
--- /dev/null
@@ -0,0 +1,62 @@
+From ce9e40a9a5e5cff0b1b0d2fa582b3d71a8ce68e8 Mon Sep 17 00:00:00 2001
+From: Marc Zyngier <maz@kernel.org>
+Date: Fri, 6 Feb 2026 15:48:16 +0000
+Subject: irqchip/gic-v3-its: Limit number of per-device MSIs to the range the ITS supports
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit ce9e40a9a5e5cff0b1b0d2fa582b3d71a8ce68e8 upstream.
+
+The ITS driver blindly assumes that EventIDs are in abundant supply, to the
+point where it never checks how many the hardware actually supports.
+
+It turns out that some pretty esoteric integrations make it so that only a
+few bits are available, all the way down to a single bit.
+
+Enforce the advertised limitation at the point of allocating the device
+structure, and hope that the endpoint driver can deal with such limitation.
+
+Fixes: 84a6a2e7fc18d ("irqchip: GICv3: ITS: device allocation and configuration")
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Reviewed-by: Zenghui Yu <zenghui.yu@linux.dev>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260206154816.3582887-1-maz@kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/irqchip/irq-gic-v3-its.c   |    4 ++++
+ include/linux/irqchip/arm-gic-v3.h |    1 +
+ 2 files changed, 5 insertions(+)
+
+--- a/drivers/irqchip/irq-gic-v3-its.c
++++ b/drivers/irqchip/irq-gic-v3-its.c
+@@ -3475,6 +3475,7 @@ static struct its_device *its_create_dev
+       int lpi_base;
+       int nr_lpis;
+       int nr_ites;
++      int id_bits;
+       int sz;
+       if (!its_alloc_device_table(its, dev_id))
+@@ -3486,7 +3487,10 @@ static struct its_device *its_create_dev
+       /*
+        * Even if the device wants a single LPI, the ITT must be
+        * sized as a power of two (and you need at least one bit...).
++       * Also honor the ITS's own EID limit.
+        */
++      id_bits = FIELD_GET(GITS_TYPER_IDBITS, its->typer) + 1;
++      nvecs = min_t(unsigned int, nvecs, BIT(id_bits));
+       nr_ites = max(2, nvecs);
+       sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
+       sz = max(sz, ITS_ITT_ALIGN);
+--- a/include/linux/irqchip/arm-gic-v3.h
++++ b/include/linux/irqchip/arm-gic-v3.h
+@@ -394,6 +394,7 @@
+ #define GITS_TYPER_VLPIS              (1UL << 1)
+ #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT       4
+ #define GITS_TYPER_ITT_ENTRY_SIZE     GENMASK_ULL(7, 4)
++#define GITS_TYPER_IDBITS             GENMASK_ULL(12, 8)
+ #define GITS_TYPER_IDBITS_SHIFT               8
+ #define GITS_TYPER_DEVBITS_SHIFT      13
+ #define GITS_TYPER_DEVBITS            GENMASK_ULL(17, 13)
diff --git a/queue-6.19/ixgbevf-fix-link-setup-issue.patch b/queue-6.19/ixgbevf-fix-link-setup-issue.patch
new file mode 100644 (file)
index 0000000..e99b862
--- /dev/null
@@ -0,0 +1,50 @@
+From feae40a6a178bb525a15f19288016e5778102a99 Mon Sep 17 00:00:00 2001
+From: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+Date: Wed, 10 Dec 2025 12:26:51 +0100
+Subject: ixgbevf: fix link setup issue
+
+From: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+
+commit feae40a6a178bb525a15f19288016e5778102a99 upstream.
+
+It may happen that VF spawned for E610 adapter has problem with setting
+link up. This happens when ixgbevf supporting mailbox API 1.6 cooperates
+with PF driver which doesn't support this version of API, and hence
+doesn't support new approach for getting PF link data.
+
+In that case VF asks PF to provide link data but as PF doesn't support
+it, returns -EOPNOTSUPP, which leads to an early bail from the link
+configuration sequence.
+
+Avoid such situation by using legacy VFLINKS approach whenever negotiated
+API version is less than 1.6.
+
+To reproduce the issue just create VF and set its link up - adapter must
+be any from the E610 family, ixgbevf must support API 1.6 or higher while
+ixgbe (the PF driver) must not.
+
+Fixes: 53f0eb62b4d2 ("ixgbevf: fix getting link speed data for E610 devices")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Piotr Kwapulinski <piotr.kwapulinski@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Cc: stable@vger.kernel.org
+Signed-off-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/ixgbevf/vf.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/ixgbevf/vf.c
++++ b/drivers/net/ethernet/intel/ixgbevf/vf.c
+@@ -852,7 +852,8 @@ static s32 ixgbevf_check_mac_link_vf(str
+       if (!mac->get_link_status)
+               goto out;
+-      if (hw->mac.type == ixgbe_mac_e610_vf) {
++      if (hw->mac.type == ixgbe_mac_e610_vf &&
++          hw->api_version >= ixgbe_mbox_api_16) {
+               ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
+               if (ret_val)
+                       goto out;
diff --git a/queue-6.19/kunit-irq-ensure-timer-doesn-t-fire-too-frequently.patch b/queue-6.19/kunit-irq-ensure-timer-doesn-t-fire-too-frequently.patch
new file mode 100644 (file)
index 0000000..94219f9
--- /dev/null
@@ -0,0 +1,126 @@
+From 201ceb94aa1def0024a7c18ce643e5f65026be06 Mon Sep 17 00:00:00 2001
+From: Eric Biggers <ebiggers@kernel.org>
+Date: Mon, 23 Feb 2026 19:37:51 -0800
+Subject: kunit: irq: Ensure timer doesn't fire too frequently
+
+From: Eric Biggers <ebiggers@kernel.org>
+
+commit 201ceb94aa1def0024a7c18ce643e5f65026be06 upstream.
+
+Fix a bug where kunit_run_irq_test() could hang if the system is too
+slow.  This was noticed with the crypto library tests in certain VMs.
+
+Specifically, if kunit_irq_test_timer_func() and the associated hrtimer
+code took over 5us to run, then the CPU would spend all its time
+executing that code in hardirq context.  As a result, the task executing
+kunit_run_irq_test() never had a chance to run, exit the loop, and
+cancel the timer.
+
+To fix it, make kunit_irq_test_timer_func() increase the timer interval
+when the other contexts aren't having a chance to run.
+
+Fixes: 950a81224e8b ("lib/crypto: tests: Add hash-test-template.h and gen-hash-testvecs.py")
+Cc: stable@vger.kernel.org
+Reviewed-by: David Gow <david@davidgow.net>
+Link: https://lore.kernel.org/r/20260224033751.97615-1-ebiggers@kernel.org
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/kunit/run-in-irq-context.h |   44 +++++++++++++++++++++++--------------
+ 1 file changed, 28 insertions(+), 16 deletions(-)
+
+--- a/include/kunit/run-in-irq-context.h
++++ b/include/kunit/run-in-irq-context.h
+@@ -12,16 +12,16 @@
+ #include <linux/hrtimer.h>
+ #include <linux/workqueue.h>
+-#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
+-
+ struct kunit_irq_test_state {
+       bool (*func)(void *test_specific_state);
+       void *test_specific_state;
+       bool task_func_reported_failure;
+       bool hardirq_func_reported_failure;
+       bool softirq_func_reported_failure;
++      atomic_t task_func_calls;
+       atomic_t hardirq_func_calls;
+       atomic_t softirq_func_calls;
++      ktime_t interval;
+       struct hrtimer timer;
+       struct work_struct bh_work;
+ };
+@@ -30,14 +30,25 @@ static enum hrtimer_restart kunit_irq_te
+ {
+       struct kunit_irq_test_state *state =
+               container_of(timer, typeof(*state), timer);
++      int task_calls, hardirq_calls, softirq_calls;
+       WARN_ON_ONCE(!in_hardirq());
+-      atomic_inc(&state->hardirq_func_calls);
++      task_calls = atomic_read(&state->task_func_calls);
++      hardirq_calls = atomic_inc_return(&state->hardirq_func_calls);
++      softirq_calls = atomic_read(&state->softirq_func_calls);
++
++      /*
++       * If the timer is firing too often for the softirq or task to ever have
++       * a chance to run, increase the timer interval.  This is needed on very
++       * slow systems.
++       */
++      if (hardirq_calls >= 20 && (softirq_calls == 0 || task_calls == 0))
++              state->interval = ktime_add_ns(state->interval, 250);
+       if (!state->func(state->test_specific_state))
+               state->hardirq_func_reported_failure = true;
+-      hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
++      hrtimer_forward_now(&state->timer, state->interval);
+       queue_work(system_bh_wq, &state->bh_work);
+       return HRTIMER_RESTART;
+ }
+@@ -86,10 +97,14 @@ static inline void kunit_run_irq_test(st
+       struct kunit_irq_test_state state = {
+               .func = func,
+               .test_specific_state = test_specific_state,
++              /*
++               * Start with a 5us timer interval.  If the system can't keep
++               * up, kunit_irq_test_timer_func() will increase it.
++               */
++              .interval = us_to_ktime(5),
+       };
+       unsigned long end_jiffies;
+-      int hardirq_calls, softirq_calls;
+-      bool allctx = false;
++      int task_calls, hardirq_calls, softirq_calls;
+       /*
+        * Set up a hrtimer (the way we access hardirq context) and a work
+@@ -104,21 +119,18 @@ static inline void kunit_run_irq_test(st
+        * and hardirq), or 1 second, whichever comes first.
+        */
+       end_jiffies = jiffies + HZ;
+-      hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
+-                    HRTIMER_MODE_REL_HARD);
+-      for (int task_calls = 0, calls = 0;
+-           ((calls < max_iterations) || !allctx) &&
+-           !time_after(jiffies, end_jiffies);
+-           task_calls++) {
++      hrtimer_start(&state.timer, state.interval, HRTIMER_MODE_REL_HARD);
++      do {
+               if (!func(test_specific_state))
+                       state.task_func_reported_failure = true;
++              task_calls = atomic_inc_return(&state.task_func_calls);
+               hardirq_calls = atomic_read(&state.hardirq_func_calls);
+               softirq_calls = atomic_read(&state.softirq_func_calls);
+-              calls = task_calls + hardirq_calls + softirq_calls;
+-              allctx = (task_calls > 0) && (hardirq_calls > 0) &&
+-                       (softirq_calls > 0);
+-      }
++      } while ((task_calls + hardirq_calls + softirq_calls < max_iterations ||
++                (task_calls == 0 || hardirq_calls == 0 ||
++                 softirq_calls == 0)) &&
++               !time_after(jiffies, end_jiffies));
+       /* Cancel the timer and work. */
+       hrtimer_cancel(&state.timer);
diff --git a/queue-6.19/mm-damon-core-clear-walk_control-on-inactive-context-in-damos_walk.patch b/queue-6.19/mm-damon-core-clear-walk_control-on-inactive-context-in-damos_walk.patch
new file mode 100644 (file)
index 0000000..9a9b1be
--- /dev/null
@@ -0,0 +1,73 @@
+From d210fdcac9c0d1380eab448aebc93f602c1cd4e6 Mon Sep 17 00:00:00 2001
+From: Raul Pazemecxas De Andrade <raul_pazemecxas@hotmail.com>
+Date: Mon, 23 Feb 2026 17:10:59 -0800
+Subject: mm/damon/core: clear walk_control on inactive context in damos_walk()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Raul Pazemecxas De Andrade <raul_pazemecxas@hotmail.com>
+
+commit d210fdcac9c0d1380eab448aebc93f602c1cd4e6 upstream.
+
+damos_walk() sets ctx->walk_control to the caller-provided control
+structure before checking whether the context is running.  If the context
+is inactive (damon_is_running() returns false), the function returns
+-EINVAL without clearing ctx->walk_control.  This leaves a dangling
+pointer to a stack-allocated structure that will be freed when the caller
+returns.
+
+This is structurally identical to the bug fixed in commit f9132fbc2e83
+("mm/damon/core: remove call_control in inactive contexts") for
+damon_call(), which had the same pattern of linking a control object and
+returning an error without unlinking it.
+
+The dangling walk_control pointer can cause:
+1. Use-after-free if the context is later started and kdamond
+   dereferences ctx->walk_control (e.g., in damos_walk_cancel()
+   which writes to control->canceled and calls complete())
+2. Permanent -EBUSY from subsequent damos_walk() calls, since the
+   stale pointer is non-NULL
+
+Nonetheless, the real user impact is quite restrictive.  The
+use-after-free is impossible because there is no damos_walk() callers who
+starts the context later.  The permanent -EBUSY can actually confuse
+users, as DAMON is not running.  But the symptom is kept only while the
+context is turned off.  Turning it on again will make DAMON internally
+uses a newly generated damon_ctx object that doesn't have the invalid
+damos_walk_control pointer, so everything will work fine again.
+
+Fix this by clearing ctx->walk_control under walk_control_lock before
+returning -EINVAL, mirroring the fix pattern from f9132fbc2e83.
+
+Link: https://lkml.kernel.org/r/20260224011102.56033-1-sj@kernel.org
+Fixes: bf0eaba0ff9c ("mm/damon/core: implement damos_walk()")
+Reported-by: Raul Pazemecxas De Andrade <raul_pazemecxas@hotmail.com>
+Closes: https://lore.kernel.org/CPUPR80MB8171025468965E583EF2490F956CA@CPUPR80MB8171.lamprd80.prod.outlook.com
+Signed-off-by: Raul Pazemecxas De Andrade <raul_pazemecxas@hotmail.com>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reviewed-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org>   [6.14+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1531,8 +1531,13 @@ int damos_walk(struct damon_ctx *ctx, st
+       }
+       ctx->walk_control = control;
+       mutex_unlock(&ctx->walk_control_lock);
+-      if (!damon_is_running(ctx))
++      if (!damon_is_running(ctx)) {
++              mutex_lock(&ctx->walk_control_lock);
++              if (ctx->walk_control == control)
++                      ctx->walk_control = NULL;
++              mutex_unlock(&ctx->walk_control_lock);
+               return -EINVAL;
++      }
+       wait_for_completion(&control->completion);
+       if (control->canceled)
+               return -ECANCELED;
diff --git a/queue-6.19/mm-huge_memory-fix-a-folio_split-race-condition-with-folio_try_get.patch b/queue-6.19/mm-huge_memory-fix-a-folio_split-race-condition-with-folio_try_get.patch
new file mode 100644 (file)
index 0000000..1326733
--- /dev/null
@@ -0,0 +1,132 @@
+From 577a1f495fd78d8fb61b67ac3d3b595b01f6fcb0 Mon Sep 17 00:00:00 2001
+From: Zi Yan <ziy@nvidia.com>
+Date: Mon, 2 Mar 2026 15:31:59 -0500
+Subject: mm/huge_memory: fix a folio_split() race condition with folio_try_get()
+
+From: Zi Yan <ziy@nvidia.com>
+
+commit 577a1f495fd78d8fb61b67ac3d3b595b01f6fcb0 upstream.
+
+During a pagecache folio split, the values in the related xarray should
+not be changed from the original folio at xarray split time until all
+after-split folios are well formed and stored in the xarray.  Current use
+of xas_try_split() in __split_unmapped_folio() lets some after-split
+folios show up at wrong indices in the xarray.  When these misplaced
+after-split folios are unfrozen, before correct folios are stored via
+__xa_store(), and grabbed by folio_try_get(), they are returned to
+userspace at wrong file indices, causing data corruption.  More detailed
+explanation is at the bottom.
+
+The reproducer is at: https://github.com/dfinity/thp-madv-remove-test
+It
+1. creates a memfd,
+2. forks,
+3. in the child process, maps the file with large folios (via shmem code
+   path) and reads the mapped file continuously with 16 threads,
+4. in the parent process, uses madvise(MADV_REMOVE) to punch holes in the
+   large folio.
+
+Data corruption can be observed without the fix.  Basically, data from a
+wrong page->index is returned.
+
+Fix it by using the original folio in xas_try_split() calls, so that
+folio_try_get() can get the right after-split folios after the original
+folio is unfrozen.
+
+Uniform split, split_huge_page*(), is not affected, since it uses
+xas_split_alloc() and xas_split() only once and stores the original folio
+in the xarray.  Change xas_split() used in uniform split branch to use the
+original folio to avoid confusion.
+
+The Fixes tag below points to the commit that introduced the code, but
+folio_split() is used in a later commit 7460b470a131f ("mm/truncate: use
+folio_split() in truncate operation").
+
+More details:
+
+For example, a folio f is split non-uniformly into f, f2, f3, f4 like
+below:
++----------------+---------+----+----+
+|       f        |    f2   | f3 | f4 |
++----------------+---------+----+----+
+but the xarray would look like below after __split_unmapped_folio() is
+done:
++----------------+---------+----+----+
+|       f        |    f2   | f3 | f3 |
++----------------+---------+----+----+
+
+After __split_unmapped_folio(), the code changes the xarray and unfreezes
+after-split folios:
+
+1. unfreezes f2, __xa_store(f2)
+2. unfreezes f3, __xa_store(f3)
+3. unfreezes f4, __xa_store(f4), which overwrites the second f3 to f4.
+4. unfreezes f.
+
+Meanwhile, a parallel filemap_get_entry() can read the second f3 from the
+xarray and use folio_try_get() on it at step 2 when f3 is unfrozen. Then,
+f3 is wrongly returned to user.
+
+After the fix, the xarray looks like below after __split_unmapped_folio():
++----------------+---------+----+----+
+|       f        |    f    | f  | f  |
++----------------+---------+----+----+
+so that the race window no longer exists.
+
+[ziy@nvidia.com: move comment, per David]
+  Link: https://lkml.kernel.org/r/5C9FA053-A4C6-4615-BE05-74E47A6462B3@nvidia.com
+Link: https://lkml.kernel.org/r/20260302203159.3208341-1-ziy@nvidia.com
+Fixes: 00527733d0dc ("mm/huge_memory: add two new (not yet used) functions for folio_split()")
+Signed-off-by: Zi Yan <ziy@nvidia.com>
+Reported-by: Bas van Dijk <bas@dfinity.org>
+Closes: https://lore.kernel.org/all/CAKNNEtw5_kZomhkugedKMPOG-sxs5Q5OLumWJdiWXv+C9Yct0w@mail.gmail.com/
+Tested-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: David Hildenbrand <david@kernel.org>
+Cc: Dev Jain <dev.jain@arm.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Nico Pache <npache@redhat.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c |   13 +++++++++----
+ 1 file changed, 9 insertions(+), 4 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -3631,6 +3631,7 @@ static int __split_unmapped_folio(struct
+       const bool is_anon = folio_test_anon(folio);
+       int old_order = folio_order(folio);
+       int start_order = split_type == SPLIT_TYPE_UNIFORM ? new_order : old_order - 1;
++      struct folio *old_folio = folio;
+       int split_order;
+       /*
+@@ -3651,12 +3652,16 @@ static int __split_unmapped_folio(struct
+                        * uniform split has xas_split_alloc() called before
+                        * irq is disabled to allocate enough memory, whereas
+                        * non-uniform split can handle ENOMEM.
++                       * Use the to-be-split folio, so that a parallel
++                       * folio_try_get() waits on it until xarray is updated
++                       * with after-split folios and the original one is
++                       * unfrozen.
+                        */
+-                      if (split_type == SPLIT_TYPE_UNIFORM)
+-                              xas_split(xas, folio, old_order);
+-                      else {
++                      if (split_type == SPLIT_TYPE_UNIFORM) {
++                              xas_split(xas, old_folio, old_order);
++                      } else {
+                               xas_set_order(xas, folio->index, split_order);
+-                              xas_try_split(xas, folio, old_order);
++                              xas_try_split(xas, old_folio, old_order);
+                               if (xas_error(xas))
+                                       return xas_error(xas);
+                       }
diff --git a/queue-6.19/mm-memfd_luo-always-dirty-all-folios.patch b/queue-6.19/mm-memfd_luo-always-dirty-all-folios.patch
new file mode 100644 (file)
index 0000000..3f8e748
--- /dev/null
@@ -0,0 +1,103 @@
+From 7e04bf1f33151a30e06a65b74b5f2c19fc2be128 Mon Sep 17 00:00:00 2001
+From: "Pratyush Yadav (Google)" <pratyush@kernel.org>
+Date: Mon, 23 Feb 2026 18:39:29 +0100
+Subject: mm: memfd_luo: always dirty all folios
+
+From: Pratyush Yadav (Google) <pratyush@kernel.org>
+
+commit 7e04bf1f33151a30e06a65b74b5f2c19fc2be128 upstream.
+
+A dirty folio is one which has been written to.  A clean folio is its
+opposite.  Since a clean folio has no user data, it can be freed under
+memory pressure.
+
+memfd preservation with LUO saves the flag at preserve().  This is
+problematic.  The folio might get dirtied later.  Saving it at freeze()
+also doesn't work, since the dirty bit from PTE is normally synced at
+unmap and there might still be mappings of the file at freeze().
+
+To see why this is a problem, say a folio is clean at preserve, but gets
+dirtied later.  The serialized state of the folio will mark it as clean.
+After retrieve, the next kernel will see the folio as clean and might try
+to reclaim it under memory pressure.  This will result in losing user
+data.
+
+Mark all folios of the file as dirty, and always set the
+MEMFD_LUO_FOLIO_DIRTY flag.  This comes with the side effect of making all
+clean folios un-reclaimable.  This is a cost that has to be paid for
+participants of live update.  It is not expected to be a common use case
+to preserve a lot of clean folios anyway.
+
+Since the value of pfolio->flags is a constant now, drop the flags
+variable and set it directly.
+
+Link: https://lkml.kernel.org/r/20260223173931.2221759-3-pratyush@kernel.org
+Fixes: b3749f174d68 ("mm: memfd_luo: allow preserving memfd")
+Signed-off-by: Pratyush Yadav (Google) <pratyush@kernel.org>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memfd_luo.c | 26 +++++++++++++++++++++-----
+ 1 file changed, 21 insertions(+), 5 deletions(-)
+
+diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c
+index 1c9510289312..b8edb9f981d7 100644
+--- a/mm/memfd_luo.c
++++ b/mm/memfd_luo.c
+@@ -146,7 +146,6 @@ static int memfd_luo_preserve_folios(struct file *file,
+       for (i = 0; i < nr_folios; i++) {
+               struct memfd_luo_folio_ser *pfolio = &folios_ser[i];
+               struct folio *folio = folios[i];
+-              unsigned int flags = 0;
+               err = kho_preserve_folio(folio);
+               if (err)
+@@ -154,8 +153,26 @@ static int memfd_luo_preserve_folios(struct file *file,
+               folio_lock(folio);
+-              if (folio_test_dirty(folio))
+-                      flags |= MEMFD_LUO_FOLIO_DIRTY;
++              /*
++               * A dirty folio is one which has been written to. A clean folio
++               * is its opposite. Since a clean folio does not carry user
++               * data, it can be freed by page reclaim under memory pressure.
++               *
++               * Saving the dirty flag at prepare() time doesn't work since it
++               * can change later. Saving it at freeze() also won't work
++               * because the dirty bit is normally synced at unmap and there
++               * might still be a mapping of the file at freeze().
++               *
++               * To see why this is a problem, say a folio is clean at
++               * preserve, but gets dirtied later. The pfolio flags will mark
++               * it as clean. After retrieve, the next kernel might try to
++               * reclaim this folio under memory pressure, losing user data.
++               *
++               * Unconditionally mark it dirty to avoid this problem. This
++               * comes at the cost of making clean folios un-reclaimable after
++               * live update.
++               */
++              folio_mark_dirty(folio);
+               /*
+                * If the folio is not uptodate, it was fallocated but never
+@@ -174,12 +191,11 @@ static int memfd_luo_preserve_folios(struct file *file,
+                       flush_dcache_folio(folio);
+                       folio_mark_uptodate(folio);
+               }
+-              flags |= MEMFD_LUO_FOLIO_UPTODATE;
+               folio_unlock(folio);
+               pfolio->pfn = folio_pfn(folio);
+-              pfolio->flags = flags;
++              pfolio->flags = MEMFD_LUO_FOLIO_DIRTY | MEMFD_LUO_FOLIO_UPTODATE;
+               pfolio->index = folio->index;
+       }
+-- 
+2.53.0
+
diff --git a/queue-6.19/mm-memfd_luo-always-make-all-folios-uptodate.patch b/queue-6.19/mm-memfd_luo-always-make-all-folios-uptodate.patch
new file mode 100644 (file)
index 0000000..5a7a1c5
--- /dev/null
@@ -0,0 +1,105 @@
+From 50d7b4332f27762d24641970fc34bb68a2621926 Mon Sep 17 00:00:00 2001
+From: "Pratyush Yadav (Google)" <pratyush@kernel.org>
+Date: Mon, 23 Feb 2026 18:39:28 +0100
+Subject: mm: memfd_luo: always make all folios uptodate
+
+From: Pratyush Yadav (Google) <pratyush@kernel.org>
+
+commit 50d7b4332f27762d24641970fc34bb68a2621926 upstream.
+
+Patch series "mm: memfd_luo: fixes for folio flag preservation".
+
+This series contains a couple fixes for flag preservation for memfd live
+update.
+
+The first patch fixes memfd preservation when fallocate() was used to
+pre-allocate some pages.  For these memfds, all the writes to fallocated
+pages touched after preserve were lost.
+
+The second patch fixes dirty flag tracking.  If the dirty flag is not
+tracked correctly, the next kernel might incorrectly reclaim some folios
+under memory pressure, losing user data.  This is a theoretical bug that I
+observed when reading the code, and haven't been able to reproduce it.
+
+
+This patch (of 2):
+
+When a folio is added to a shmem file via fallocate, it is not zeroed on
+allocation.  This is done as a performance optimization since it is
+possible the folio will never end up being used at all.  When the folio is
+used, shmem checks for the uptodate flag, and if absent, zeroes the folio
+(and sets the flag) before returning to user.
+
+With LUO, the flags of each folio are saved at preserve time.  It is
+possible to have a memfd with some folios fallocated but not uptodate.
+For those, the uptodate flag doesn't get saved.  The folios might later
+end up being used and become uptodate.  They would get passed to the next
+kernel via KHO correctly since they did get preserved.  But they won't
+have the MEMFD_LUO_FOLIO_UPTODATE flag.
+
+This means that when the memfd is retrieved, the folios will be added to
+the shmem file without the uptodate flag.  They will be zeroed before
+first use, losing the data in those folios.
+
+Since we take a big performance hit in allocating, zeroing, and pinning
+all folios at prepare time anyway, take some more and zero all
+non-uptodate ones too.
+
+Later when there is a stronger need to make prepare faster, this can be
+optimized.
+
+To avoid racing with another uptodate operation, take the folio lock.
+
+Link: https://lkml.kernel.org/r/20260223173931.2221759-2-pratyush@kernel.org
+Fixes: b3749f174d68 ("mm: memfd_luo: allow preserving memfd")
+Signed-off-by: Pratyush Yadav (Google) <pratyush@kernel.org>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Cc: Pasha Tatashin <pasha.tatashin@soleen.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memfd_luo.c | 25 +++++++++++++++++++++++--
+ 1 file changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c
+index e485b828d173..1c9510289312 100644
+--- a/mm/memfd_luo.c
++++ b/mm/memfd_luo.c
+@@ -152,10 +152,31 @@ static int memfd_luo_preserve_folios(struct file *file,
+               if (err)
+                       goto err_unpreserve;
++              folio_lock(folio);
++
+               if (folio_test_dirty(folio))
+                       flags |= MEMFD_LUO_FOLIO_DIRTY;
+-              if (folio_test_uptodate(folio))
+-                      flags |= MEMFD_LUO_FOLIO_UPTODATE;
++
++              /*
++               * If the folio is not uptodate, it was fallocated but never
++               * used. Saving this flag at prepare() doesn't work since it
++               * might change later when someone uses the folio.
++               *
++               * Since we have taken the performance penalty of allocating,
++               * zeroing, and pinning all the folios in the holes, take a bit
++               * more and zero all non-uptodate folios too.
++               *
++               * NOTE: For someone looking to improve preserve performance,
++               * this is a good place to look.
++               */
++              if (!folio_test_uptodate(folio)) {
++                      folio_zero_range(folio, 0, folio_size(folio));
++                      flush_dcache_folio(folio);
++                      folio_mark_uptodate(folio);
++              }
++              flags |= MEMFD_LUO_FOLIO_UPTODATE;
++
++              folio_unlock(folio);
+               pfolio->pfn = folio_pfn(folio);
+               pfolio->flags = flags;
+-- 
+2.53.0
+
diff --git a/queue-6.19/mm-slab-fix-an-incorrect-check-in-obj_exts_alloc_size.patch b/queue-6.19/mm-slab-fix-an-incorrect-check-in-obj_exts_alloc_size.patch
new file mode 100644 (file)
index 0000000..36829ed
--- /dev/null
@@ -0,0 +1,60 @@
+From 8dafa9f5900c4855a65dbfee51e3bd00636deee1 Mon Sep 17 00:00:00 2001
+From: Harry Yoo <harry.yoo@oracle.com>
+Date: Mon, 9 Mar 2026 16:22:19 +0900
+Subject: mm/slab: fix an incorrect check in obj_exts_alloc_size()
+
+From: Harry Yoo <harry.yoo@oracle.com>
+
+commit 8dafa9f5900c4855a65dbfee51e3bd00636deee1 upstream.
+
+obj_exts_alloc_size() prevents recursive allocation of slabobj_ext
+array from the same cache, to avoid creating slabs that are never freed.
+
+There is one mistake that returns the original size when memory
+allocation profiling is disabled. The assumption was that
+memcg-triggered slabobj_ext allocation is always served from
+KMALLOC_CGROUP type. But this is wrong [1]: when the caller specifies
+both __GFP_RECLAIMABLE and __GFP_ACCOUNT with SLUB_TINY enabled, the
+allocation is served from normal kmalloc. This is because kmalloc_type()
+prioritizes __GFP_RECLAIMABLE over __GFP_ACCOUNT, and SLUB_TINY aliases
+KMALLOC_RECLAIM with KMALLOC_NORMAL.
+
+As a result, the recursion guard is bypassed and the problematic slabs
+can be created. Fix this by removing the mem_alloc_profiling_enabled()
+check entirely. The remaining is_kmalloc_normal() check is still
+sufficient to detect whether the cache is of KMALLOC_NORMAL type and
+avoid bumping the size if it's not.
+
+Without SLUB_TINY, no functional change intended.
+With SLUB_TINY, allocations with __GFP_ACCOUNT|__GFP_RECLAIMABLE
+now allocate a larger array if the sizes equal.
+
+Reported-by: Zw Tang <shicenci@gmail.com>
+Fixes: 280ea9c3154b ("mm/slab: avoid allocating slabobj_ext array from its own slab")
+Closes: https://lore.kernel.org/linux-mm/CAPHJ_VKuMKSke8b11AZQw1PTSFN4n2C0gFxC6xGOG0ZLHgPmnA@mail.gmail.com [1]
+Cc: stable@vger.kernel.org
+Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
+Link: https://patch.msgid.link/20260309072219.22653-1-harry.yoo@oracle.com
+Tested-by: Zw Tang <shicenci@gmail.com>
+Signed-off-by: Vlastimil Babka (SUSE) <vbabka@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c |    7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2113,13 +2113,6 @@ static inline size_t obj_exts_alloc_size
+       size_t sz = sizeof(struct slabobj_ext) * slab->objects;
+       struct kmem_cache *obj_exts_cache;
+-      /*
+-       * slabobj_ext array for KMALLOC_CGROUP allocations
+-       * are served from KMALLOC_NORMAL caches.
+-       */
+-      if (!mem_alloc_profiling_enabled())
+-              return sz;
+-
+       if (sz > KMALLOC_MAX_CACHE_SIZE)
+               return sz;
diff --git a/queue-6.19/nfsd-fix-cred-ref-leak-in-nfsd_nl_listener_set_doit.patch b/queue-6.19/nfsd-fix-cred-ref-leak-in-nfsd_nl_listener_set_doit.patch
new file mode 100644 (file)
index 0000000..92f8604
--- /dev/null
@@ -0,0 +1,41 @@
+From 92978c83bb4eef55d02a6c990c01c423131eefa7 Mon Sep 17 00:00:00 2001
+From: Kuniyuki Iwashima <kuniyu@google.com>
+Date: Sat, 24 Jan 2026 04:18:41 +0000
+Subject: nfsd: Fix cred ref leak in nfsd_nl_listener_set_doit().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+commit 92978c83bb4eef55d02a6c990c01c423131eefa7 upstream.
+
+nfsd_nl_listener_set_doit() uses get_current_cred() without
+put_cred().
+
+As we can see from other callers, svc_xprt_create_from_sa()
+does not require the extra refcount.
+
+nfsd_nl_listener_set_doit() is always in the process context,
+sendmsg(), and current->cred does not go away.
+
+Let's use current_cred() in nfsd_nl_listener_set_doit().
+
+Fixes: 16a471177496 ("NFSD: add listener-{set,get} netlink command")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfsctl.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -1993,7 +1993,7 @@ int nfsd_nl_listener_set_doit(struct sk_
+               }
+               ret = svc_xprt_create_from_sa(serv, xcl_name, net, sa, 0,
+-                                            get_current_cred());
++                                            current_cred());
+               /* always save the latest error */
+               if (ret < 0)
+                       err = ret;
diff --git a/queue-6.19/nouveau-gsp-drop-warn_on-in-acpi-probes.patch b/queue-6.19/nouveau-gsp-drop-warn_on-in-acpi-probes.patch
new file mode 100644 (file)
index 0000000..0b62cc9
--- /dev/null
@@ -0,0 +1,58 @@
+From 9478c166c46934160135e197b049b5a05753f2ad Mon Sep 17 00:00:00 2001
+From: Dave Airlie <airlied@redhat.com>
+Date: Thu, 21 Nov 2024 11:46:01 +1000
+Subject: nouveau/gsp: drop WARN_ON in ACPI probes
+
+From: Dave Airlie <airlied@redhat.com>
+
+commit 9478c166c46934160135e197b049b5a05753f2ad upstream.
+
+These WARN_ONs seem to trigger a lot, and we don't seem to have a
+plan to fix them, so just drop them, as they are most likely
+harmless.
+
+Cc: stable@vger.kernel.org
+Fixes: 176fdcbddfd2 ("drm/nouveau/gsp/r535: add support for booting GSP-RM")
+Signed-off-by: Dave Airlie <airlied@redhat.com>
+Link: https://patch.msgid.link/20241121014601.229391-1-airlied@gmail.com
+Signed-off-by: Danilo Krummrich <dakr@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
+@@ -737,8 +737,8 @@ r535_gsp_acpi_caps(acpi_handle handle, C
+       if (!obj)
+               goto done;
+-      if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+-          WARN_ON(obj->buffer.length != 4))
++      if (obj->type != ACPI_TYPE_BUFFER ||
++          obj->buffer.length != 4)
+               goto done;
+       caps->status = 0;
+@@ -773,8 +773,8 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_
+       if (!obj)
+               goto done;
+-      if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) ||
+-          WARN_ON(obj->buffer.length != 4))
++      if (obj->type != ACPI_TYPE_BUFFER ||
++          obj->buffer.length != 4)
+               goto done;
+       jt->status = 0;
+@@ -861,8 +861,8 @@ r535_gsp_acpi_dod(acpi_handle handle, DO
+       _DOD = output.pointer;
+-      if (WARN_ON(_DOD->type != ACPI_TYPE_PACKAGE) ||
+-          WARN_ON(_DOD->package.count > ARRAY_SIZE(dod->acpiIdList)))
++      if (_DOD->type != ACPI_TYPE_PACKAGE ||
++          _DOD->package.count > ARRAY_SIZE(dod->acpiIdList))
+               return;
+       for (int i = 0; i < _DOD->package.count; i++) {
diff --git a/queue-6.19/nsfs-tighten-permission-checks-for-handle-opening.patch b/queue-6.19/nsfs-tighten-permission-checks-for-handle-opening.patch
new file mode 100644 (file)
index 0000000..934bdbb
--- /dev/null
@@ -0,0 +1,35 @@
+From d2324a9317f00013facb0ba00b00440e19d2af5e Mon Sep 17 00:00:00 2001
+From: Christian Brauner <brauner@kernel.org>
+Date: Thu, 26 Feb 2026 14:50:10 +0100
+Subject: nsfs: tighten permission checks for handle opening
+
+From: Christian Brauner <brauner@kernel.org>
+
+commit d2324a9317f00013facb0ba00b00440e19d2af5e upstream.
+
+Even privileged services should not necessarily be able to see other
+privileged service's namespaces so they can't leak information to each
+other. Use may_see_all_namespaces() helper that centralizes this policy
+until the nstree adapts.
+
+Link: https://patch.msgid.link/20260226-work-visibility-fixes-v1-2-d2c2853313bd@kernel.org
+Fixes: 5222470b2fbb ("nsfs: support file handles")
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Cc: stable@kernel.org # v6.18+
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nsfs.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nsfs.c
++++ b/fs/nsfs.c
+@@ -614,7 +614,7 @@ static struct dentry *nsfs_fh_to_dentry(
+               return ERR_PTR(-EOPNOTSUPP);
+       }
+-      if (owning_ns && !ns_capable(owning_ns, CAP_SYS_ADMIN)) {
++      if (owning_ns && !may_see_all_namespaces()) {
+               ns->ops->put(ns);
+               return ERR_PTR(-EPERM);
+       }
diff --git a/queue-6.19/nstree-tighten-permission-checks-for-listing.patch b/queue-6.19/nstree-tighten-permission-checks-for-listing.patch
new file mode 100644 (file)
index 0000000..fa32c71
--- /dev/null
@@ -0,0 +1,76 @@
+From 8d76afe84fa2babf604b3c173730d4d2b067e361 Mon Sep 17 00:00:00 2001
+From: Christian Brauner <brauner@kernel.org>
+Date: Thu, 26 Feb 2026 14:50:11 +0100
+Subject: nstree: tighten permission checks for listing
+
+From: Christian Brauner <brauner@kernel.org>
+
+commit 8d76afe84fa2babf604b3c173730d4d2b067e361 upstream.
+
+Even privileged services should not necessarily be able to see other
+privileged service's namespaces so they can't leak information to each
+other. Use may_see_all_namespaces() helper that centralizes this policy
+until the nstree adapts.
+
+Link: https://patch.msgid.link/20260226-work-visibility-fixes-v1-3-d2c2853313bd@kernel.org
+Fixes: 76b6f5dfb3fd ("nstree: add listns()")
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Cc: stable@kernel.org # v6.19+
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/nstree.c | 29 ++++-------------------------
+ 1 file changed, 4 insertions(+), 25 deletions(-)
+
+diff --git a/kernel/nstree.c b/kernel/nstree.c
+index f36c59e6951d..6d12e5900ac0 100644
+--- a/kernel/nstree.c
++++ b/kernel/nstree.c
+@@ -515,32 +515,11 @@ static inline bool __must_check ns_requested(const struct klistns *kls,
+ static inline bool __must_check may_list_ns(const struct klistns *kls,
+                                           struct ns_common *ns)
+ {
+-      if (kls->user_ns) {
+-              if (kls->userns_capable)
+-                      return true;
+-      } else {
+-              struct ns_common *owner;
+-              struct user_namespace *user_ns;
+-
+-              owner = ns_owner(ns);
+-              if (owner)
+-                      user_ns = to_user_ns(owner);
+-              else
+-                      user_ns = &init_user_ns;
+-              if (ns_capable_noaudit(user_ns, CAP_SYS_ADMIN))
+-                      return true;
+-      }
+-
++      if (kls->user_ns && kls->userns_capable)
++              return true;
+       if (is_current_namespace(ns))
+               return true;
+-
+-      if (ns->ns_type != CLONE_NEWUSER)
+-              return false;
+-
+-      if (ns_capable_noaudit(to_user_ns(ns), CAP_SYS_ADMIN))
+-              return true;
+-
+-      return false;
++      return may_see_all_namespaces();
+ }
+ static inline void ns_put(struct ns_common *ns)
+@@ -600,7 +579,7 @@ static ssize_t do_listns_userns(struct klistns *kls)
+       ret = 0;
+       head = &to_ns_common(kls->user_ns)->ns_owner_root.ns_list_head;
+-      kls->userns_capable = ns_capable_noaudit(kls->user_ns, CAP_SYS_ADMIN);
++      kls->userns_capable = may_see_all_namespaces();
+       rcu_read_lock();
+-- 
+2.53.0
+
diff --git a/queue-6.19/s390-pfault-fix-virtual-vs-physical-address-confusion.patch b/queue-6.19/s390-pfault-fix-virtual-vs-physical-address-confusion.patch
new file mode 100644 (file)
index 0000000..e12684b
--- /dev/null
@@ -0,0 +1,54 @@
+From d879ac6756b662a085a743e76023c768c3241579 Mon Sep 17 00:00:00 2001
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+Date: Tue, 24 Feb 2026 07:41:07 +0100
+Subject: s390/pfault: Fix virtual vs physical address confusion
+
+From: Alexander Gordeev <agordeev@linux.ibm.com>
+
+commit d879ac6756b662a085a743e76023c768c3241579 upstream.
+
+When Linux is running as guest, runs a user space process and the
+user space process accesses a page that the host has paged out,
+the guest gets a pfault interrupt and schedules a different process.
+Without this mechanism the host would have to suspend the whole
+virtual CPU until the page has been paged in.
+
+To setup the pfault interrupt the real address of parameter list
+should be passed to DIAGNOSE 0x258, but a virtual address is passed
+instead.
+
+That has a performance impact, since the pfault setup never succeeds,
+the interrupt is never delivered to a guest and the whole virtual CPU
+is suspended as result.
+
+Cc: stable@vger.kernel.org
+Fixes: c98d2ecae08f ("s390/mm: Uncouple physical vs virtual address spaces")
+Reported-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/mm/pfault.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/s390/mm/pfault.c
++++ b/arch/s390/mm/pfault.c
+@@ -62,7 +62,7 @@ int __pfault_init(void)
+               "0:     nopr    %%r7\n"
+               EX_TABLE(0b, 0b)
+               : [rc] "+d" (rc)
+-              : [refbk] "a" (&pfault_init_refbk), "m" (pfault_init_refbk)
++              : [refbk] "a" (virt_to_phys(&pfault_init_refbk)), "m" (pfault_init_refbk)
+               : "cc");
+       return rc;
+ }
+@@ -84,7 +84,7 @@ void __pfault_fini(void)
+               "0:     nopr    %%r7\n"
+               EX_TABLE(0b, 0b)
+               :
+-              : [refbk] "a" (&pfault_fini_refbk), "m" (pfault_fini_refbk)
++              : [refbk] "a" (virt_to_phys(&pfault_fini_refbk)), "m" (pfault_fini_refbk)
+               : "cc");
+ }
diff --git a/queue-6.19/sched_ext-disable-preemption-between-scx_claim_exit-and-kicking-helper-work.patch b/queue-6.19/sched_ext-disable-preemption-between-scx_claim_exit-and-kicking-helper-work.patch
new file mode 100644 (file)
index 0000000..c8d35a6
--- /dev/null
@@ -0,0 +1,71 @@
+From 83236b2e43dba00bee5b82eb5758816b1a674f6a Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 24 Feb 2026 21:39:58 -1000
+Subject: sched_ext: Disable preemption between scx_claim_exit() and kicking helper work
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 83236b2e43dba00bee5b82eb5758816b1a674f6a upstream.
+
+scx_claim_exit() atomically sets exit_kind, which prevents scx_error() from
+triggering further error handling. After claiming exit, the caller must kick
+the helper kthread work which initiates bypass mode and teardown.
+
+If the calling task gets preempted between claiming exit and kicking the
+helper work, and the BPF scheduler fails to schedule it back (since error
+handling is now disabled), the helper work is never queued, bypass mode
+never activates, tasks stop being dispatched, and the system wedges.
+
+Disable preemption across scx_claim_exit() and the subsequent work kicking
+in all callers - scx_disable() and scx_vexit(). Add
+lockdep_assert_preemption_disabled() to scx_claim_exit() to enforce the
+requirement.
+
+Fixes: f0e1a0643a59 ("sched_ext: Implement BPF extensible scheduler class")
+Cc: stable@vger.kernel.org # v6.12+
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4390,10 +4390,19 @@ done:
+       scx_bypass(false);
+ }
++/*
++ * Claim the exit on @sch. The caller must ensure that the helper kthread work
++ * is kicked before the current task can be preempted. Once exit_kind is
++ * claimed, scx_error() can no longer trigger, so if the current task gets
++ * preempted and the BPF scheduler fails to schedule it back, the helper work
++ * will never be kicked and the whole system can wedge.
++ */
+ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
+ {
+       int none = SCX_EXIT_NONE;
++      lockdep_assert_preemption_disabled();
++
+       if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
+               return false;
+@@ -4416,6 +4425,7 @@ static void scx_disable(enum scx_exit_ki
+       rcu_read_lock();
+       sch = rcu_dereference(scx_root);
+       if (sch) {
++              guard(preempt)();
+               scx_claim_exit(sch, kind);
+               kthread_queue_work(sch->helper, &sch->disable_work);
+       }
+@@ -4738,6 +4748,8 @@ static bool scx_vexit(struct scx_sched *
+ {
+       struct scx_exit_info *ei = sch->exit_info;
++      guard(preempt)();
++
+       if (!scx_claim_exit(sch, kind))
+               return false;
diff --git a/queue-6.19/sched_ext-fix-starvation-of-scx_enable-under-fair-class-saturation.patch b/queue-6.19/sched_ext-fix-starvation-of-scx_enable-under-fair-class-saturation.patch
new file mode 100644 (file)
index 0000000..7d3d0f7
--- /dev/null
@@ -0,0 +1,138 @@
+From b06ccbabe2506fd70b9167a644978b049150224a Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Tue, 3 Mar 2026 01:01:15 -1000
+Subject: sched_ext: Fix starvation of scx_enable() under fair-class saturation
+
+From: Tejun Heo <tj@kernel.org>
+
+commit b06ccbabe2506fd70b9167a644978b049150224a upstream.
+
+During scx_enable(), the READY -> ENABLED task switching loop changes the
+calling thread's sched_class from fair to ext. Since fair has higher
+priority than ext, saturating fair-class workloads can indefinitely starve
+the enable thread, hanging the system. This was introduced when the enable
+path switched from preempt_disable() to scx_bypass() which doesn't protect
+against fair-class starvation. Note that the original preempt_disable()
+protection wasn't complete either - in partial switch modes, the calling
+thread could still be starved after preempt_enable() as it may have been
+switched to ext class.
+
+Fix it by offloading the enable body to a dedicated system-wide RT
+(SCHED_FIFO) kthread which cannot be starved by either fair or ext class
+tasks. scx_enable() lazily creates the kthread on first use and passes the
+ops pointer through a struct scx_enable_cmd containing the kthread_work,
+then synchronously waits for completion.
+
+The workfn runs on a different kthread from sch->helper (which runs
+disable_work), so it can safely flush disable_work on the error path
+without deadlock.
+
+Fixes: 8c2090c504e9 ("sched_ext: Initialize in bypass mode")
+Cc: stable@vger.kernel.org # v6.12+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c |   66 ++++++++++++++++++++++++++++++++++++++++++++---------
+ 1 file changed, 56 insertions(+), 10 deletions(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -4935,20 +4935,30 @@ static int validate_ops(struct scx_sched
+       return 0;
+ }
+-static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
++/*
++ * scx_enable() is offloaded to a dedicated system-wide RT kthread to avoid
++ * starvation. During the READY -> ENABLED task switching loop, the calling
++ * thread's sched_class gets switched from fair to ext. As fair has higher
++ * priority than ext, the calling thread can be indefinitely starved under
++ * fair-class saturation, leading to a system hang.
++ */
++struct scx_enable_cmd {
++      struct kthread_work     work;
++      struct sched_ext_ops    *ops;
++      int                     ret;
++};
++
++static void scx_enable_workfn(struct kthread_work *work)
+ {
++      struct scx_enable_cmd *cmd =
++              container_of(work, struct scx_enable_cmd, work);
++      struct sched_ext_ops *ops = cmd->ops;
+       struct scx_sched *sch;
+       struct scx_task_iter sti;
+       struct task_struct *p;
+       unsigned long timeout;
+       int i, cpu, ret;
+-      if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
+-                         cpu_possible_mask)) {
+-              pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
+-              return -EINVAL;
+-      }
+-
+       mutex_lock(&scx_enable_mutex);
+       if (scx_enable_state() != SCX_DISABLED) {
+@@ -5165,13 +5175,15 @@ static int scx_enable(struct sched_ext_o
+       atomic_long_inc(&scx_enable_seq);
+-      return 0;
++      cmd->ret = 0;
++      return;
+ err_free_ksyncs:
+       free_kick_syncs();
+ err_unlock:
+       mutex_unlock(&scx_enable_mutex);
+-      return ret;
++      cmd->ret = ret;
++      return;
+ err_disable_unlock_all:
+       scx_cgroup_unlock();
+@@ -5190,7 +5202,41 @@ err_disable:
+        */
+       scx_error(sch, "scx_enable() failed (%d)", ret);
+       kthread_flush_work(&sch->disable_work);
+-      return 0;
++      cmd->ret = 0;
++}
++
++static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
++{
++      static struct kthread_worker *helper;
++      static DEFINE_MUTEX(helper_mutex);
++      struct scx_enable_cmd cmd;
++
++      if (!cpumask_equal(housekeeping_cpumask(HK_TYPE_DOMAIN),
++                         cpu_possible_mask)) {
++              pr_err("sched_ext: Not compatible with \"isolcpus=\" domain isolation\n");
++              return -EINVAL;
++      }
++
++      if (!READ_ONCE(helper)) {
++              mutex_lock(&helper_mutex);
++              if (!helper) {
++                      helper = kthread_run_worker(0, "scx_enable_helper");
++                      if (IS_ERR_OR_NULL(helper)) {
++                              helper = NULL;
++                              mutex_unlock(&helper_mutex);
++                              return -ENOMEM;
++                      }
++                      sched_set_fifo(helper->task);
++              }
++              mutex_unlock(&helper_mutex);
++      }
++
++      kthread_init_work(&cmd.work, scx_enable_workfn);
++      cmd.ops = ops;
++
++      kthread_queue_work(READ_ONCE(helper), &cmd.work);
++      kthread_flush_work(&cmd.work);
++      return cmd.ret;
+ }
index 2840705453f6ef861f279aa441c52e87e183a1d7..75e761996158e23434f7fdd7139493267043a472 100644 (file)
@@ -214,3 +214,43 @@ mm-fix-a-hmm_range_fault-livelock-starvation-problem.patch
 nsfs-tighten-permission-checks-for-ns-iteration-ioctls.patch
 liveupdate-luo_file-remember-retrieve-status.patch
 kthread-consolidate-kthread-exit-paths-to-prevent-use-after-free.patch
+cpufreq-intel_pstate-fix-null-pointer-dereference-in-update_cpu_qos_request.patch
+drm-amdgpu-add-upper-bound-check-on-user-inputs-in-signal-ioctl.patch
+drm-amdgpu-userq-fix-reference-leak-in-amdgpu_userq_wait_ioctl.patch
+drm-amdgpu-add-upper-bound-check-on-user-inputs-in-wait-ioctl.patch
+drm-amd-disable-mes-lr-compute-w-a.patch
+ipmi-si-don-t-block-module-unload-if-the-bmc-is-messed-up.patch
+ipmi-si-use-a-long-timeout-when-the-bmc-is-misbehaving.patch
+drm-bridge-samsung-dsim-fix-memory-leak-in-error-path.patch
+drm-bridge-ti-sn65dsi86-enable-hpd-polling-if-irq-is-not-used.patch
+ipmi-si-handle-waiting-messages-when-bmc-failure-detected.patch
+nouveau-gsp-drop-warn_on-in-acpi-probes.patch
+drm-i915-alpm-alpm-disable-fixes.patch
+gpiolib-normalize-the-return-value-of-gc-get-on-behalf-of-buggy-drivers.patch
+ipmi-si-fix-check-for-a-misbehaving-bmc.patch
+drm-xe-sync-fix-user-fence-leak-on-alloc-failure.patch
+drm-xe-sync-cleanup-partially-initialized-sync-on-parse-failure.patch
+s390-pfault-fix-virtual-vs-physical-address-confusion.patch
+bpf-fix-kprobe_multi-cookies-access-in-show_fdinfo-callback.patch
+arm64-gcs-honour-mprotect-prot_none-on-shadow-stack-mappings.patch
+nfsd-fix-cred-ref-leak-in-nfsd_nl_listener_set_doit.patch
+device-property-allow-secondary-lookup-in-fwnode_get_next_child_node.patch
+irqchip-gic-v3-its-limit-number-of-per-device-msis-to-the-range-the-its-supports.patch
+btrfs-fix-chunk-map-leak-in-btrfs_map_block-after-btrfs_chunk_map_num_copies.patch
+sched_ext-disable-preemption-between-scx_claim_exit-and-kicking-helper-work.patch
+sched_ext-fix-starvation-of-scx_enable-under-fair-class-saturation.patch
+iomap-don-t-mark-folio-uptodate-if-read-io-has-bytes-pending.patch
+iomap-reject-delalloc-mappings-during-writeback.patch
+nsfs-tighten-permission-checks-for-handle-opening.patch
+nstree-tighten-permission-checks-for-listing.patch
+ice-reintroduce-retry-mechanism-for-indirect-aq.patch
+kunit-irq-ensure-timer-doesn-t-fire-too-frequently.patch
+ixgbevf-fix-link-setup-issue.patch
+mm-memfd_luo-always-make-all-folios-uptodate.patch
+mm-memfd_luo-always-dirty-all-folios.patch
+mm-huge_memory-fix-a-folio_split-race-condition-with-folio_try_get.patch
+mm-damon-core-clear-walk_control-on-inactive-context-in-damos_walk.patch
+mm-slab-fix-an-incorrect-check-in-obj_exts_alloc_size.patch
+staging-sm750fb-add-missing-pci_release_region-on-error-and-removal.patch
+staging-rtl8723bs-properly-validate-the-data-in-rtw_get_ie_ex.patch
+staging-rtl8723bs-fix-potential-out-of-bounds-read-in-rtw_restruct_wmm_ie.patch
diff --git a/queue-6.19/staging-rtl8723bs-fix-potential-out-of-bounds-read-in-rtw_restruct_wmm_ie.patch b/queue-6.19/staging-rtl8723bs-fix-potential-out-of-bounds-read-in-rtw_restruct_wmm_ie.patch
new file mode 100644 (file)
index 0000000..fa9b3fe
--- /dev/null
@@ -0,0 +1,39 @@
+From a75281626fc8fa6dc6c9cc314ee423e8bc45203b Mon Sep 17 00:00:00 2001
+From: Luka Gejak <luka.gejak@linux.dev>
+Date: Tue, 24 Feb 2026 14:26:47 +0100
+Subject: staging: rtl8723bs: fix potential out-of-bounds read in rtw_restruct_wmm_ie
+
+From: Luka Gejak <luka.gejak@linux.dev>
+
+commit a75281626fc8fa6dc6c9cc314ee423e8bc45203b upstream.
+
+The current code checks 'i + 5 < in_len' at the end of the if statement.
+However, it accesses 'in_ie[i + 5]' before that check, which can lead
+to an out-of-bounds read. Move the length check to the beginning of the
+conditional to ensure the index is within bounds before accessing the
+array.
+
+Fixes: 554c0a3abf21 ("staging: Add rtl8723bs sdio wifi driver")
+Cc: stable <stable@kernel.org>
+Signed-off-by: Luka Gejak <luka.gejak@linux.dev>
+Reviewed-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/20260224132647.11642-2-luka.gejak@linux.dev
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/rtl8723bs/core/rtw_mlme.c |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
++++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
+@@ -2002,7 +2002,10 @@ int rtw_restruct_wmm_ie(struct adapter *
+       while (i < in_len) {
+               ielength = initial_out_len;
+-              if (in_ie[i] == 0xDD && in_ie[i+2] == 0x00 && in_ie[i+3] == 0x50  && in_ie[i+4] == 0xF2 && in_ie[i+5] == 0x02 && i+5 < in_len) { /* WMM element ID and OUI */
++              if (i + 5 < in_len &&
++                  in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 &&
++                  in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 &&
++                  in_ie[i + 5] == 0x02) {
+                       for (j = i; j < i + 9; j++) {
+                               out_ie[ielength] = in_ie[j];
+                               ielength++;
diff --git a/queue-6.19/staging-rtl8723bs-properly-validate-the-data-in-rtw_get_ie_ex.patch b/queue-6.19/staging-rtl8723bs-properly-validate-the-data-in-rtw_get_ie_ex.patch
new file mode 100644 (file)
index 0000000..a716bdd
--- /dev/null
@@ -0,0 +1,56 @@
+From f0109b9d3e1e455429279d602f6276e34689750a Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 23 Feb 2026 14:31:35 +0100
+Subject: staging: rtl8723bs: properly validate the data in rtw_get_ie_ex()
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+commit f0109b9d3e1e455429279d602f6276e34689750a upstream.
+
+Just like in commit 154828bf9559 ("staging: rtl8723bs: fix out-of-bounds
+read in rtw_get_ie() parser"), we don't trust the data in the frame so
+we should check the length better before acting on it
+
+Cc: stable <stable@kernel.org>
+Assisted-by: gkh_clanker_2000
+Tested-by: Navaneeth K <knavaneeth786@gmail.com>
+Reviewed-by: Navaneeth K <knavaneeth786@gmail.com>
+Link: https://patch.msgid.link/2026022336-arrange-footwork-6e54@gregkh
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/rtl8723bs/core/rtw_ieee80211.c |   15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
++++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+@@ -185,20 +185,25 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len
+       cnt = 0;
+-      while (cnt < in_len) {
++      while (cnt + 2 <= in_len) {
++              u8 ie_len = in_ie[cnt + 1];
++
++              if (cnt + 2 + ie_len > in_len)
++                      break;
++
+               if (eid == in_ie[cnt]
+-                      && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
++                      && (!oui || (ie_len >= oui_len && !memcmp(&in_ie[cnt + 2], oui, oui_len)))) {
+                       target_ie = &in_ie[cnt];
+                       if (ie)
+-                              memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
++                              memcpy(ie, &in_ie[cnt], ie_len + 2);
+                       if (ielen)
+-                              *ielen = in_ie[cnt+1]+2;
++                              *ielen = ie_len + 2;
+                       break;
+               }
+-              cnt += in_ie[cnt+1]+2; /* goto next */
++              cnt += ie_len + 2; /* goto next */
+       }
+       return target_ie;
diff --git a/queue-6.19/staging-sm750fb-add-missing-pci_release_region-on-error-and-removal.patch b/queue-6.19/staging-sm750fb-add-missing-pci_release_region-on-error-and-removal.patch
new file mode 100644 (file)
index 0000000..e90b99a
--- /dev/null
@@ -0,0 +1,90 @@
+From 8225489ddb900656cc21573b4e1b00c9181fd777 Mon Sep 17 00:00:00 2001
+From: Artem Lytkin <iprintercanon@gmail.com>
+Date: Mon, 16 Feb 2026 20:20:38 +0000
+Subject: staging: sm750fb: add missing pci_release_region on error and removal
+
+From: Artem Lytkin <iprintercanon@gmail.com>
+
+commit 8225489ddb900656cc21573b4e1b00c9181fd777 upstream.
+
+hw_sm750_map() calls pci_request_region() but never releases the
+region on error paths or in lynxfb_pci_remove(). This causes a
+resource leak that prevents the PCI region from being mapped again
+after driver removal or a failed probe. A TODO comment in the code
+acknowledges this missing cleanup.
+
+Restructure the error handling in hw_sm750_map() to properly release
+the PCI region on ioremap failures, and add pci_release_region() to
+lynxfb_pci_remove().
+
+Signed-off-by: Artem Lytkin <iprintercanon@gmail.com>
+Cc: stable <stable@kernel.org>
+Link: https://patch.msgid.link/20260216202038.1828-1-iprintercanon@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/staging/sm750fb/sm750.c    |    1 +
+ drivers/staging/sm750fb/sm750_hw.c |   22 +++++++++++-----------
+ 2 files changed, 12 insertions(+), 11 deletions(-)
+
+--- a/drivers/staging/sm750fb/sm750.c
++++ b/drivers/staging/sm750fb/sm750.c
+@@ -1123,6 +1123,7 @@ static void lynxfb_pci_remove(struct pci
+       iounmap(sm750_dev->pvReg);
+       iounmap(sm750_dev->pvMem);
++      pci_release_region(pdev, 1);
+       kfree(g_settings);
+ }
+--- a/drivers/staging/sm750fb/sm750_hw.c
++++ b/drivers/staging/sm750fb/sm750_hw.c
+@@ -36,16 +36,11 @@ int hw_sm750_map(struct sm750_dev *sm750
+       pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start);
+-      /*
+-       * reserve the vidreg space of smi adaptor
+-       * if you do this, you need to add release region code
+-       * in lynxfb_remove, or memory will not be mapped again
+-       * successfully
+-       */
++      /* reserve the vidreg space of smi adaptor */
+       ret = pci_request_region(pdev, 1, "sm750fb");
+       if (ret) {
+               pr_err("Can not request PCI regions.\n");
+-              goto exit;
++              return ret;
+       }
+       /* now map mmio and vidmem */
+@@ -54,7 +49,7 @@ int hw_sm750_map(struct sm750_dev *sm750
+       if (!sm750_dev->pvReg) {
+               pr_err("mmio failed\n");
+               ret = -EFAULT;
+-              goto exit;
++              goto err_release_region;
+       }
+       pr_info("mmio virtual addr = %p\n", sm750_dev->pvReg);
+@@ -79,13 +74,18 @@ int hw_sm750_map(struct sm750_dev *sm750
+       sm750_dev->pvMem =
+               ioremap_wc(sm750_dev->vidmem_start, sm750_dev->vidmem_size);
+       if (!sm750_dev->pvMem) {
+-              iounmap(sm750_dev->pvReg);
+               pr_err("Map video memory failed\n");
+               ret = -EFAULT;
+-              goto exit;
++              goto err_unmap_reg;
+       }
+       pr_info("video memory vaddr = %p\n", sm750_dev->pvMem);
+-exit:
++
++      return 0;
++
++err_unmap_reg:
++      iounmap(sm750_dev->pvReg);
++err_release_region:
++      pci_release_region(pdev, 1);
+       return ret;
+ }