6.1-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 7 Feb 2026 15:01:55 +0000 (16:01 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 7 Feb 2026 15:01:55 +0000 (16:01 +0100)
added patches:
arm-9468-1-fix-memset64-on-big-endian.patch
platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch
rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch
revert-drm-amd-check-if-aspm-is-enabled-from-pcie-subsystem.patch
x86-kfence-fix-booting-on-32bit-non-pae-systems.patch

queue-6.1/arm-9468-1-fix-memset64-on-big-endian.patch [new file with mode: 0644]
queue-6.1/platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch [new file with mode: 0644]
queue-6.1/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch [new file with mode: 0644]
queue-6.1/revert-drm-amd-check-if-aspm-is-enabled-from-pcie-subsystem.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/x86-kfence-fix-booting-on-32bit-non-pae-systems.patch [new file with mode: 0644]

diff --git a/queue-6.1/arm-9468-1-fix-memset64-on-big-endian.patch b/queue-6.1/arm-9468-1-fix-memset64-on-big-endian.patch
new file mode 100644 (file)
index 0000000..665d71a
--- /dev/null
@@ -0,0 +1,40 @@
+From 23ea2a4c72323feb6e3e025e8a6f18336513d5ad Mon Sep 17 00:00:00 2001
+From: Thomas Weissschuh <thomas.weissschuh@linutronix.de>
+Date: Wed, 7 Jan 2026 11:01:49 +0100
+Subject: ARM: 9468/1: fix memset64() on big-endian
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weissschuh <thomas.weissschuh@linutronix.de>
+
+commit 23ea2a4c72323feb6e3e025e8a6f18336513d5ad upstream.
+
+On big-endian systems the 32-bit low and high halves need to be swapped
+for the underlying assembly implementation to work correctly.
+
+Fixes: fd1d362600e2 ("ARM: implement memset32 & memset64")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reviewed-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm/include/asm/string.h |    5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/string.h
++++ b/arch/arm/include/asm/string.h
+@@ -42,7 +42,10 @@ static inline void *memset32(uint32_t *p
+ extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
+ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+ {
+-      return __memset64(p, v, n * 8, v >> 32);
++      if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
++              return __memset64(p, v, n * 8, v >> 32);
++      else
++              return __memset64(p, v >> 32, n * 8, v);
+ }
+ /*
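
A minimal user-space sketch of the halves swap fixed above. store_pair() is an
illustrative stand-in for the assembly __memset64() helper (which stores its two
32-bit arguments into consecutive words); the endianness probe and all names below
are assumptions for the example, not kernel code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for __memset64(): writes 'first' then 'second' into each
     * 64-bit slot, mirroring how the assembly stores its two halves. */
    static void store_pair(uint32_t *p, uint32_t first, size_t bytes, uint32_t second)
    {
            for (size_t i = 0; i < bytes / 8; i++) {
                    p[2 * i]     = first;
                    p[2 * i + 1] = second;
            }
    }

    int main(void)
    {
            uint64_t v = 0x1122334455667788ULL, out;
            uint32_t words[2];
            int big_endian = *(const uint8_t *)&v == 0x11;

            /* Mirror the patched memset64(): swap the halves on big-endian. */
            if (!big_endian)
                    store_pair(words, (uint32_t)v, sizeof(words), (uint32_t)(v >> 32));
            else
                    store_pair(words, (uint32_t)(v >> 32), sizeof(words), (uint32_t)v);

            memcpy(&out, words, sizeof(out));
            printf("round-trip ok: %d\n", out == v);
            return 0;
    }
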
diff --git a/queue-6.1/platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch b/queue-6.1/platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch
new file mode 100644 (file)
index 0000000..c47d1f9
--- /dev/null
@@ -0,0 +1,54 @@
+From 25e9e322d2ab5c03602eff4fbf4f7c40019d8de2 Mon Sep 17 00:00:00 2001
+From: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+Date: Wed, 24 Dec 2025 08:50:53 +0530
+Subject: platform/x86: intel_telemetry: Fix swapped arrays in PSS output
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+
+commit 25e9e322d2ab5c03602eff4fbf4f7c40019d8de2 upstream.
+
+The LTR blocking statistics and wakeup event counters are incorrectly
+cross-referenced during debugfs output rendering. The code populates
+pss_ltr_blkd[] with LTR blocking data and pss_s0ix_wakeup[] with wakeup
+data, but the display loops reference the wrong arrays.
+
+This causes the "LTR Blocking Status" section to print wakeup events
+and the "Wakes Status" section to print LTR blockers, misleading power
+management analysis and S0ix residency debugging.
+
+Fix by aligning array usage with the intended output section labels.
+
+Fixes: 87bee290998d ("platform:x86: Add Intel Telemetry Debugfs interfaces")
+Cc: stable@vger.kernel.org
+Signed-off-by: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+Link: https://patch.msgid.link/20251224032053.3915900-1-kaushlendra.kumar@intel.com
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/platform/x86/intel/telemetry/debugfs.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/platform/x86/intel/telemetry/debugfs.c
++++ b/drivers/platform/x86/intel/telemetry/debugfs.c
+@@ -449,7 +449,7 @@ static int telem_pss_states_show(struct
+       for (index = 0; index < debugfs_conf->pss_ltr_evts; index++) {
+               seq_printf(s, "%-32s\t%u\n",
+                          debugfs_conf->pss_ltr_data[index].name,
+-                         pss_s0ix_wakeup[index]);
++                         pss_ltr_blkd[index]);
+       }
+       seq_puts(s, "\n--------------------------------------\n");
+@@ -459,7 +459,7 @@ static int telem_pss_states_show(struct
+       for (index = 0; index < debugfs_conf->pss_wakeup_evts; index++) {
+               seq_printf(s, "%-32s\t%u\n",
+                          debugfs_conf->pss_wakeup[index].name,
+-                         pss_ltr_blkd[index]);
++                         pss_s0ix_wakeup[index]);
+       }
+       return 0;
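
A tiny stand-alone illustration of the swapped-array bug fixed above: each report
section must print the counters that were collected for it. The event names and
counts below are made up for the example and do not come from the driver:

    #include <stdio.h>

    #define NEVTS 3

    int main(void)
    {
            const char *ltr_names[NEVTS]  = { "LTR_EVT_A", "LTR_EVT_B", "LTR_EVT_C" };   /* placeholders */
            const char *wake_names[NEVTS] = { "WAKE_EVT_A", "WAKE_EVT_B", "WAKE_EVT_C" };
            unsigned int ltr_blkd[NEVTS]    = { 5, 0, 2 };   /* LTR blocking counts */
            unsigned int s0ix_wakeup[NEVTS] = { 1, 7, 0 };   /* wakeup event counts */
            int i;

            puts("LTR Blocking Status");
            for (i = 0; i < NEVTS; i++)
                    printf("%-32s\t%u\n", ltr_names[i], ltr_blkd[i]);     /* was s0ix_wakeup[i] */

            puts("\nWakes Status");
            for (i = 0; i < NEVTS; i++)
                    printf("%-32s\t%u\n", wake_names[i], s0ix_wakeup[i]); /* was ltr_blkd[i] */

            return 0;
    }
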
diff --git a/queue-6.1/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch b/queue-6.1/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch
new file mode 100644 (file)
index 0000000..b8ed1dd
--- /dev/null
@@ -0,0 +1,94 @@
+From bd3884a204c3b507e6baa9a4091aa927f9af5404 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov <idryomov@gmail.com>
+Date: Wed, 7 Jan 2026 22:37:55 +0100
+Subject: rbd: check for EOD after exclusive lock is ensured to be held
+
+From: Ilya Dryomov <idryomov@gmail.com>
+
+commit bd3884a204c3b507e6baa9a4091aa927f9af5404 upstream.
+
+Similar to commit 870611e4877e ("rbd: get snapshot context after
+exclusive lock is ensured to be held"), move the "beyond EOD" check
+into the image request state machine so that it's performed after
+exclusive lock is ensured to be held.  This avoids various race
+conditions which can arise when the image is shrunk under I/O (in
+practice, mostly readahead).  In one such scenario
+
+    rbd_assert(objno < rbd_dev->object_map_size);
+
+can be triggered if a close-to-EOD read gets queued right before the
+shrink is initiated and the EOD check is performed against an outdated
+mapping_size.  After the resize is done on the server side and exclusive
+lock is (re)acquired bringing along the new (now shrunk) object map, the
+read starts going through the state machine and rbd_obj_may_exist() gets
+invoked on an object that is out of bounds of rbd_dev->object_map array.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Reviewed-by: Dongsheng Yang <dongsheng.yang@linux.dev>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/block/rbd.c |   33 +++++++++++++++++++++------------
+ 1 file changed, 21 insertions(+), 12 deletions(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3496,11 +3496,29 @@ static void rbd_img_object_requests(stru
+       rbd_assert(!need_exclusive_lock(img_req) ||
+                  __rbd_is_lock_owner(rbd_dev));
+-      if (rbd_img_is_write(img_req)) {
+-              rbd_assert(!img_req->snapc);
++      if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
++              rbd_assert(!rbd_img_is_write(img_req));
++      } else {
++              struct request *rq = blk_mq_rq_from_pdu(img_req);
++              u64 off = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
++              u64 len = blk_rq_bytes(rq);
++              u64 mapping_size;
++
+               down_read(&rbd_dev->header_rwsem);
+-              img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
++              mapping_size = rbd_dev->mapping.size;
++              if (rbd_img_is_write(img_req)) {
++                      rbd_assert(!img_req->snapc);
++                      img_req->snapc =
++                          ceph_get_snap_context(rbd_dev->header.snapc);
++              }
+               up_read(&rbd_dev->header_rwsem);
++
++              if (unlikely(off + len > mapping_size)) {
++                      rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)",
++                               off, len, mapping_size);
++                      img_req->pending.result = -EIO;
++                      return;
++              }
+       }
+       for_each_obj_request(img_req, obj_req) {
+@@ -4726,7 +4744,6 @@ static void rbd_queue_workfn(struct work
+       struct request *rq = blk_mq_rq_from_pdu(img_request);
+       u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
+       u64 length = blk_rq_bytes(rq);
+-      u64 mapping_size;
+       int result;
+       /* Ignore/skip any zero-length requests */
+@@ -4739,17 +4756,9 @@ static void rbd_queue_workfn(struct work
+       blk_mq_start_request(rq);
+       down_read(&rbd_dev->header_rwsem);
+-      mapping_size = rbd_dev->mapping.size;
+       rbd_img_capture_header(img_request);
+       up_read(&rbd_dev->header_rwsem);
+-      if (offset + length > mapping_size) {
+-              rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
+-                       length, mapping_size);
+-              result = -EIO;
+-              goto err_img_request;
+-      }
+-
+       dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
+            img_request, obj_op_name(op_type), offset, length);
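
A minimal user-space sketch of the ordering the change above relies on: the EOD
check runs only once the exclusive lock has been (re)acquired, against the mapping
size that the lock brought in, so a shrink racing with a queued read cannot leave
the check using a stale size. Everything below is illustrative, not the rbd driver:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t mapping_size = 8 * 1024 * 1024;   /* image size before the shrink */

    /* The check now performed in the image request state machine, after the
     * exclusive lock is ensured to be held. */
    static bool beyond_eod(uint64_t off, uint64_t len)
    {
            return off + len > mapping_size;
    }

    int main(void)
    {
            /* A readahead near the old end of the image gets queued... */
            uint64_t off = 7 * 1024 * 1024, len = 64 * 1024;

            /* ...the image is shrunk and the lock is reacquired, bringing the
             * new (smaller) object map and mapping size along with it. */
            mapping_size = 1 * 1024 * 1024;

            /* Checking against the current size fails the request with -EIO
             * instead of letting it index past the shrunk object map. */
            printf("rejected: %s\n", beyond_eod(off, len) ? "yes (-EIO)" : "no");
            return 0;
    }
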
diff --git a/queue-6.1/revert-drm-amd-check-if-aspm-is-enabled-from-pcie-subsystem.patch b/queue-6.1/revert-drm-amd-check-if-aspm-is-enabled-from-pcie-subsystem.patch
new file mode 100644 (file)
index 0000000..e71a32f
--- /dev/null
@@ -0,0 +1,46 @@
+From 243b467dea1735fed904c2e54d248a46fa417a2d Mon Sep 17 00:00:00 2001
+From: Bert Karwatzki <spasswolf@web.de>
+Date: Sun, 1 Feb 2026 01:24:45 +0100
+Subject: Revert "drm/amd: Check if ASPM is enabled from PCIe subsystem"
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bert Karwatzki <spasswolf@web.de>
+
+commit 243b467dea1735fed904c2e54d248a46fa417a2d upstream.
+
+This reverts commit 7294863a6f01248d72b61d38478978d638641bee.
+
+This commit was erroneously applied again after commit 0ab5d711ec74
+("drm/amd: Refactor `amdgpu_aspm` to be evaluated per device")
+removed it, leading to very-hard-to-debug crashes on systems with two
+AMD GPUs of which only one supports ASPM.
+
+Link: https://lore.kernel.org/linux-acpi/20251006120944.7880-1-spasswolf@web.de/
+Link: https://github.com/acpica/acpica/issues/1060
+Fixes: 0ab5d711ec74 ("drm/amd: Refactor `amdgpu_aspm` to be evaluated per device")
+Signed-off-by: Bert Karwatzki <spasswolf@web.de>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 97a9689300eb2b393ba5efc17c8e5db835917080)
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -2067,9 +2067,6 @@ static int amdgpu_pci_probe(struct pci_d
+                       return -ENODEV;
+       }
+-      if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
+-              amdgpu_aspm = 0;
+-
+       if (amdgpu_virtual_display ||
+           amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
+               supports_atomic = true;
diff --git a/queue-6.1/series b/queue-6.1/series
index 6bd96d575e012295f4f80b8863d2026b37fdd254..5405cc6718b63d45b3c7c034e02ff4e57412084b 100644 (file)
--- a/queue-6.1/series
@@ -1 +1,6 @@
 nvmet-tcp-add-bounds-checks-in-nvmet_tcp_build_pdu_iovec.patch
+x86-kfence-fix-booting-on-32bit-non-pae-systems.patch
+platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch
+rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch
+arm-9468-1-fix-memset64-on-big-endian.patch
+revert-drm-amd-check-if-aspm-is-enabled-from-pcie-subsystem.patch
diff --git a/queue-6.1/x86-kfence-fix-booting-on-32bit-non-pae-systems.patch b/queue-6.1/x86-kfence-fix-booting-on-32bit-non-pae-systems.patch
new file mode 100644 (file)
index 0000000..a4cebf6
--- /dev/null
@@ -0,0 +1,68 @@
+From 16459fe7e0ca6520a6e8f603de4ccd52b90fd765 Mon Sep 17 00:00:00 2001
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+Date: Mon, 26 Jan 2026 21:10:46 +0000
+Subject: x86/kfence: fix booting on 32bit non-PAE systems
+
+From: Andrew Cooper <andrew.cooper3@citrix.com>
+
+commit 16459fe7e0ca6520a6e8f603de4ccd52b90fd765 upstream.
+
+The original patch inverted the PTE unconditionally to avoid
+L1TF-vulnerable PTEs, but Linux doesn't make this adjustment in 2-level
+paging.
+
+Adjust the logic to use the flip_protnone_guard() helper, which is a nop
+on 2-level paging but inverts the address bits in all other paging modes.
+
+This doesn't matter for the Xen aspect of the original change.  Linux no
+longer supports running 32bit PV under Xen, and Xen doesn't support
+running any 32bit PV guests without using PAE paging.
+
+Link: https://lkml.kernel.org/r/20260126211046.2096622-1-andrew.cooper3@citrix.com
+Fixes: b505f1944535 ("x86/kfence: avoid writing L1TF-vulnerable PTEs")
+Reported-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Closes: https://lore.kernel.org/lkml/CAKFNMokwjw68ubYQM9WkzOuH51wLznHpEOMSqtMoV1Rn9JV_gw@mail.gmail.com/
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Tested-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Marco Elver <elver@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/kfence.h |    7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/arch/x86/include/asm/kfence.h
++++ b/arch/x86/include/asm/kfence.h
+@@ -42,7 +42,7 @@ static inline bool kfence_protect_page(u
+ {
+       unsigned int level;
+       pte_t *pte = lookup_address(addr, &level);
+-      pteval_t val;
++      pteval_t val, new;
+       if (WARN_ON(!pte || level != PG_LEVEL_4K))
+               return false;
+@@ -57,11 +57,12 @@ static inline bool kfence_protect_page(u
+               return true;
+       /*
+-       * Otherwise, invert the entire PTE.  This avoids writing out an
++       * Otherwise, flip the Present bit, taking care to avoid writing an
+        * L1TF-vulnerable PTE (not present, without the high address bits
+        * set).
+        */
+-      set_pte(pte, __pte(~val));
++      new = val ^ _PAGE_PRESENT;
++      set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK)));
+       /*
+        * If the page was protected (non-present) and we're making it