From bd33734e441d692eb1b273d5b069831960fe82a3 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 7 Feb 2026 16:01:43 +0100 Subject: [PATCH] 5.15-stable patches added patches: arm-9468-1-fix-memset64-on-big-endian.patch platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch x86-kfence-fix-booting-on-32bit-non-pae-systems.patch --- ...rm-9468-1-fix-memset64-on-big-endian.patch | 40 ++++++++ ...try-fix-swapped-arrays-in-pss-output.patch | 54 +++++++++++ ...exclusive-lock-is-ensured-to-be-held.patch | 94 +++++++++++++++++++ queue-5.15/series | 4 + ...fix-booting-on-32bit-non-pae-systems.patch | 68 ++++++++++++++ 5 files changed, 260 insertions(+) create mode 100644 queue-5.15/arm-9468-1-fix-memset64-on-big-endian.patch create mode 100644 queue-5.15/platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch create mode 100644 queue-5.15/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch create mode 100644 queue-5.15/x86-kfence-fix-booting-on-32bit-non-pae-systems.patch diff --git a/queue-5.15/arm-9468-1-fix-memset64-on-big-endian.patch b/queue-5.15/arm-9468-1-fix-memset64-on-big-endian.patch new file mode 100644 index 00000000000..665d71ad3f7 --- /dev/null +++ b/queue-5.15/arm-9468-1-fix-memset64-on-big-endian.patch @@ -0,0 +1,40 @@ +From 23ea2a4c72323feb6e3e025e8a6f18336513d5ad Mon Sep 17 00:00:00 2001 +From: Thomas Weissschuh +Date: Wed, 7 Jan 2026 11:01:49 +0100 +Subject: ARM: 9468/1: fix memset64() on big-endian +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Thomas Weissschuh + +commit 23ea2a4c72323feb6e3e025e8a6f18336513d5ad upstream. + +On big-endian systems the 32-bit low and high halves need to be swapped +for the underlying assembly implementation to work correctly. + +Fixes: fd1d362600e2 ("ARM: implement memset32 & memset64") +Cc: stable@vger.kernel.org +Signed-off-by: Thomas Weißschuh +Reviewed-by: Matthew Wilcox (Oracle) +Reviewed-by: Arnd Bergmann +Signed-off-by: Russell King (Oracle) +Signed-off-by: Greg Kroah-Hartman +--- + arch/arm/include/asm/string.h | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/arch/arm/include/asm/string.h ++++ b/arch/arm/include/asm/string.h +@@ -42,7 +42,10 @@ static inline void *memset32(uint32_t *p + extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi); + static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n) + { +- return __memset64(p, v, n * 8, v >> 32); ++ if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN)) ++ return __memset64(p, v, n * 8, v >> 32); ++ else ++ return __memset64(p, v >> 32, n * 8, v); + } + + /* diff --git a/queue-5.15/platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch b/queue-5.15/platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch new file mode 100644 index 00000000000..c47d1f9e608 --- /dev/null +++ b/queue-5.15/platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch @@ -0,0 +1,54 @@ +From 25e9e322d2ab5c03602eff4fbf4f7c40019d8de2 Mon Sep 17 00:00:00 2001 +From: Kaushlendra Kumar +Date: Wed, 24 Dec 2025 08:50:53 +0530 +Subject: platform/x86: intel_telemetry: Fix swapped arrays in PSS output +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Kaushlendra Kumar + +commit 25e9e322d2ab5c03602eff4fbf4f7c40019d8de2 upstream. 
+ +The LTR blocking statistics and wakeup event counters are incorrectly +cross-referenced during debugfs output rendering. The code populates +pss_ltr_blkd[] with LTR blocking data and pss_s0ix_wakeup[] with wakeup +data, but the display loops reference the wrong arrays. + +This causes the "LTR Blocking Status" section to print wakeup events +and the "Wakes Status" section to print LTR blockers, misleading power +management analysis and S0ix residency debugging. + +Fix by aligning array usage with the intended output section labels. + +Fixes: 87bee290998d ("platform:x86: Add Intel Telemetry Debugfs interfaces") +Cc: stable@vger.kernel.org +Signed-off-by: Kaushlendra Kumar +Link: https://patch.msgid.link/20251224032053.3915900-1-kaushlendra.kumar@intel.com +Reviewed-by: Ilpo Järvinen +Signed-off-by: Ilpo Järvinen +Signed-off-by: Greg Kroah-Hartman +--- + drivers/platform/x86/intel/telemetry/debugfs.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +--- a/drivers/platform/x86/intel/telemetry/debugfs.c ++++ b/drivers/platform/x86/intel/telemetry/debugfs.c +@@ -449,7 +449,7 @@ static int telem_pss_states_show(struct + for (index = 0; index < debugfs_conf->pss_ltr_evts; index++) { + seq_printf(s, "%-32s\t%u\n", + debugfs_conf->pss_ltr_data[index].name, +- pss_s0ix_wakeup[index]); ++ pss_ltr_blkd[index]); + } + + seq_puts(s, "\n--------------------------------------\n"); +@@ -459,7 +459,7 @@ static int telem_pss_states_show(struct + for (index = 0; index < debugfs_conf->pss_wakeup_evts; index++) { + seq_printf(s, "%-32s\t%u\n", + debugfs_conf->pss_wakeup[index].name, +- pss_ltr_blkd[index]); ++ pss_s0ix_wakeup[index]); + } + + return 0; diff --git a/queue-5.15/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch b/queue-5.15/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch new file mode 100644 index 00000000000..eb9cf431b8b --- /dev/null +++ b/queue-5.15/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch @@ -0,0 +1,94 @@ +From bd3884a204c3b507e6baa9a4091aa927f9af5404 Mon Sep 17 00:00:00 2001 +From: Ilya Dryomov +Date: Wed, 7 Jan 2026 22:37:55 +0100 +Subject: rbd: check for EOD after exclusive lock is ensured to be held + +From: Ilya Dryomov + +commit bd3884a204c3b507e6baa9a4091aa927f9af5404 upstream. + +Similar to commit 870611e4877e ("rbd: get snapshot context after +exclusive lock is ensured to be held"), move the "beyond EOD" check +into the image request state machine so that it's performed after +exclusive lock is ensured to be held. This avoids various race +conditions which can arise when the image is shrunk under I/O (in +practice, mostly readahead). In one such scenario + + rbd_assert(objno < rbd_dev->object_map_size); + +can be triggered if a close-to-EOD read gets queued right before the +shrink is initiated and the EOD check is performed against an outdated +mapping_size. After the resize is done on the server side and exclusive +lock is (re)acquired bringing along the new (now shrunk) object map, the +read starts going through the state machine and rbd_obj_may_exist() gets +invoked on an object that is out of bounds of rbd_dev->object_map array. 
+ +Cc: stable@vger.kernel.org +Signed-off-by: Ilya Dryomov +Reviewed-by: Dongsheng Yang +Signed-off-by: Greg Kroah-Hartman +--- + drivers/block/rbd.c | 33 +++++++++++++++++++++------------ + 1 file changed, 21 insertions(+), 12 deletions(-) + +--- a/drivers/block/rbd.c ++++ b/drivers/block/rbd.c +@@ -3497,11 +3497,29 @@ static void rbd_img_object_requests(stru + rbd_assert(!need_exclusive_lock(img_req) || + __rbd_is_lock_owner(rbd_dev)); + +- if (rbd_img_is_write(img_req)) { +- rbd_assert(!img_req->snapc); ++ if (test_bit(IMG_REQ_CHILD, &img_req->flags)) { ++ rbd_assert(!rbd_img_is_write(img_req)); ++ } else { ++ struct request *rq = blk_mq_rq_from_pdu(img_req); ++ u64 off = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; ++ u64 len = blk_rq_bytes(rq); ++ u64 mapping_size; ++ + down_read(&rbd_dev->header_rwsem); +- img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc); ++ mapping_size = rbd_dev->mapping.size; ++ if (rbd_img_is_write(img_req)) { ++ rbd_assert(!img_req->snapc); ++ img_req->snapc = ++ ceph_get_snap_context(rbd_dev->header.snapc); ++ } + up_read(&rbd_dev->header_rwsem); ++ ++ if (unlikely(off + len > mapping_size)) { ++ rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", ++ off, len, mapping_size); ++ img_req->pending.result = -EIO; ++ return; ++ } + } + + for_each_obj_request(img_req, obj_req) { +@@ -4727,7 +4745,6 @@ static void rbd_queue_workfn(struct work + struct request *rq = blk_mq_rq_from_pdu(img_request); + u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; + u64 length = blk_rq_bytes(rq); +- u64 mapping_size; + int result; + + /* Ignore/skip any zero-length requests */ +@@ -4740,17 +4757,9 @@ static void rbd_queue_workfn(struct work + blk_mq_start_request(rq); + + down_read(&rbd_dev->header_rwsem); +- mapping_size = rbd_dev->mapping.size; + rbd_img_capture_header(img_request); + up_read(&rbd_dev->header_rwsem); + +- if (offset + length > mapping_size) { +- rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset, +- length, mapping_size); +- result = -EIO; +- goto err_img_request; +- } +- + dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev, + img_request, obj_op_name(op_type), offset, length); + diff --git a/queue-5.15/series b/queue-5.15/series index e69de29bb2d..4bafdbdb05d 100644 --- a/queue-5.15/series +++ b/queue-5.15/series @@ -0,0 +1,4 @@ +x86-kfence-fix-booting-on-32bit-non-pae-systems.patch +platform-x86-intel_telemetry-fix-swapped-arrays-in-pss-output.patch +rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch +arm-9468-1-fix-memset64-on-big-endian.patch diff --git a/queue-5.15/x86-kfence-fix-booting-on-32bit-non-pae-systems.patch b/queue-5.15/x86-kfence-fix-booting-on-32bit-non-pae-systems.patch new file mode 100644 index 00000000000..a4cebf6c451 --- /dev/null +++ b/queue-5.15/x86-kfence-fix-booting-on-32bit-non-pae-systems.patch @@ -0,0 +1,68 @@ +From 16459fe7e0ca6520a6e8f603de4ccd52b90fd765 Mon Sep 17 00:00:00 2001 +From: Andrew Cooper +Date: Mon, 26 Jan 2026 21:10:46 +0000 +Subject: x86/kfence: fix booting on 32bit non-PAE systems + +From: Andrew Cooper + +commit 16459fe7e0ca6520a6e8f603de4ccd52b90fd765 upstream. + +The original patch inverted the PTE unconditionally to avoid +L1TF-vulnerable PTEs, but Linux doesn't make this adjustment in 2-level +paging. + +Adjust the logic to use the flip_protnone_guard() helper, which is a nop +on 2-level paging but inverts the address bits in all other paging modes. + +This doesn't matter for the Xen aspect of the original change. 
Linux no +longer supports running 32bit PV under Xen, and Xen doesn't support +running any 32bit PV guests without using PAE paging. + +Link: https://lkml.kernel.org/r/20260126211046.2096622-1-andrew.cooper3@citrix.com +Fixes: b505f1944535 ("x86/kfence: avoid writing L1TF-vulnerable PTEs") +Reported-by: Ryusuke Konishi +Closes: https://lore.kernel.org/lkml/CAKFNMokwjw68ubYQM9WkzOuH51wLznHpEOMSqtMoV1Rn9JV_gw@mail.gmail.com/ +Signed-off-by: Andrew Cooper +Tested-by: Ryusuke Konishi +Tested-by: Borislav Petkov (AMD) +Cc: Alexander Potapenko +Cc: Marco Elver +Cc: Dmitry Vyukov +Cc: Thomas Gleixner +Cc: Ingo Molnar +Cc: Dave Hansen +Cc: "H. Peter Anvin" +Cc: Jann Horn +Cc: +Signed-off-by: Andrew Morton +Signed-off-by: Greg Kroah-Hartman +--- + arch/x86/include/asm/kfence.h | 7 ++++--- + 1 file changed, 4 insertions(+), 3 deletions(-) + +--- a/arch/x86/include/asm/kfence.h ++++ b/arch/x86/include/asm/kfence.h +@@ -42,7 +42,7 @@ static inline bool kfence_protect_page(u + { + unsigned int level; + pte_t *pte = lookup_address(addr, &level); +- pteval_t val; ++ pteval_t val, new; + + if (WARN_ON(!pte || level != PG_LEVEL_4K)) + return false; +@@ -57,11 +57,12 @@ static inline bool kfence_protect_page(u + return true; + + /* +- * Otherwise, invert the entire PTE. This avoids writing out an ++ * Otherwise, flip the Present bit, taking care to avoid writing an + * L1TF-vulnerable PTE (not present, without the high address bits + * set). + */ +- set_pte(pte, __pte(~val)); ++ new = val ^ _PAGE_PRESENT; ++ set_pte(pte, __pte(flip_protnone_guard(val, new, PTE_PFN_MASK))); + + /* + * If the page was protected (non-present) and we're making it -- 2.47.3
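
A stand-alone illustration of the guarded flip used in the x86/kfence patch above may help when reviewing the backport. This is a hedged userspace sketch, not arch/x86 code: the constants and the needs-invert test are simplified stand-ins modelled on the behaviour described in the commit message, and it corresponds to the PAE/64-bit case; on real 2-level (non-PAE) paging flip_protnone_guard() simply returns the value unchanged.

/* Illustrative only -- simplified userspace model, not arch/x86 code. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t pteval_t;

#define _PAGE_PRESENT   0x001ULL
#define _PAGE_PROTNONE  0x100ULL                 /* assumed value for this sketch */
#define PTE_PFN_MASK    0x000ffffffffff000ULL    /* assumed PFN field for this sketch */

/* A non-zero PTE that is neither present nor PROT_NONE is the L1TF-relevant case. */
static int pte_needs_invert(pteval_t val)
{
	return val && !(val & (_PAGE_PRESENT | _PAGE_PROTNONE));
}

/* Model of the guard: invert the PFN bits only when crossing that edge. */
static pteval_t guard(pteval_t oldval, pteval_t val, pteval_t mask)
{
	if (pte_needs_invert(val) != pte_needs_invert(oldval))
		val = (val & ~mask) | (~val & mask);
	return val;
}

int main(void)
{
	pteval_t val = 0x1234f000ULL | _PAGE_PRESENT;  /* a present 4K mapping */
	pteval_t new = val ^ _PAGE_PRESENT;            /* kfence protect: clear Present */

	/* Without the guard, the not-present PTE still carries a real address. */
	printf("unguarded: %#018llx\n", (unsigned long long)new);
	/* With the guard, the PFN bits are inverted, so no valid page is named. */
	printf("guarded:   %#018llx\n",
	       (unsigned long long)guard(val, new, PTE_PFN_MASK));
	return 0;
}

The unconditional ~val from the Fixes: commit inverted every bit of the PTE even on 2-level paging, where Linux does no such adjustment; routing the new value through the guard keeps the L1TF protection on PAE/64-bit, while on non-PAE only the Present bit is flipped.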
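
On the ARM memset64() fix, the commit message notes that the 32-bit low and high halves have to be swapped on big-endian. The endianness fact behind that is easy to check from userspace; the snippet below is only a hedged demonstration of which half of a uint64_t occupies the lower address, and assumes nothing about the __memset64 assembly beyond what the commit states.

/* Illustrative only: which 32-bit half of a u64 sits at the lower address? */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint64_t v = 0x1122334455667788ULL;
	uint32_t first_word;

	/* Copy the first four bytes of the stored 64-bit value. */
	memcpy(&first_word, &v, sizeof(first_word));

	if (first_word == (uint32_t)v)
		printf("little-endian: low half (%#x) comes first\n",
		       (unsigned)(uint32_t)v);
	else
		printf("big-endian: high half (%#x) comes first, so the\n"
		       "wrapper's (low, hi) arguments must be swapped\n",
		       (unsigned)(uint32_t)(v >> 32));
	return 0;
}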