From: Greg Kroah-Hartman
Date: Sat, 7 Feb 2026 15:01:27 +0000 (+0100)
Subject: 5.10-stable patches
X-Git-Tag: v5.10.250~50
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=38cbce7961512c13811387662b0d82befcfe548a;p=thirdparty%2Fkernel%2Fstable-queue.git

5.10-stable patches

added patches:
	arm-9468-1-fix-memset64-on-big-endian.patch
	rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch
	series
---

diff --git a/queue-5.10/arm-9468-1-fix-memset64-on-big-endian.patch b/queue-5.10/arm-9468-1-fix-memset64-on-big-endian.patch
new file mode 100644
index 0000000000..82aa738268
--- /dev/null
+++ b/queue-5.10/arm-9468-1-fix-memset64-on-big-endian.patch
@@ -0,0 +1,40 @@
+From 23ea2a4c72323feb6e3e025e8a6f18336513d5ad Mon Sep 17 00:00:00 2001
+From: Thomas Weissschuh
+Date: Wed, 7 Jan 2026 11:01:49 +0100
+Subject: ARM: 9468/1: fix memset64() on big-endian
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Thomas Weissschuh
+
+commit 23ea2a4c72323feb6e3e025e8a6f18336513d5ad upstream.
+
+On big-endian systems the 32-bit low and high halves need to be swapped
+for the underlying assembly implementation to work correctly.
+
+Fixes: fd1d362600e2 ("ARM: implement memset32 & memset64")
+Cc: stable@vger.kernel.org
+Signed-off-by: Thomas Weißschuh
+Reviewed-by: Matthew Wilcox (Oracle)
+Reviewed-by: Arnd Bergmann
+Signed-off-by: Russell King (Oracle)
+Signed-off-by: Greg Kroah-Hartman
+---
+ arch/arm/include/asm/string.h | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/include/asm/string.h
++++ b/arch/arm/include/asm/string.h
+@@ -36,7 +36,10 @@ static inline void *memset32(uint32_t *p
+ extern void *__memset64(uint64_t *, uint32_t low, __kernel_size_t, uint32_t hi);
+ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
+ {
+-	return __memset64(p, v, n * 8, v >> 32);
++	if (IS_ENABLED(CONFIG_CPU_LITTLE_ENDIAN))
++		return __memset64(p, v, n * 8, v >> 32);
++	else
++		return __memset64(p, v >> 32, n * 8, v);
+ }
+
+ #endif
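[ Editor's note: not part of the patch above. A minimal userspace model of
  the argument-order bug it fixes, assuming -- as the low/hi parameter names
  and the fix imply -- that the assembly stores the second argument at the
  lower-addressed 32-bit word of each 64-bit slot and the fourth argument at
  the higher one. model_memset64() and main() are illustrative only. ]

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Stand-in for the ARM assembly: writes "first" to the lower-addressed
	 * 32-bit word of each 64-bit slot and "second" to the higher one. */
	static void model_memset64(uint64_t *p, uint32_t first, size_t bytes,
				   uint32_t second)
	{
		uint32_t *w = (uint32_t *)p;
		size_t i;

		for (i = 0; i < bytes / 8; i++) {
			w[2 * i] = first;
			w[2 * i + 1] = second;
		}
	}

	int main(void)
	{
		const uint64_t v = 0x1122334455667788ULL;
		const uint64_t one = 1;
		/* On big-endian the most significant byte sits at the lowest
		 * address, so the first byte of a 64-bit 1 reads as zero. */
		int big_endian = *(const uint8_t *)&one == 0;
		uint64_t buf;

		if (!big_endian)
			model_memset64(&buf, (uint32_t)v, sizeof(buf),
				       (uint32_t)(v >> 32));
		else	/* the swap the patch introduces for big-endian */
			model_memset64(&buf, (uint32_t)(v >> 32), sizeof(buf),
				       (uint32_t)v);

		printf("pattern restored correctly: %s\n",
		       memcmp(&buf, &v, sizeof(v)) == 0 ? "yes" : "no");
		return 0;
	}

[ Without the swap, a big-endian kernel stores the two halves transposed,
  corrupting every 64-bit pattern written by memset64(). ]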
diff --git a/queue-5.10/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch b/queue-5.10/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch
new file mode 100644
index 0000000000..8584840e08
--- /dev/null
+++ b/queue-5.10/rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch
@@ -0,0 +1,94 @@
+From bd3884a204c3b507e6baa9a4091aa927f9af5404 Mon Sep 17 00:00:00 2001
+From: Ilya Dryomov
+Date: Wed, 7 Jan 2026 22:37:55 +0100
+Subject: rbd: check for EOD after exclusive lock is ensured to be held
+
+From: Ilya Dryomov
+
+commit bd3884a204c3b507e6baa9a4091aa927f9af5404 upstream.
+
+Similar to commit 870611e4877e ("rbd: get snapshot context after
+exclusive lock is ensured to be held"), move the "beyond EOD" check
+into the image request state machine so that it's performed after
+exclusive lock is ensured to be held. This avoids various race
+conditions which can arise when the image is shrunk under I/O (in
+practice, mostly readahead). In one such scenario
+
+    rbd_assert(objno < rbd_dev->object_map_size);
+
+can be triggered if a close-to-EOD read gets queued right before the
+shrink is initiated and the EOD check is performed against an outdated
+mapping_size. After the resize is done on the server side and exclusive
+lock is (re)acquired bringing along the new (now shrunk) object map, the
+read starts going through the state machine and rbd_obj_may_exist() gets
+invoked on an object that is out of bounds of rbd_dev->object_map array.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Ilya Dryomov
+Reviewed-by: Dongsheng Yang
+Signed-off-by: Greg Kroah-Hartman
+---
+ drivers/block/rbd.c | 33 +++++++++++++++++++++------------
+ 1 file changed, 21 insertions(+), 12 deletions(-)
+
+--- a/drivers/block/rbd.c
++++ b/drivers/block/rbd.c
+@@ -3560,11 +3560,29 @@ static void rbd_img_object_requests(stru
+ 	rbd_assert(!need_exclusive_lock(img_req) ||
+ 		   __rbd_is_lock_owner(rbd_dev));
+
+-	if (rbd_img_is_write(img_req)) {
+-		rbd_assert(!img_req->snapc);
++	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
++		rbd_assert(!rbd_img_is_write(img_req));
++	} else {
++		struct request *rq = blk_mq_rq_from_pdu(img_req);
++		u64 off = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
++		u64 len = blk_rq_bytes(rq);
++		u64 mapping_size;
++
+ 		down_read(&rbd_dev->header_rwsem);
+-		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
++		mapping_size = rbd_dev->mapping.size;
++		if (rbd_img_is_write(img_req)) {
++			rbd_assert(!img_req->snapc);
++			img_req->snapc =
++				ceph_get_snap_context(rbd_dev->header.snapc);
++		}
+ 		up_read(&rbd_dev->header_rwsem);
++
++		if (unlikely(off + len > mapping_size)) {
++			rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)",
++				 off, len, mapping_size);
++			img_req->pending.result = -EIO;
++			return;
++		}
+ 	}
+
+ 	for_each_obj_request(img_req, obj_req) {
+@@ -4781,7 +4799,6 @@ static void rbd_queue_workfn(struct work
+ 	struct request *rq = blk_mq_rq_from_pdu(img_request);
+ 	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
+ 	u64 length = blk_rq_bytes(rq);
+-	u64 mapping_size;
+ 	int result;
+
+ 	/* Ignore/skip any zero-length requests */
+@@ -4794,17 +4811,9 @@ static void rbd_queue_workfn(struct work
+ 	blk_mq_start_request(rq);
+
+ 	down_read(&rbd_dev->header_rwsem);
+-	mapping_size = rbd_dev->mapping.size;
+ 	rbd_img_capture_header(img_request);
+ 	up_read(&rbd_dev->header_rwsem);
+
+-	if (offset + length > mapping_size) {
+-		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
+-			 length, mapping_size);
+-		result = -EIO;
+-		goto err_img_request;
+-	}
+-
+ 	dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
+ 	     img_request, obj_op_name(op_type), offset, length);
+
diff --git a/queue-5.10/series b/queue-5.10/series
new file mode 100644
index 0000000000..bc143a98c3
--- /dev/null
+++ b/queue-5.10/series
@@ -0,0 +1,2 @@
+rbd-check-for-eod-after-exclusive-lock-is-ensured-to-be-held.patch
+arm-9468-1-fix-memset64-on-big-endian.patch
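[ Editor's note: not part of the series. A compact sketch of the
  check-then-act race the rbd patch closes, using hypothetical names
  (struct image, submit_racy(), execute_checked()) and a plain mutex in
  place of rbd's exclusive lock and image request state machine. ]

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdint.h>

	struct image {
		pthread_mutex_t lock;	/* stands in for rbd's exclusive lock */
		uint64_t size;		/* stands in for rbd_dev->mapping.size */
	};

	/* Racy shape (old rbd_queue_workfn): the size is sampled at submit
	 * time, so a concurrent shrink invalidates the check before the
	 * queued request ever runs. */
	static bool submit_racy(struct image *img, uint64_t off, uint64_t len)
	{
		bool ok;

		pthread_mutex_lock(&img->lock);
		ok = off + len <= img->size;
		pthread_mutex_unlock(&img->lock);
		/* ...request sits in a queue; img->size may shrink here... */
		return ok;	/* stale by the time the request executes */
	}

	/* Fixed shape (rbd_img_object_requests): validate against the
	 * authoritative size in the same critical section that executes
	 * the request, so no resize can slip in between. */
	static bool execute_checked(struct image *img, uint64_t off,
				    uint64_t len)
	{
		bool ok;

		pthread_mutex_lock(&img->lock);
		ok = off + len <= img->size;
		if (ok) {
			/* ...perform the I/O while the size is pinned... */
		}
		pthread_mutex_unlock(&img->lock);
		return ok;
	}

	int main(void)
	{
		struct image img = { PTHREAD_MUTEX_INITIALIZER, 1 << 20 };

		return execute_checked(&img, 0, 4096) ? 0 : 1;
	}

[ The real fix is subtler -- the check moves to a point where the exclusive
  lock is guaranteed held, keeping mapping_size coherent with the object
  map -- but the ordering principle is the same. ]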