From 8f54545d0c5895bb77c3045174f7e3b2d5ccc02c Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman
Date: Wed, 6 Nov 2024 11:58:35 +0100
Subject: [PATCH] 6.6-stable patches

added patches:
	block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
	fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
---
 ...cks-in-blk_rq_map_user_bvec-for-real.patch | 104 ++++++++++++++++++
 ...d-availability-check-in-mi_enum_attr.patch |  71 ++++++++++++
 queue-6.6/series                              |   2 +
 3 files changed, 177 insertions(+)
 create mode 100644 queue-6.6/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
 create mode 100644 queue-6.6/fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch

diff --git a/queue-6.6/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch b/queue-6.6/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
new file mode 100644
index 00000000000..c3db47adbb7
--- /dev/null
+++ b/queue-6.6/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
@@ -0,0 +1,104 @@
+From be0e822bb3f5259c7f9424ba97e8175211288813 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig
+Date: Mon, 28 Oct 2024 10:07:48 +0100
+Subject: block: fix queue limits checks in blk_rq_map_user_bvec for real
+
+From: Christoph Hellwig
+
+commit be0e822bb3f5259c7f9424ba97e8175211288813 upstream.
+
+blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits,
+and the last fix to it enabled valid NVMe I/O to pass, but also allowed
+invalid ones for drivers that set a max_segment_size or seg_boundary
+limit.
+
+Fix it once and for all by using the bio_split_rw_at helper from the I/O
+path that indicates if and where a bio would have to be split to adhere
+to the queue limits; if it returns a positive value, turn that into
+-EREMOTEIO to retry using the copy path.
+
+Fixes: 2ff949441802 ("block: fix sanity checks in blk_rq_map_user_bvec")
+Signed-off-by: Christoph Hellwig
+Reviewed-by: John Garry
+Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
+Signed-off-by: Jens Axboe
+Signed-off-by: Greg Kroah-Hartman
+---
+ block/blk-map.c | 56 +++++++++++++++++---------------------------------------
+ 1 file changed, 17 insertions(+), 39 deletions(-)
+
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
+ /* Prepare bio for passthrough IO given ITER_BVEC iter */
+ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+ {
+-	struct request_queue *q = rq->q;
+-	size_t nr_iter = iov_iter_count(iter);
+-	size_t nr_segs = iter->nr_segs;
+-	struct bio_vec *bvecs, *bvprvp = NULL;
+-	const struct queue_limits *lim = &q->limits;
+-	unsigned int nsegs = 0, bytes = 0;
++	const struct queue_limits *lim = &rq->q->limits;
++	unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
++	unsigned int nsegs;
+ 	struct bio *bio;
+-	size_t i;
++	int ret;
+ 
+-	if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+-		return -EINVAL;
+-	if (nr_segs > queue_max_segments(q))
++	if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
+ 		return -EINVAL;
+ 
+-	/* no iovecs to alloc, as we already have a BVEC iterator */
++	/* reuse the bvecs from the iterator instead of allocating new ones */
+ 	bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+-	if (bio == NULL)
++	if (!bio)
+ 		return -ENOMEM;
+-
+ 	bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+-	blk_rq_bio_prep(rq, bio, nr_segs);
+-
+-	/* loop to perform a bunch of sanity checks */
+-	bvecs = (struct bio_vec *)iter->bvec;
+-	for (i = 0; i < nr_segs; i++) {
+-		struct bio_vec *bv = &bvecs[i];
+ 
+-		/*
+-		 * If the queue doesn't support SG gaps and adding this
+-		 * offset would create a gap, fallback to copy.
+-		 */
+-		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+-			blk_mq_map_bio_put(bio);
+-			return -EREMOTEIO;
+-		}
+-		/* check full condition */
+-		if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+-			goto put_bio;
+-		if (bytes + bv->bv_len > nr_iter)
+-			break;
+-
+-		nsegs++;
+-		bytes += bv->bv_len;
+-		bvprvp = bv;
++	/* check that the data layout matches the hardware restrictions */
++	ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
++	if (ret) {
++		/* if we would have to split the bio, copy instead */
++		if (ret > 0)
++			ret = -EREMOTEIO;
++		blk_mq_map_bio_put(bio);
++		return ret;
+ 	}
++
++	blk_rq_bio_prep(rq, bio, nsegs);
+ 	return 0;
+-put_bio:
+-	blk_mq_map_bio_put(bio);
+-	return -EINVAL;
+ }
+ 
+ /**
diff --git a/queue-6.6/fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch b/queue-6.6/fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
new file mode 100644
index 00000000000..fd12392eb06
--- /dev/null
+++ b/queue-6.6/fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
@@ -0,0 +1,71 @@
+From 090f612756a9720ec18b0b130e28be49839d7cb5 Mon Sep 17 00:00:00 2001
+From: Konstantin Komarov
+Date: Thu, 5 Sep 2024 15:03:48 +0300
+Subject: fs/ntfs3: Sequential field availability check in mi_enum_attr()
+
+From: Konstantin Komarov
+
+commit 090f612756a9720ec18b0b130e28be49839d7cb5 upstream.
+
+The code is slightly reformatted to consistently check field availability
+without duplication.
+
+Fixes: 556bdf27c2dd ("ntfs3: Add bounds checking to mi_enum_attr()")
+Signed-off-by: Konstantin Komarov
+Signed-off-by: Greg Kroah-Hartman
+---
+ fs/ntfs3/record.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -237,6 +237,7 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 	}
+ 
+ 	/* Can we use the first field (attr->type). */
++	/* NOTE: this code also checks attr->size availability. */
+ 	if (off + 8 > used) {
+ 		static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
+ 		return NULL;
+@@ -257,10 +258,6 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 		return NULL;
+ 
+ 	asize = le32_to_cpu(attr->size);
+-	if (asize < SIZEOF_RESIDENT) {
+-		/* Impossible 'cause we should not return such attribute. */
+-		return NULL;
+-	}
+ 
+ 	/* Check overflow and boundary. */
+ 	if (off + asize < off || off + asize > used)
+@@ -290,6 +287,10 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 	if (attr->non_res != 1)
+ 		return NULL;
+ 
++	/* Can we use memory including attr->nres.valid_size? */
++	if (asize < SIZEOF_NONRESIDENT)
++		return NULL;
++
+ 	t16 = le16_to_cpu(attr->nres.run_off);
+ 	if (t16 > asize)
+ 		return NULL;
+@@ -316,7 +317,8 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 
+ 	if (!attr->nres.svcn && is_attr_ext(attr)) {
+ 		/* First segment of sparse/compressed attribute */
+-		if (asize + 8 < SIZEOF_NONRESIDENT_EX)
++		/* Can we use memory including attr->nres.total_size? */
++		if (asize < SIZEOF_NONRESIDENT_EX)
+ 			return NULL;
+ 
+ 		tot_size = le64_to_cpu(attr->nres.total_size);
+@@ -326,9 +328,6 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ 		if (tot_size > alloc_size)
+ 			return NULL;
+ 	} else {
+-		if (asize + 8 < SIZEOF_NONRESIDENT)
+-			return NULL;
+-
+ 		if (attr->nres.c_unit)
+ 			return NULL;
+ 
diff --git a/queue-6.6/series b/queue-6.6/series
index 5532a3ee91c..e602e9db8fd 100644
--- a/queue-6.6/series
+++ b/queue-6.6/series
@@ -148,3 +148,5 @@ asoc-sof-ipc4-topology-add-definition-for-generic-switch-enum-control.patch
 asoc-sof-ipc4-control-add-support-for-alsa-switch-control.patch
 asoc-sof-ipc4-control-add-support-for-alsa-enum-control.patch
 drm-amd-display-add-null-checks-for-stream-and-plane-before-dereferencing.patch
+fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
+block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
-- 
2.47.3