--- /dev/null
+From be0e822bb3f5259c7f9424ba97e8175211288813 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 28 Oct 2024 10:07:48 +0100
+Subject: block: fix queue limits checks in blk_rq_map_user_bvec for real
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit be0e822bb3f5259c7f9424ba97e8175211288813 upstream.
+
+blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits,
+and the last fix to it enabled valid NVMe I/O to pass, but also allowed
+invalid I/O for drivers that set a max_segment_size or seg_boundary
+limit.
+
+Fix it once and for all by using the bio_split_rw_at helper from the
+I/O path, which indicates if and where a bio would have to be split to
+adhere to the queue limits.  If it returns a positive value, turn that
+into -EREMOTEIO to retry using the copy path.
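+
+As a rough illustration of the resulting control flow, here is a minimal
+userspace sketch; the helper names (check_layout, map_user_bvec) are made
+up for illustration and are not kernel APIs:
+
+    #include <errno.h>
+    #include <stdio.h>
+
+    /* Illustrative stand-in for bio_split_rw_at(): returns 0 if the
+     * data layout fits the limits, a positive offset at which a split
+     * would be needed, or a negative errno on invalid input. */
+    static int check_layout(size_t len, size_t max_bytes)
+    {
+            if (!len)
+                    return -EINVAL;
+            if (len > max_bytes)
+                    return (int)max_bytes;  /* would have to split here */
+            return 0;
+    }
+
+    /* Illustrative mapper: a positive "split needed" result is turned
+     * into -EREMOTEIO so the caller retries via the copy path instead
+     * of issuing I/O that violates the queue limits. */
+    static int map_user_bvec(size_t len, size_t max_bytes)
+    {
+            int ret = check_layout(len, max_bytes);
+
+            return ret > 0 ? -EREMOTEIO : ret;
+    }
+
+    int main(void)
+    {
+            printf("%d\n", map_user_bvec(4096, 8192));  /* 0: map directly */
+            printf("%d\n", map_user_bvec(16384, 8192)); /* -EREMOTEIO: copy */
+            return 0;
+    }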
+
+Fixes: 2ff949441802 ("block: fix sanity checks in blk_rq_map_user_bvec")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-map.c | 56 +++++++++++++++++---------------------------------------
+ 1 file changed, 17 insertions(+), 39 deletions(-)
+
+--- a/block/blk-map.c
++++ b/block/blk-map.c
+@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
+ /* Prepare bio for passthrough IO given ITER_BVEC iter */
+ static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
+ {
+- struct request_queue *q = rq->q;
+- size_t nr_iter = iov_iter_count(iter);
+- size_t nr_segs = iter->nr_segs;
+- struct bio_vec *bvecs, *bvprvp = NULL;
+- const struct queue_limits *lim = &q->limits;
+- unsigned int nsegs = 0, bytes = 0;
++ const struct queue_limits *lim = &rq->q->limits;
++ unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
++ unsigned int nsegs;
+ struct bio *bio;
+- size_t i;
++ int ret;
+
+- if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
+- return -EINVAL;
+- if (nr_segs > queue_max_segments(q))
++ if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
+ return -EINVAL;
+
+- /* no iovecs to alloc, as we already have a BVEC iterator */
++ /* reuse the bvecs from the iterator instead of allocating new ones */
+ bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
+- if (bio == NULL)
++ if (!bio)
+ return -ENOMEM;
+-
+ bio_iov_bvec_set(bio, (struct iov_iter *)iter);
+- blk_rq_bio_prep(rq, bio, nr_segs);
+-
+- /* loop to perform a bunch of sanity checks */
+- bvecs = (struct bio_vec *)iter->bvec;
+- for (i = 0; i < nr_segs; i++) {
+- struct bio_vec *bv = &bvecs[i];
+
+- /*
+- * If the queue doesn't support SG gaps and adding this
+- * offset would create a gap, fallback to copy.
+- */
+- if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
+- blk_mq_map_bio_put(bio);
+- return -EREMOTEIO;
+- }
+- /* check full condition */
+- if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
+- goto put_bio;
+- if (bytes + bv->bv_len > nr_iter)
+- break;
+-
+- nsegs++;
+- bytes += bv->bv_len;
+- bvprvp = bv;
++ /* check that the data layout matches the hardware restrictions */
++ ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
++ if (ret) {
++ /* if we would have to split the bio, copy instead */
++ if (ret > 0)
++ ret = -EREMOTEIO;
++ blk_mq_map_bio_put(bio);
++ return ret;
+ }
++
++ blk_rq_bio_prep(rq, bio, nsegs);
+ return 0;
+-put_bio:
+- blk_mq_map_bio_put(bio);
+- return -EINVAL;
+ }
+
+ /**
--- /dev/null
+From 090f612756a9720ec18b0b130e28be49839d7cb5 Mon Sep 17 00:00:00 2001
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Date: Thu, 5 Sep 2024 15:03:48 +0300
+Subject: fs/ntfs3: Sequential field availability check in mi_enum_attr()
+
+From: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+
+commit 090f612756a9720ec18b0b130e28be49839d7cb5 upstream.
+
+The code is slightly reformatted so that the availability of each field
+is checked immediately before that field is used, without duplicating
+size checks.
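+
+A minimal sketch of the checking pattern; the struct and field names
+below are illustrative and do not match the on-disk NTFS layout.  Each
+size check sits directly before the fields it guards are read:
+
+    #include <stddef.h>
+    #include <stdint.h>
+    #include <string.h>
+
+    /* Illustrative fixed header, always present. */
+    struct hdr {
+            uint32_t type;
+            uint32_t size;          /* total size of this entry */
+    };
+
+    /* Illustrative extended form with an extra field. */
+    struct ext {
+            struct hdr h;
+            uint64_t valid_size;    /* only present in larger entries */
+    };
+
+    /* Return the entry at 'off', or NULL if any field that is about to
+     * be read would fall outside the 'used' bytes of the buffer. */
+    static const struct hdr *get_entry(const unsigned char *buf,
+                                       size_t used, size_t off)
+    {
+            const struct hdr *h;
+
+            /* Can we read the fixed header (type and size)? */
+            if (off + sizeof(struct hdr) > used)
+                    return NULL;
+            h = (const struct hdr *)(buf + off);
+
+            /* Overflow and boundary check on the advertised size. */
+            if (off + h->size < off || off + h->size > used)
+                    return NULL;
+
+            /* Can we use memory including valid_size?  Checked only
+             * right before that field would be accessed. */
+            if (h->size >= sizeof(struct ext)) {
+                    /* ((const struct ext *)h)->valid_size is usable */
+            }
+            return h;
+    }
+
+    int main(void)
+    {
+            unsigned char rec[64];
+            struct ext e = { .h = { .type = 1, .size = sizeof(e) } };
+
+            memcpy(rec, &e, sizeof(e));
+            return get_entry(rec, sizeof(rec), 0) ? 0 : 1;
+    }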
+
+Fixes: 556bdf27c2dd ("ntfs3: Add bounds checking to mi_enum_attr()")
+Signed-off-by: Konstantin Komarov <almaz.alexandrovich@paragon-software.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ntfs3/record.c | 15 +++++++--------
+ 1 file changed, 7 insertions(+), 8 deletions(-)
+
+--- a/fs/ntfs3/record.c
++++ b/fs/ntfs3/record.c
+@@ -237,6 +237,7 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ }
+
+ /* Can we use the first field (attr->type). */
++ /* NOTE: this code also checks attr->size availability. */
+ if (off + 8 > used) {
+ static_assert(ALIGN(sizeof(enum ATTR_TYPE), 8) == 8);
+ return NULL;
+@@ -257,10 +258,6 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ return NULL;
+
+ asize = le32_to_cpu(attr->size);
+- if (asize < SIZEOF_RESIDENT) {
+- /* Impossible 'cause we should not return such attribute. */
+- return NULL;
+- }
+
+ /* Check overflow and boundary. */
+ if (off + asize < off || off + asize > used)
+@@ -290,6 +287,10 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ if (attr->non_res != 1)
+ return NULL;
+
++ /* Can we use memory including attr->nres.valid_size? */
++ if (asize < SIZEOF_NONRESIDENT)
++ return NULL;
++
+ t16 = le16_to_cpu(attr->nres.run_off);
+ if (t16 > asize)
+ return NULL;
+@@ -316,7 +317,8 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+
+ if (!attr->nres.svcn && is_attr_ext(attr)) {
+ /* First segment of sparse/compressed attribute */
+- if (asize + 8 < SIZEOF_NONRESIDENT_EX)
++ /* Can we use memory including attr->nres.total_size? */
++ if (asize < SIZEOF_NONRESIDENT_EX)
+ return NULL;
+
+ tot_size = le64_to_cpu(attr->nres.total_size);
+@@ -326,9 +328,6 @@ struct ATTRIB *mi_enum_attr(struct mft_i
+ if (tot_size > alloc_size)
+ return NULL;
+ } else {
+- if (asize + 8 < SIZEOF_NONRESIDENT)
+- return NULL;
+-
+ if (attr->nres.c_unit)
+ return NULL;
+