git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
drop block patch
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 6 Nov 2024 11:10:03 +0000 (12:10 +0100)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 6 Nov 2024 11:10:03 +0000 (12:10 +0100)
queue-6.11/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch [deleted file]
queue-6.11/series
queue-6.6/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch [deleted file]
queue-6.6/series

diff --git a/queue-6.11/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch b/queue-6.11/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
deleted file mode 100644 (file)
index c3db47a..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-From be0e822bb3f5259c7f9424ba97e8175211288813 Mon Sep 17 00:00:00 2001
-From: Christoph Hellwig <hch@lst.de>
-Date: Mon, 28 Oct 2024 10:07:48 +0100
-Subject: block: fix queue limits checks in blk_rq_map_user_bvec for real
-
-From: Christoph Hellwig <hch@lst.de>
-
-commit be0e822bb3f5259c7f9424ba97e8175211288813 upstream.
-
-blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits,
-and the last fix to it enabled valid NVMe I/O to pass, but also allowed
-invalid ones for drivers that set a max_segment_size or seg_boundary
-limit.
-
-Fix it once and for all by using the bio_split_rw_at helper from the I/O
-path that indicates if and where a bio would have to be split to
-adhere to the queue limits, and if it returns a positive value, turn that
-into -EREMOTEIO to retry using the copy path.
-
-Fixes: 2ff949441802 ("block: fix sanity checks in blk_rq_map_user_bvec")
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: John Garry <john.g.garry@oracle.com>
-Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- block/blk-map.c |   56 +++++++++++++++++---------------------------------------
- 1 file changed, 17 insertions(+), 39 deletions(-)
-
---- a/block/blk-map.c
-+++ b/block/blk-map.c
-@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
- /* Prepare bio for passthrough IO given ITER_BVEC iter */
- static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
- {
--      struct request_queue *q = rq->q;
--      size_t nr_iter = iov_iter_count(iter);
--      size_t nr_segs = iter->nr_segs;
--      struct bio_vec *bvecs, *bvprvp = NULL;
--      const struct queue_limits *lim = &q->limits;
--      unsigned int nsegs = 0, bytes = 0;
-+      const struct queue_limits *lim = &rq->q->limits;
-+      unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
-+      unsigned int nsegs;
-       struct bio *bio;
--      size_t i;
-+      int ret;
--      if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
--              return -EINVAL;
--      if (nr_segs > queue_max_segments(q))
-+      if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
-               return -EINVAL;
--      /* no iovecs to alloc, as we already have a BVEC iterator */
-+      /* reuse the bvecs from the iterator instead of allocating new ones */
-       bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
--      if (bio == NULL)
-+      if (!bio)
-               return -ENOMEM;
--
-       bio_iov_bvec_set(bio, (struct iov_iter *)iter);
--      blk_rq_bio_prep(rq, bio, nr_segs);
--
--      /* loop to perform a bunch of sanity checks */
--      bvecs = (struct bio_vec *)iter->bvec;
--      for (i = 0; i < nr_segs; i++) {
--              struct bio_vec *bv = &bvecs[i];
--              /*
--               * If the queue doesn't support SG gaps and adding this
--               * offset would create a gap, fallback to copy.
--               */
--              if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
--                      blk_mq_map_bio_put(bio);
--                      return -EREMOTEIO;
--              }
--              /* check full condition */
--              if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
--                      goto put_bio;
--              if (bytes + bv->bv_len > nr_iter)
--                      break;
--
--              nsegs++;
--              bytes += bv->bv_len;
--              bvprvp = bv;
-+      /* check that the data layout matches the hardware restrictions */
-+      ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
-+      if (ret) {
-+              /* if we would have to split the bio, copy instead */
-+              if (ret > 0)
-+                      ret = -EREMOTEIO;
-+              blk_mq_map_bio_put(bio);
-+              return ret;
-       }
-+
-+      blk_rq_bio_prep(rq, bio, nsegs);
-       return 0;
--put_bio:
--      blk_mq_map_bio_put(bio);
--      return -EINVAL;
- }
- /**
index 8d052f95570486679ad64afe61543d95c2961a39..001923712600dad35e1c18f8c07dfe724547df57 100644 (file)
@@ -243,4 +243,3 @@ drm-amdgpu-swsmu-fix-ordering-for-setting-workload_mask.patch
 drm-amdgpu-swsmu-default-to-fullscreen-3d-profile-for-dgpus.patch
 fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
 drm-amdgpu-handle-default-profile-on-on-devices-without-fullscreen-3d.patch
-block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
diff --git a/queue-6.6/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch b/queue-6.6/block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch
deleted file mode 100644 (file)
index c3db47a..0000000
+++ /dev/null
@@ -1,104 +0,0 @@
-From be0e822bb3f5259c7f9424ba97e8175211288813 Mon Sep 17 00:00:00 2001
-From: Christoph Hellwig <hch@lst.de>
-Date: Mon, 28 Oct 2024 10:07:48 +0100
-Subject: block: fix queue limits checks in blk_rq_map_user_bvec for real
-
-From: Christoph Hellwig <hch@lst.de>
-
-commit be0e822bb3f5259c7f9424ba97e8175211288813 upstream.
-
-blk_rq_map_user_bvec currently only has ad-hoc checks for queue limits,
-and the last fix to it enabled valid NVMe I/O to pass, but also allowed
-invalid ones for drivers that set a max_segment_size or seg_boundary
-limit.
-
-Fix it once and for all by using the bio_split_rw_at helper from the I/O
-path that indicates if and where a bio would have to be split to
-adhere to the queue limits, and if it returns a positive value, turn that
-into -EREMOTEIO to retry using the copy path.
-
-Fixes: 2ff949441802 ("block: fix sanity checks in blk_rq_map_user_bvec")
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: John Garry <john.g.garry@oracle.com>
-Link: https://lore.kernel.org/r/20241028090840.446180-1-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
----
- block/blk-map.c |   56 +++++++++++++++++---------------------------------------
- 1 file changed, 17 insertions(+), 39 deletions(-)
-
---- a/block/blk-map.c
-+++ b/block/blk-map.c
-@@ -561,55 +561,33 @@ EXPORT_SYMBOL(blk_rq_append_bio);
- /* Prepare bio for passthrough IO given ITER_BVEC iter */
- static int blk_rq_map_user_bvec(struct request *rq, const struct iov_iter *iter)
- {
--      struct request_queue *q = rq->q;
--      size_t nr_iter = iov_iter_count(iter);
--      size_t nr_segs = iter->nr_segs;
--      struct bio_vec *bvecs, *bvprvp = NULL;
--      const struct queue_limits *lim = &q->limits;
--      unsigned int nsegs = 0, bytes = 0;
-+      const struct queue_limits *lim = &rq->q->limits;
-+      unsigned int max_bytes = lim->max_hw_sectors << SECTOR_SHIFT;
-+      unsigned int nsegs;
-       struct bio *bio;
--      size_t i;
-+      int ret;
--      if (!nr_iter || (nr_iter >> SECTOR_SHIFT) > queue_max_hw_sectors(q))
--              return -EINVAL;
--      if (nr_segs > queue_max_segments(q))
-+      if (!iov_iter_count(iter) || iov_iter_count(iter) > max_bytes)
-               return -EINVAL;
--      /* no iovecs to alloc, as we already have a BVEC iterator */
-+      /* reuse the bvecs from the iterator instead of allocating new ones */
-       bio = blk_rq_map_bio_alloc(rq, 0, GFP_KERNEL);
--      if (bio == NULL)
-+      if (!bio)
-               return -ENOMEM;
--
-       bio_iov_bvec_set(bio, (struct iov_iter *)iter);
--      blk_rq_bio_prep(rq, bio, nr_segs);
--
--      /* loop to perform a bunch of sanity checks */
--      bvecs = (struct bio_vec *)iter->bvec;
--      for (i = 0; i < nr_segs; i++) {
--              struct bio_vec *bv = &bvecs[i];
--              /*
--               * If the queue doesn't support SG gaps and adding this
--               * offset would create a gap, fallback to copy.
--               */
--              if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv->bv_offset)) {
--                      blk_mq_map_bio_put(bio);
--                      return -EREMOTEIO;
--              }
--              /* check full condition */
--              if (nsegs >= nr_segs || bytes > UINT_MAX - bv->bv_len)
--                      goto put_bio;
--              if (bytes + bv->bv_len > nr_iter)
--                      break;
--
--              nsegs++;
--              bytes += bv->bv_len;
--              bvprvp = bv;
-+      /* check that the data layout matches the hardware restrictions */
-+      ret = bio_split_rw_at(bio, lim, &nsegs, max_bytes);
-+      if (ret) {
-+              /* if we would have to split the bio, copy instead */
-+              if (ret > 0)
-+                      ret = -EREMOTEIO;
-+              blk_mq_map_bio_put(bio);
-+              return ret;
-       }
-+
-+      blk_rq_bio_prep(rq, bio, nsegs);
-       return 0;
--put_bio:
--      blk_mq_map_bio_put(bio);
--      return -EINVAL;
- }
- /**
index e602e9db8fd011f8fa02753385f5008de382dca4..a1be5146b157a24e52cc12440c65a99b8b0e30b8 100644 (file)
@@ -149,4 +149,3 @@ asoc-sof-ipc4-control-add-support-for-alsa-switch-control.patch
 asoc-sof-ipc4-control-add-support-for-alsa-enum-control.patch
 drm-amd-display-add-null-checks-for-stream-and-plane-before-dereferencing.patch
 fs-ntfs3-sequential-field-availability-check-in-mi_enum_attr.patch
-block-fix-queue-limits-checks-in-blk_rq_map_user_bvec-for-real.patch