5.17-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 30 Mar 2022 15:18:00 +0000 (17:18 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 30 Mar 2022 15:18:00 +0000 (17:18 +0200)
added patches:
block-ensure-plug-merging-checks-the-correct-queue-at-least-once.patch
block-flush-plug-based-on-hardware-and-software-queue-order.patch

queue-5.17/block-ensure-plug-merging-checks-the-correct-queue-at-least-once.patch [new file with mode: 0644]
queue-5.17/block-flush-plug-based-on-hardware-and-software-queue-order.patch [new file with mode: 0644]
queue-5.17/series

diff --git a/queue-5.17/block-ensure-plug-merging-checks-the-correct-queue-at-least-once.patch b/queue-5.17/block-ensure-plug-merging-checks-the-correct-queue-at-least-once.patch
new file mode 100644
index 0000000..289a787
--- /dev/null
+++ b/queue-5.17/block-ensure-plug-merging-checks-the-correct-queue-at-least-once.patch
@@ -0,0 +1,57 @@
+From 5b2050718d095cd3242d1f42aaaea3a2fec8e6f0 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Fri, 11 Mar 2022 10:21:43 -0700
+Subject: block: ensure plug merging checks the correct queue at least once
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 5b2050718d095cd3242d1f42aaaea3a2fec8e6f0 upstream.
+
+Song reports that a RAID rebuild workload has recently been running much
+slower, and that it is seeing a lot less merging than it did previously.
+The reason is that a previous commit reduced the amount of work we do for
+plug merging. RAID rebuild interleaves requests between disks, so a
+last-entry check in plug merging always misses a merge opportunity since
+we always find a different disk than the one we are looking for.
+
+Modify the logic such that it's still a one-hit cache, but ensure that
+we check enough to find the right target before giving up.
+
+Fixes: d38a9c04c0d5 ("block: only check previous entry for plug merge attempt")
+Reported-and-tested-by: Song Liu <song@kernel.org>
+Reviewed-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-merge.c |   20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+--- a/block/blk-merge.c
++++ b/block/blk-merge.c
+@@ -1089,12 +1089,20 @@ bool blk_attempt_plug_merge(struct reque
+       if (!plug || rq_list_empty(plug->mq_list))
+               return false;
+-      /* check the previously added entry for a quick merge attempt */
+-      rq = rq_list_peek(&plug->mq_list);
+-      if (rq->q == q) {
+-              if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
+-                              BIO_MERGE_OK)
+-                      return true;
++      rq_list_for_each(&plug->mq_list, rq) {
++              if (rq->q == q) {
++                      if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
++                          BIO_MERGE_OK)
++                              return true;
++                      break;
++              }
++
++              /*
++               * Only keep iterating plug list for merges if we have multiple
++               * queues
++               */
++              if (!plug->multiple_queues)
++                      break;
+       }
+       return false;
+ }
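The change above turns the head-only check into a bounded scan: with multiple queues plugged, keep walking until a request on the target queue is found, then attempt exactly one merge and stop either way. The following is a minimal userspace sketch of that control flow; mock_queue, mock_request, and try_merge() are hypothetical stand-ins (not kernel APIs), and only the loop structure mirrors the patch.

/* Hypothetical userspace model of the plug-merge scan above, not kernel
 * code. The list is newest-first, like plug->mq_list. */
#include <stdbool.h>
#include <stdio.h>

struct mock_queue { int id; };

struct mock_request {
	struct mock_queue *q;
	struct mock_request *next;
};

/* Stand-in for blk_attempt_bio_merge(): pretend the bio always merges
 * once we find a request on the same queue. */
static bool try_merge(struct mock_request *rq) { (void)rq; return true; }

static bool plug_merge(struct mock_request *list, struct mock_queue *q,
		       bool multiple_queues)
{
	for (struct mock_request *rq = list; rq; rq = rq->next) {
		if (rq->q == q)
			return try_merge(rq);	/* one-hit cache: stop either way */
		if (!multiple_queues)
			break;			/* single queue: head was our only shot */
	}
	return false;
}

int main(void)
{
	struct mock_queue disk0 = { 0 }, disk1 = { 1 };
	/* RAID-rebuild-style interleaving: the newest plugged request
	 * targets disk1, but the incoming bio is for disk0. */
	struct mock_request r0 = { &disk0, NULL };
	struct mock_request r1 = { &disk1, &r0 };

	/* The old head-only check would see disk1 and give up; the scan
	 * keeps walking (multiple queues) and finds the disk0 entry. */
	printf("merged: %s\n", plug_merge(&r1, &disk0, true) ? "yes" : "no");
	return 0;
}

This keeps the common single-queue case a one-hit cache while bounding the extra work for interleaved workloads to one walk of the plugged list.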
diff --git a/queue-5.17/block-flush-plug-based-on-hardware-and-software-queue-order.patch b/queue-5.17/block-flush-plug-based-on-hardware-and-software-queue-order.patch
new file mode 100644
index 0000000..50aba3c
--- /dev/null
+++ b/queue-5.17/block-flush-plug-based-on-hardware-and-software-queue-order.patch
@@ -0,0 +1,106 @@
+From 26fed4ac4eab09c27fbae1859696cc38f0536407 Mon Sep 17 00:00:00 2001
+From: Jens Axboe <axboe@kernel.dk>
+Date: Fri, 11 Mar 2022 10:24:17 -0700
+Subject: block: flush plug based on hardware and software queue order
+
+From: Jens Axboe <axboe@kernel.dk>
+
+commit 26fed4ac4eab09c27fbae1859696cc38f0536407 upstream.
+
+We used to sort the plug list if we had multiple queues before dispatching
+requests to the IO scheduler. This usually isn't needed, but for certain
+workloads that interleave requests to disks, it's less efficient to
+process the plug list one-by-one if everything is interleaved.
+
+Don't sort the list, but skip through it and flush out entries that have
+the same target at the same time.
+
+Fixes: df87eb0fce8f ("block: get rid of plug list sorting")
+Reported-and-tested-by: Song Liu <song@kernel.org>
+Reviewed-by: Song Liu <songliubraving@fb.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-mq.c |   59 +++++++++++++++++++++++++++------------------------------
+ 1 file changed, 28 insertions(+), 31 deletions(-)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -2561,13 +2561,36 @@ static void __blk_mq_flush_plug_list(str
+       q->mq_ops->queue_rqs(&plug->mq_list);
+ }
++static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched)
++{
++      struct blk_mq_hw_ctx *this_hctx = NULL;
++      struct blk_mq_ctx *this_ctx = NULL;
++      struct request *requeue_list = NULL;
++      unsigned int depth = 0;
++      LIST_HEAD(list);
++
++      do {
++              struct request *rq = rq_list_pop(&plug->mq_list);
++
++              if (!this_hctx) {
++                      this_hctx = rq->mq_hctx;
++                      this_ctx = rq->mq_ctx;
++              } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
++                      rq_list_add(&requeue_list, rq);
++                      continue;
++              }
++              list_add_tail(&rq->queuelist, &list);
++              depth++;
++      } while (!rq_list_empty(plug->mq_list));
++
++      plug->mq_list = requeue_list;
++      trace_block_unplug(this_hctx->queue, depth, !from_sched);
++      blk_mq_sched_insert_requests(this_hctx, this_ctx, &list, from_sched);
++}
++
+ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
+ {
+-      struct blk_mq_hw_ctx *this_hctx;
+-      struct blk_mq_ctx *this_ctx;
+       struct request *rq;
+-      unsigned int depth;
+-      LIST_HEAD(list);
+       if (rq_list_empty(plug->mq_list))
+               return;
+@@ -2603,35 +2626,9 @@ void blk_mq_flush_plug_list(struct blk_p
+                       return;
+       }
+-      this_hctx = NULL;
+-      this_ctx = NULL;
+-      depth = 0;
+       do {
+-              rq = rq_list_pop(&plug->mq_list);
+-
+-              if (!this_hctx) {
+-                      this_hctx = rq->mq_hctx;
+-                      this_ctx = rq->mq_ctx;
+-              } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) {
+-                      trace_block_unplug(this_hctx->queue, depth,
+-                                              !from_schedule);
+-                      blk_mq_sched_insert_requests(this_hctx, this_ctx,
+-                                              &list, from_schedule);
+-                      depth = 0;
+-                      this_hctx = rq->mq_hctx;
+-                      this_ctx = rq->mq_ctx;
+-
+-              }
+-
+-              list_add(&rq->queuelist, &list);
+-              depth++;
++              blk_mq_dispatch_plug_list(plug, from_schedule);
+       } while (!rq_list_empty(plug->mq_list));
+-
+-      if (!list_empty(&list)) {
+-              trace_block_unplug(this_hctx->queue, depth, !from_schedule);
+-              blk_mq_sched_insert_requests(this_hctx, this_ctx, &list,
+-                                              from_schedule);
+-      }
+ }
+ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
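The rewrite above replaces the old sort with repeated same-context passes: each pass pops every plugged request, batches those matching the first request's (hctx, ctx) pair, requeues the rest, and dispatches the batch in one call; the outer loop repeats until the requeue list is empty. Below is a minimal userspace sketch under those assumptions; entry, ctx_key, pop()/push(), and dispatch_batch() are hypothetical stand-ins, not kernel APIs.

/* Hypothetical userspace model of the batched flush above, not kernel
 * code. ctx_key stands in for the (mq_hctx, mq_ctx) pair. */
#include <stdio.h>

struct entry {
	int ctx_key;
	struct entry *next;
};

static struct entry *pop(struct entry **list)
{
	struct entry *e = *list;
	if (e)
		*list = e->next;
	return e;
}

static void push(struct entry **list, struct entry *e)
{
	e->next = *list;
	*list = e;
}

/* Stand-in for blk_mq_sched_insert_requests(): consume one same-context
 * batch in a single call. */
static void dispatch_batch(int ctx_key, int depth)
{
	printf("dispatch ctx %d, depth %d\n", ctx_key, depth);
}

static void flush_plug(struct entry *list)
{
	while (list) {			/* outer loop: one batch per pass */
		struct entry *requeue = NULL;
		int key = -1, depth = 0;

		do {			/* inner loop: drain the list */
			struct entry *e = pop(&list);

			if (depth == 0)
				key = e->ctx_key;	/* first entry picks the target */
			else if (e->ctx_key != key) {
				push(&requeue, e);	/* different target: defer */
				continue;
			}
			depth++;	/* same target: add to the batch */
		} while (list);

		dispatch_batch(key, depth);
		list = requeue;		/* next pass flushes what was deferred */
	}
}

int main(void)
{
	/* Interleaved contexts 0/1/0/1: flushed as two batches of depth 2
	 * instead of four one-request dispatches. */
	struct entry e3 = { 1, NULL }, e2 = { 0, &e3 };
	struct entry e1 = { 1, &e2 }, e0 = { 0, &e1 };

	flush_plug(&e0);
	return 0;
}

Each pass costs one linear walk of the remaining list, but an interleaved plug collapses into a handful of batched dispatches rather than one scheduler insert per request, without paying for a sort in the common case.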
diff --git a/queue-5.17/series b/queue-5.17/series
index 1c965170b053eb0485024e41587a60ff0944627d..e0a7eb9d6dbbf759f60b8de3dc0da9ab42e2dd5a 100644
--- a/queue-5.17/series
+++ b/queue-5.17/series
@@ -8,3 +8,5 @@ locking-lockdep-avoid-potential-access-of-invalid-memory-in-lock_class.patch
 drm-amdgpu-move-px-checking-into-amdgpu_device_ip_early_init.patch
 drm-amdgpu-only-check-for-_pr3-on-dgpus.patch
 iommu-iova-improve-32-bit-free-space-estimate.patch
+block-flush-plug-based-on-hardware-and-software-queue-order.patch
+block-ensure-plug-merging-checks-the-correct-queue-at-least-once.patch