git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Drop block MQ backport from 6.6
author     Sasha Levin <sashal@kernel.org>
           Mon, 15 Dec 2025 01:34:07 +0000 (20:34 -0500)
committer  Sasha Levin <sashal@kernel.org>
           Mon, 15 Dec 2025 01:34:07 +0000 (20:34 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-6.6/block-mq-deadline-introduce-dd_start_request.patch [deleted file]
queue-6.6/block-mq-deadline-remove-support-for-zone-write-lock.patch [deleted file]
queue-6.6/block-mq-deadline-switch-back-to-a-single-dispatch-l.patch [deleted file]
queue-6.6/series

diff --git a/queue-6.6/block-mq-deadline-introduce-dd_start_request.patch b/queue-6.6/block-mq-deadline-introduce-dd_start_request.patch
deleted file mode 100644
index 23e643a..0000000
+++ /dev/null
@@ -1,74 +0,0 @@
-From 01ba5be1cc0a82131bae24f8cfd96c94415ace79 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 13 Oct 2025 12:28:02 -0700
-Subject: block/mq-deadline: Introduce dd_start_request()
-
-From: Bart Van Assche <bvanassche@acm.org>
-
-[ Upstream commit 93a358af59c6e8ab00b57cfdb1c437516a4948ca ]
-
-Prepare for adding a second caller of this function. No functionality
-has been changed.
-
-Cc: Damien Le Moal <dlemoal@kernel.org>
-Cc: Yu Kuai <yukuai@kernel.org>
-Cc: chengkaitao <chengkaitao@kylinos.cn>
-Signed-off-by: Bart Van Assche <bvanassche@acm.org>
-Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: d60055cf5270 ("block/mq-deadline: Switch back to a single dispatch list")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/mq-deadline.c | 22 ++++++++++++++--------
- 1 file changed, 14 insertions(+), 8 deletions(-)
-
-diff --git a/block/mq-deadline.c b/block/mq-deadline.c
-index 23638b03d7b3d..b62f534a389e3 100644
---- a/block/mq-deadline.c
-+++ b/block/mq-deadline.c
-@@ -338,6 +338,19 @@ static bool started_after(struct deadline_data *dd, struct request *rq,
-       return time_after(start_time, latest_start);
- }
-+static struct request *dd_start_request(struct deadline_data *dd,
-+                                      enum dd_data_dir data_dir,
-+                                      struct request *rq)
-+{
-+      u8 ioprio_class = dd_rq_ioclass(rq);
-+      enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
-+
-+      dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
-+      dd->per_prio[prio].stats.dispatched++;
-+      rq->rq_flags |= RQF_STARTED;
-+      return rq;
-+}
-+
- /*
-  * deadline_dispatch_requests selects the best request according to
-  * read/write expire, fifo_batch, etc and with a start time <= @latest_start.
-@@ -348,8 +361,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
- {
-       struct request *rq, *next_rq;
-       enum dd_data_dir data_dir;
--      enum dd_prio prio;
--      u8 ioprio_class;
-       lockdep_assert_held(&dd->lock);
-@@ -443,12 +454,7 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
-       dd->batching++;
-       deadline_move_request(dd, per_prio, rq);
- done:
--      ioprio_class = dd_rq_ioclass(rq);
--      prio = ioprio_class_to_prio[ioprio_class];
--      dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
--      dd->per_prio[prio].stats.dispatched++;
--      rq->rq_flags |= RQF_STARTED;
--      return rq;
-+      return dd_start_request(dd, data_dir, rq);
- }
- /*
--- 
-2.51.0
-
diff --git a/queue-6.6/block-mq-deadline-remove-support-for-zone-write-lock.patch b/queue-6.6/block-mq-deadline-remove-support-for-zone-write-lock.patch
deleted file mode 100644
index 95c8c3b..0000000
+++ /dev/null
@@ -1,320 +0,0 @@
-From bf2022eaa2291ad1243b0711d5bd03ba4105ffbb Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 8 Apr 2024 10:41:21 +0900
-Subject: block: mq-deadline: Remove support for zone write locking
-
-From: Damien Le Moal <dlemoal@kernel.org>
-
-[ Upstream commit fde02699c242e88a71286677d27cc890a959b67f ]
-
-With the block layer generic plugging of write operations for zoned
-block devices, mq-deadline, or any other scheduler, can only ever
-see at most one write operation per zone at any time. There are thus no
-sequentiality requirements for these writes and thus no need to tightly
-control the dispatching of write requests using zone write locking.
-
-Remove all the code that implements this control in the mq-deadline
-scheduler and remove advertising support for the
-ELEVATOR_F_ZBD_SEQ_WRITE elevator feature.
-
-Signed-off-by: Damien Le Moal <dlemoal@kernel.org>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Bart Van Assche <bvanassche@acm.org>
-Tested-by: Hans Holmberg <hans.holmberg@wdc.com>
-Tested-by: Dennis Maisenbacher <dennis.maisenbacher@wdc.com>
-Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
-Link: https://lore.kernel.org/r/20240408014128.205141-22-dlemoal@kernel.org
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: d60055cf5270 ("block/mq-deadline: Switch back to a single dispatch list")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/mq-deadline.c | 176 ++------------------------------------------
- 1 file changed, 6 insertions(+), 170 deletions(-)
-
-diff --git a/block/mq-deadline.c b/block/mq-deadline.c
-index 78a8aa204c156..23638b03d7b3d 100644
---- a/block/mq-deadline.c
-+++ b/block/mq-deadline.c
-@@ -102,7 +102,6 @@ struct deadline_data {
-       int prio_aging_expire;
-       spinlock_t lock;
--      spinlock_t zone_lock;
- };
- /* Maps an I/O priority class to a deadline scheduler priority. */
-@@ -157,8 +156,7 @@ deadline_latter_request(struct request *rq)
- }
- /*
-- * Return the first request for which blk_rq_pos() >= @pos. For zoned devices,
-- * return the first request after the start of the zone containing @pos.
-+ * Return the first request for which blk_rq_pos() >= @pos.
-  */
- static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
-                               enum dd_data_dir data_dir, sector_t pos)
-@@ -170,14 +168,6 @@ static inline struct request *deadline_from_pos(struct dd_per_prio *per_prio,
-               return NULL;
-       rq = rb_entry_rq(node);
--      /*
--       * A zoned write may have been requeued with a starting position that
--       * is below that of the most recently dispatched request. Hence, for
--       * zoned writes, start searching from the start of a zone.
--       */
--      if (blk_rq_is_seq_zoned_write(rq))
--              pos = round_down(pos, rq->q->limits.chunk_sectors);
--
-       while (node) {
-               rq = rb_entry_rq(node);
-               if (blk_rq_pos(rq) >= pos) {
-@@ -308,36 +298,6 @@ static inline bool deadline_check_fifo(struct dd_per_prio *per_prio,
-       return time_is_before_eq_jiffies((unsigned long)rq->fifo_time);
- }
--/*
-- * Check if rq has a sequential request preceding it.
-- */
--static bool deadline_is_seq_write(struct deadline_data *dd, struct request *rq)
--{
--      struct request *prev = deadline_earlier_request(rq);
--
--      if (!prev)
--              return false;
--
--      return blk_rq_pos(prev) + blk_rq_sectors(prev) == blk_rq_pos(rq);
--}
--
--/*
-- * Skip all write requests that are sequential from @rq, even if we cross
-- * a zone boundary.
-- */
--static struct request *deadline_skip_seq_writes(struct deadline_data *dd,
--                                              struct request *rq)
--{
--      sector_t pos = blk_rq_pos(rq);
--
--      do {
--              pos += blk_rq_sectors(rq);
--              rq = deadline_latter_request(rq);
--      } while (rq && blk_rq_pos(rq) == pos);
--
--      return rq;
--}
--
- /*
-  * For the specified data direction, return the next request to
-  * dispatch using arrival ordered lists.
-@@ -346,40 +306,10 @@ static struct request *
- deadline_fifo_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     enum dd_data_dir data_dir)
- {
--      struct request *rq, *rb_rq, *next;
--      unsigned long flags;
--
-       if (list_empty(&per_prio->fifo_list[data_dir]))
-               return NULL;
--      rq = rq_entry_fifo(per_prio->fifo_list[data_dir].next);
--      if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
--              return rq;
--
--      /*
--       * Look for a write request that can be dispatched, that is one with
--       * an unlocked target zone. For some HDDs, breaking a sequential
--       * write stream can lead to lower throughput, so make sure to preserve
--       * sequential write streams, even if that stream crosses into the next
--       * zones and these zones are unlocked.
--       */
--      spin_lock_irqsave(&dd->zone_lock, flags);
--      list_for_each_entry_safe(rq, next, &per_prio->fifo_list[DD_WRITE],
--                               queuelist) {
--              /* Check whether a prior request exists for the same zone. */
--              rb_rq = deadline_from_pos(per_prio, data_dir, blk_rq_pos(rq));
--              if (rb_rq && blk_rq_pos(rb_rq) < blk_rq_pos(rq))
--                      rq = rb_rq;
--              if (blk_req_can_dispatch_to_zone(rq) &&
--                  (blk_queue_nonrot(rq->q) ||
--                   !deadline_is_seq_write(dd, rq)))
--                      goto out;
--      }
--      rq = NULL;
--out:
--      spin_unlock_irqrestore(&dd->zone_lock, flags);
--
--      return rq;
-+      return rq_entry_fifo(per_prio->fifo_list[data_dir].next);
- }
- /*
-@@ -390,36 +320,8 @@ static struct request *
- deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
-                     enum dd_data_dir data_dir)
- {
--      struct request *rq;
--      unsigned long flags;
--
--      rq = deadline_from_pos(per_prio, data_dir,
--                             per_prio->latest_pos[data_dir]);
--      if (!rq)
--              return NULL;
--
--      if (data_dir == DD_READ || !blk_queue_is_zoned(rq->q))
--              return rq;
--
--      /*
--       * Look for a write request that can be dispatched, that is one with
--       * an unlocked target zone. For some HDDs, breaking a sequential
--       * write stream can lead to lower throughput, so make sure to preserve
--       * sequential write streams, even if that stream crosses into the next
--       * zones and these zones are unlocked.
--       */
--      spin_lock_irqsave(&dd->zone_lock, flags);
--      while (rq) {
--              if (blk_req_can_dispatch_to_zone(rq))
--                      break;
--              if (blk_queue_nonrot(rq->q))
--                      rq = deadline_latter_request(rq);
--              else
--                      rq = deadline_skip_seq_writes(dd, rq);
--      }
--      spin_unlock_irqrestore(&dd->zone_lock, flags);
--
--      return rq;
-+      return deadline_from_pos(per_prio, data_dir,
-+                               per_prio->latest_pos[data_dir]);
- }
- /*
-@@ -525,10 +427,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
-               rq = next_rq;
-       }
--      /*
--       * For a zoned block device, if we only have writes queued and none of
--       * them can be dispatched, rq will be NULL.
--       */
-       if (!rq)
-               return NULL;
-@@ -549,10 +447,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
-       prio = ioprio_class_to_prio[ioprio_class];
-       dd->per_prio[prio].latest_pos[data_dir] = blk_rq_pos(rq);
-       dd->per_prio[prio].stats.dispatched++;
--      /*
--       * If the request needs its target zone locked, do it.
--       */
--      blk_req_zone_write_lock(rq);
-       rq->rq_flags |= RQF_STARTED;
-       return rq;
- }
-@@ -736,7 +630,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
-       dd->fifo_batch = fifo_batch;
-       dd->prio_aging_expire = prio_aging_expire;
-       spin_lock_init(&dd->lock);
--      spin_lock_init(&dd->zone_lock);
-       /* We dispatch from request queue wide instead of hw queue */
-       blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
-@@ -818,12 +711,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-       lockdep_assert_held(&dd->lock);
--      /*
--       * This may be a requeue of a write request that has locked its
--       * target zone. If it is the case, this releases the zone lock.
--       */
--      blk_req_zone_write_unlock(rq);
--
-       prio = ioprio_class_to_prio[ioprio_class];
-       per_prio = &dd->per_prio[prio];
-       if (!rq->elv.priv[0]) {
-@@ -855,18 +742,6 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-                */
-               rq->fifo_time = jiffies + dd->fifo_expire[data_dir];
-               insert_before = &per_prio->fifo_list[data_dir];
--#ifdef CONFIG_BLK_DEV_ZONED
--              /*
--               * Insert zoned writes such that requests are sorted by
--               * position per zone.
--               */
--              if (blk_rq_is_seq_zoned_write(rq)) {
--                      struct request *rq2 = deadline_latter_request(rq);
--
--                      if (rq2 && blk_rq_zone_no(rq2) == blk_rq_zone_no(rq))
--                              insert_before = &rq2->queuelist;
--              }
--#endif
-               list_add_tail(&rq->queuelist, insert_before);
-       }
- }
-@@ -901,33 +776,8 @@ static void dd_prepare_request(struct request *rq)
-       rq->elv.priv[0] = NULL;
- }
--static bool dd_has_write_work(struct blk_mq_hw_ctx *hctx)
--{
--      struct deadline_data *dd = hctx->queue->elevator->elevator_data;
--      enum dd_prio p;
--
--      for (p = 0; p <= DD_PRIO_MAX; p++)
--              if (!list_empty_careful(&dd->per_prio[p].fifo_list[DD_WRITE]))
--                      return true;
--
--      return false;
--}
--
- /*
-  * Callback from inside blk_mq_free_request().
-- *
-- * For zoned block devices, write unlock the target zone of
-- * completed write requests. Do this while holding the zone lock
-- * spinlock so that the zone is never unlocked while deadline_fifo_request()
-- * or deadline_next_request() are executing. This function is called for
-- * all requests, whether or not these requests complete successfully.
-- *
-- * For a zoned block device, __dd_dispatch_request() may have stopped
-- * dispatching requests if all the queued requests are write requests directed
-- * at zones that are already locked due to on-going write requests. To ensure
-- * write request dispatch progress in this case, mark the queue as needing a
-- * restart to ensure that the queue is run again after completion of the
-- * request and zones being unlocked.
-  */
- static void dd_finish_request(struct request *rq)
- {
-@@ -942,21 +792,8 @@ static void dd_finish_request(struct request *rq)
-        * called dd_insert_requests(). Skip requests that bypassed I/O
-        * scheduling. See also blk_mq_request_bypass_insert().
-        */
--      if (!rq->elv.priv[0])
--              return;
--
--      atomic_inc(&per_prio->stats.completed);
--
--      if (blk_queue_is_zoned(q)) {
--              unsigned long flags;
--
--              spin_lock_irqsave(&dd->zone_lock, flags);
--              blk_req_zone_write_unlock(rq);
--              spin_unlock_irqrestore(&dd->zone_lock, flags);
--
--              if (dd_has_write_work(rq->mq_hctx))
--                      blk_mq_sched_mark_restart_hctx(rq->mq_hctx);
--      }
-+      if (rq->elv.priv[0])
-+              atomic_inc(&per_prio->stats.completed);
- }
- static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
-@@ -1280,7 +1117,6 @@ static struct elevator_type mq_deadline = {
-       .elevator_attrs = deadline_attrs,
-       .elevator_name = "mq-deadline",
-       .elevator_alias = "deadline",
--      .elevator_features = ELEVATOR_F_ZBD_SEQ_WRITE,
-       .elevator_owner = THIS_MODULE,
- };
- MODULE_ALIAS("mq-deadline-iosched");
--- 
-2.51.0
-
diff --git a/queue-6.6/block-mq-deadline-switch-back-to-a-single-dispatch-l.patch b/queue-6.6/block-mq-deadline-switch-back-to-a-single-dispatch-l.patch
deleted file mode 100644
index e42da31..0000000
+++ /dev/null
@@ -1,227 +0,0 @@
-From 220216b3c0fa5760e6f83e6ad8f76dde2e35d6ad Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 13 Oct 2025 12:28:03 -0700
-Subject: block/mq-deadline: Switch back to a single dispatch list
-
-From: Bart Van Assche <bvanassche@acm.org>
-
-[ Upstream commit d60055cf52703a705b86fb25b9b7931ec7ee399c ]
-
-Commit c807ab520fc3 ("block/mq-deadline: Add I/O priority support")
-modified the behavior of request flag BLK_MQ_INSERT_AT_HEAD from
-dispatching a request before other requests into dispatching a request
-before other requests with the same I/O priority. This is not correct since
-BLK_MQ_INSERT_AT_HEAD is used when requeuing requests and also when a flush
-request is inserted.  Both types of requests should be dispatched as soon
-as possible. Hence, make the mq-deadline I/O scheduler again ignore the I/O
-priority for BLK_MQ_INSERT_AT_HEAD requests.
-
-Cc: Damien Le Moal <dlemoal@kernel.org>
-Cc: Yu Kuai <yukuai@kernel.org>
-Reported-by: chengkaitao <chengkaitao@kylinos.cn>
-Closes: https://lore.kernel.org/linux-block/20251009155253.14611-1-pilgrimtao@gmail.com/
-Fixes: c807ab520fc3 ("block/mq-deadline: Add I/O priority support")
-Signed-off-by: Bart Van Assche <bvanassche@acm.org>
-Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/mq-deadline.c | 107 +++++++++++++++++++-------------------------
- 1 file changed, 47 insertions(+), 60 deletions(-)
-
-diff --git a/block/mq-deadline.c b/block/mq-deadline.c
-index b62f534a389e3..437eaa8b09e68 100644
---- a/block/mq-deadline.c
-+++ b/block/mq-deadline.c
-@@ -71,7 +71,6 @@ struct io_stats_per_prio {
-  * present on both sort_list[] and fifo_list[].
-  */
- struct dd_per_prio {
--      struct list_head dispatch;
-       struct rb_root sort_list[DD_DIR_COUNT];
-       struct list_head fifo_list[DD_DIR_COUNT];
-       /* Position of the most recently dispatched request. */
-@@ -84,6 +83,7 @@ struct deadline_data {
-        * run time data
-        */
-+      struct list_head dispatch;
-       struct dd_per_prio per_prio[DD_PRIO_COUNT];
-       /* Data direction of latest dispatched request. */
-@@ -364,16 +364,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
-       lockdep_assert_held(&dd->lock);
--      if (!list_empty(&per_prio->dispatch)) {
--              rq = list_first_entry(&per_prio->dispatch, struct request,
--                                    queuelist);
--              if (started_after(dd, rq, latest_start))
--                      return NULL;
--              list_del_init(&rq->queuelist);
--              data_dir = rq_data_dir(rq);
--              goto done;
--      }
--
-       /*
-        * batches are currently reads XOR writes
-        */
-@@ -453,7 +443,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
-        */
-       dd->batching++;
-       deadline_move_request(dd, per_prio, rq);
--done:
-       return dd_start_request(dd, data_dir, rq);
- }
-@@ -501,6 +490,14 @@ static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
-       enum dd_prio prio;
-       spin_lock(&dd->lock);
-+
-+      if (!list_empty(&dd->dispatch)) {
-+              rq = list_first_entry(&dd->dispatch, struct request, queuelist);
-+              list_del_init(&rq->queuelist);
-+              dd_start_request(dd, rq_data_dir(rq), rq);
-+              goto unlock;
-+      }
-+
-       rq = dd_dispatch_prio_aged_requests(dd, now);
-       if (rq)
-               goto unlock;
-@@ -619,10 +616,10 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
-       eq->elevator_data = dd;
-+      INIT_LIST_HEAD(&dd->dispatch);
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-               struct dd_per_prio *per_prio = &dd->per_prio[prio];
--              INIT_LIST_HEAD(&per_prio->dispatch);
-               INIT_LIST_HEAD(&per_prio->fifo_list[DD_READ]);
-               INIT_LIST_HEAD(&per_prio->fifo_list[DD_WRITE]);
-               per_prio->sort_list[DD_READ] = RB_ROOT;
-@@ -730,7 +727,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
-       trace_block_rq_insert(rq);
-       if (flags & BLK_MQ_INSERT_AT_HEAD) {
--              list_add(&rq->queuelist, &per_prio->dispatch);
-+              list_add(&rq->queuelist, &dd->dispatch);
-               rq->fifo_time = jiffies;
-       } else {
-               struct list_head *insert_before;
-@@ -804,8 +801,7 @@ static void dd_finish_request(struct request *rq)
- static bool dd_has_work_for_prio(struct dd_per_prio *per_prio)
- {
--      return !list_empty_careful(&per_prio->dispatch) ||
--              !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
-+      return !list_empty_careful(&per_prio->fifo_list[DD_READ]) ||
-               !list_empty_careful(&per_prio->fifo_list[DD_WRITE]);
- }
-@@ -814,6 +810,9 @@ static bool dd_has_work(struct blk_mq_hw_ctx *hctx)
-       struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-       enum dd_prio prio;
-+      if (!list_empty_careful(&dd->dispatch))
-+              return true;
-+
-       for (prio = 0; prio <= DD_PRIO_MAX; prio++)
-               if (dd_has_work_for_prio(&dd->per_prio[prio]))
-                       return true;
-@@ -1022,49 +1021,39 @@ static int dd_owned_by_driver_show(void *data, struct seq_file *m)
-       return 0;
- }
--#define DEADLINE_DISPATCH_ATTR(prio)                                  \
--static void *deadline_dispatch##prio##_start(struct seq_file *m,      \
--                                           loff_t *pos)               \
--      __acquires(&dd->lock)                                           \
--{                                                                     \
--      struct request_queue *q = m->private;                           \
--      struct deadline_data *dd = q->elevator->elevator_data;          \
--      struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
--                                                                      \
--      spin_lock(&dd->lock);                                           \
--      return seq_list_start(&per_prio->dispatch, *pos);               \
--}                                                                     \
--                                                                      \
--static void *deadline_dispatch##prio##_next(struct seq_file *m,               \
--                                          void *v, loff_t *pos)       \
--{                                                                     \
--      struct request_queue *q = m->private;                           \
--      struct deadline_data *dd = q->elevator->elevator_data;          \
--      struct dd_per_prio *per_prio = &dd->per_prio[prio];             \
--                                                                      \
--      return seq_list_next(v, &per_prio->dispatch, pos);              \
--}                                                                     \
--                                                                      \
--static void deadline_dispatch##prio##_stop(struct seq_file *m, void *v)       \
--      __releases(&dd->lock)                                           \
--{                                                                     \
--      struct request_queue *q = m->private;                           \
--      struct deadline_data *dd = q->elevator->elevator_data;          \
--                                                                      \
--      spin_unlock(&dd->lock);                                         \
--}                                                                     \
--                                                                      \
--static const struct seq_operations deadline_dispatch##prio##_seq_ops = { \
--      .start  = deadline_dispatch##prio##_start,                      \
--      .next   = deadline_dispatch##prio##_next,                       \
--      .stop   = deadline_dispatch##prio##_stop,                       \
--      .show   = blk_mq_debugfs_rq_show,                               \
-+static void *deadline_dispatch_start(struct seq_file *m, loff_t *pos)
-+      __acquires(&dd->lock)
-+{
-+      struct request_queue *q = m->private;
-+      struct deadline_data *dd = q->elevator->elevator_data;
-+
-+      spin_lock(&dd->lock);
-+      return seq_list_start(&dd->dispatch, *pos);
- }
--DEADLINE_DISPATCH_ATTR(0);
--DEADLINE_DISPATCH_ATTR(1);
--DEADLINE_DISPATCH_ATTR(2);
--#undef DEADLINE_DISPATCH_ATTR
-+static void *deadline_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
-+{
-+      struct request_queue *q = m->private;
-+      struct deadline_data *dd = q->elevator->elevator_data;
-+
-+      return seq_list_next(v, &dd->dispatch, pos);
-+}
-+
-+static void deadline_dispatch_stop(struct seq_file *m, void *v)
-+      __releases(&dd->lock)
-+{
-+      struct request_queue *q = m->private;
-+      struct deadline_data *dd = q->elevator->elevator_data;
-+
-+      spin_unlock(&dd->lock);
-+}
-+
-+static const struct seq_operations deadline_dispatch_seq_ops = {
-+      .start  = deadline_dispatch_start,
-+      .next   = deadline_dispatch_next,
-+      .stop   = deadline_dispatch_stop,
-+      .show   = blk_mq_debugfs_rq_show,
-+};
- #define DEADLINE_QUEUE_DDIR_ATTRS(name)                                       \
-       {#name "_fifo_list", 0400,                                      \
-@@ -1087,9 +1076,7 @@ static const struct blk_mq_debugfs_attr deadline_queue_debugfs_attrs[] = {
-       {"batching", 0400, deadline_batching_show},
-       {"starved", 0400, deadline_starved_show},
-       {"async_depth", 0400, dd_async_depth_show},
--      {"dispatch0", 0400, .seq_ops = &deadline_dispatch0_seq_ops},
--      {"dispatch1", 0400, .seq_ops = &deadline_dispatch1_seq_ops},
--      {"dispatch2", 0400, .seq_ops = &deadline_dispatch2_seq_ops},
-+      {"dispatch", 0400, .seq_ops = &deadline_dispatch_seq_ops},
-       {"owned_by_driver", 0400, dd_owned_by_driver_show},
-       {"queued", 0400, dd_queued_show},
-       {},
--- 
-2.51.0
-
diff --git a/queue-6.6/series b/queue-6.6/series
index 519474af90a936719a7e938b17d78cb854782fdc..f4db078f82044de47ad0d82097d25fcf25acbd89 100644
@@ -71,9 +71,6 @@ ntfs3-fix-uninit-buffer-allocated-by-__getname.patch
 rculist-add-hlist_nulls_replace_rcu-and-hlist_nulls_.patch
 inet-avoid-ehash-lookup-race-in-inet_ehash_insert.patch
 iio-imu-st_lsm6dsx-fix-measurement-unit-for-odr-stru.patch
-block-mq-deadline-remove-support-for-zone-write-lock.patch
-block-mq-deadline-introduce-dd_start_request.patch
-block-mq-deadline-switch-back-to-a-single-dispatch-l.patch
 arm64-dts-freescale-imx8mp-venice-gw7905-2x-remove-d.patch
 arm64-dts-imx8mm-venice-gw72xx-remove-unused-sdhc1-p.patch
 arm64-dts-imx8mp-venice-gw702x-remove-off-board-uart.patch