git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Drop 6.1 backport of f814bdda774c
author Sasha Levin <sashal@kernel.org>
Fri, 15 Mar 2024 14:44:53 +0000 (10:44 -0400)
committer Sasha Levin <sashal@kernel.org>
Fri, 15 Mar 2024 14:44:53 +0000 (10:44 -0400)
15 files changed:
queue-6.1/blk-iocost-disable-writeback-throttling.patch [deleted file]
queue-6.1/blk-iocost-pass-gendisk-to-ioc_refresh_params.patch [deleted file]
queue-6.1/blk-rq-qos-constify-rq_qos_ops.patch [deleted file]
queue-6.1/blk-rq-qos-make-rq_qos_add-and-rq_qos_del-more-usefu.patch [deleted file]
queue-6.1/blk-rq-qos-move-rq_qos_add-and-rq_qos_del-out-of-lin.patch [deleted file]
queue-6.1/blk-rq-qos-store-a-gendisk-instead-of-request_queue-.patch [deleted file]
queue-6.1/blk-wbt-don-t-enable-throttling-if-default-elevator-.patch [deleted file]
queue-6.1/blk-wbt-fix-detection-of-dirty-throttled-tasks.patch [deleted file]
queue-6.1/blk-wbt-fix-that-wbt-can-t-be-disabled-by-default.patch [deleted file]
queue-6.1/blk-wbt-pass-a-gendisk-to-wbt_-enable-disable-_defau.patch [deleted file]
queue-6.1/blk-wbt-pass-a-gendisk-to-wbt_init.patch [deleted file]
queue-6.1/blk-wbt-remove-unnecessary-check-in-wbt_enable_defau.patch [deleted file]
queue-6.1/elevator-add-new-field-flags-in-struct-elevator_queu.patch [deleted file]
queue-6.1/elevator-remove-redundant-code-in-elv_unregister_que.patch [deleted file]
queue-6.1/series

diff --git a/queue-6.1/blk-iocost-disable-writeback-throttling.patch b/queue-6.1/blk-iocost-disable-writeback-throttling.patch
deleted file mode 100644 (file)
index 7225520..0000000
+++ /dev/null
@@ -1,46 +0,0 @@
-From c287453564ed11a8d05e35a279e773fa882d33a3 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 12 Oct 2022 17:40:32 +0800
-Subject: blk-iocost: disable writeback throttling
-
-From: Yu Kuai <yukuai3@huawei.com>
-
-[ Upstream commit 8796acbc9a0eceeddd99eaef833bdda1241d39b9 ]
-
-Commit b5dc5d4d1f4f ("block,bfq: Disable writeback throttling") disables
-wbt for bfq, because different write-throttling heuristics should not
-work together.
-
-For the same reason, wbt and iocost should not work together either,
-unless the admin really wants that, despite the performance
-impact.
-
-Signed-off-by: Yu Kuai <yukuai3@huawei.com>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20221012094035.390056-2-yukuai1@huaweicloud.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-iocost.c | 2 ++
- 1 file changed, 2 insertions(+)
-
-diff --git a/block/blk-iocost.c b/block/blk-iocost.c
-index e6557024e3da8..3788774a7b729 100644
---- a/block/blk-iocost.c
-+++ b/block/blk-iocost.c
-@@ -3281,9 +3281,11 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
-               blk_stat_enable_accounting(disk->queue);
-               blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
-               ioc->enabled = true;
-+              wbt_disable_default(disk->queue);
-       } else {
-               blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
-               ioc->enabled = false;
-+              wbt_enable_default(disk->queue);
-       }
-       if (user) {
--- 
-2.43.0
-
diff --git a/queue-6.1/blk-iocost-pass-gendisk-to-ioc_refresh_params.patch b/queue-6.1/blk-iocost-pass-gendisk-to-ioc_refresh_params.patch
deleted file mode 100644 (file)
index 788ce87..0000000
+++ /dev/null
@@ -1,138 +0,0 @@
-From 4d92df6c36fe4a84ee71df9bbf00ad1bf65633f5 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 28 Feb 2023 03:16:54 -0800
-Subject: blk-iocost: Pass gendisk to ioc_refresh_params
-
-From: Breno Leitao <leitao@debian.org>
-
-[ Upstream commit e33b93650fc5364f773985a3e961e24349330d97 ]
-
-The current kernel (d2980d8d826554fa6981d621e569a453787472f8) crashes
-in blk_iocost_init() for the `nvme1` disk.
-
-       BUG: kernel NULL pointer dereference, address: 0000000000000050
-       #PF: supervisor read access in kernel mode
-       #PF: error_code(0x0000) - not-present page
-
-       blk_iocost_init (include/asm-generic/qspinlock.h:128
-                        include/linux/spinlock.h:203
-                        include/linux/spinlock_api_smp.h:158
-                        include/linux/spinlock.h:400
-                        block/blk-iocost.c:2884)
-       ioc_qos_write (block/blk-iocost.c:3198)
-       ? kretprobe_perf_func (kernel/trace/trace_kprobe.c:1566)
-       ? kernfs_fop_write_iter (include/linux/slab.h:584 fs/kernfs/file.c:311)
-       ? __kmem_cache_alloc_node (mm/slab.h:? mm/slub.c:3452 mm/slub.c:3491)
-       ? _copy_from_iter (arch/x86/include/asm/uaccess_64.h:46
-                          arch/x86/include/asm/uaccess_64.h:52
-                          lib/iov_iter.c:183 lib/iov_iter.c:628)
-       ? kretprobe_dispatcher (kernel/trace/trace_kprobe.c:1693)
-       cgroup_file_write (kernel/cgroup/cgroup.c:4061)
-       kernfs_fop_write_iter (fs/kernfs/file.c:334)
-       vfs_write (include/linux/fs.h:1849 fs/read_write.c:491
-                  fs/read_write.c:584)
-       ksys_write (fs/read_write.c:637)
-       do_syscall_64 (arch/x86/entry/common.c:50 arch/x86/entry/common.c:80)
-       entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:120)
-
-This happens because ioc_refresh_params() is being called without
-a properly initialized ioc->rqos; that initialization only happens
-later, in the callee.
-
-ioc_refresh_params() -> ioc_autop_idx() tries to access
-ioc->rqos.disk->queue but ioc->rqos.disk is NULL, causing the BUG above.
-
-Create a function, ioc_refresh_params_disk(), that is similar to
-ioc_refresh_params() but takes the "struct gendisk" as an explicit
-argument. This function will be called when ioc->rqos.disk is not
-initialized.
-
-Fixes: ce57b558604e ("blk-rq-qos: make rq_qos_add and rq_qos_del more useful")
-
-Signed-off-by: Breno Leitao <leitao@debian.org>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230228111654.1778120-1-leitao@debian.org
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-iocost.c | 26 ++++++++++++++++++++------
- 1 file changed, 20 insertions(+), 6 deletions(-)
-
-diff --git a/block/blk-iocost.c b/block/blk-iocost.c
-index ab5830ba23e0f..0d4bc9d8f2cac 100644
---- a/block/blk-iocost.c
-+++ b/block/blk-iocost.c
-@@ -801,7 +801,11 @@ static void ioc_refresh_period_us(struct ioc *ioc)
-       ioc_refresh_margins(ioc);
- }
--static int ioc_autop_idx(struct ioc *ioc)
-+/*
-+ *  ioc->rqos.disk isn't initialized when this function is called from
-+ *  the init path.
-+ */
-+static int ioc_autop_idx(struct ioc *ioc, struct gendisk *disk)
- {
-       int idx = ioc->autop_idx;
-       const struct ioc_params *p = &autop[idx];
-@@ -809,11 +813,11 @@ static int ioc_autop_idx(struct ioc *ioc)
-       u64 now_ns;
-       /* rotational? */
--      if (!blk_queue_nonrot(ioc->rqos.disk->queue))
-+      if (!blk_queue_nonrot(disk->queue))
-               return AUTOP_HDD;
-       /* handle SATA SSDs w/ broken NCQ */
--      if (blk_queue_depth(ioc->rqos.disk->queue) == 1)
-+      if (blk_queue_depth(disk->queue) == 1)
-               return AUTOP_SSD_QD1;
-       /* use one of the normal ssd sets */
-@@ -902,14 +906,19 @@ static void ioc_refresh_lcoefs(struct ioc *ioc)
-                   &c[LCOEF_WPAGE], &c[LCOEF_WSEQIO], &c[LCOEF_WRANDIO]);
- }
--static bool ioc_refresh_params(struct ioc *ioc, bool force)
-+/*
-+ * struct gendisk is required as an argument because ioc->rqos.disk
-+ * is not properly initialized when called from the init path.
-+ */
-+static bool ioc_refresh_params_disk(struct ioc *ioc, bool force,
-+                                  struct gendisk *disk)
- {
-       const struct ioc_params *p;
-       int idx;
-       lockdep_assert_held(&ioc->lock);
--      idx = ioc_autop_idx(ioc);
-+      idx = ioc_autop_idx(ioc, disk);
-       p = &autop[idx];
-       if (idx == ioc->autop_idx && !force)
-@@ -938,6 +947,11 @@ static bool ioc_refresh_params(struct ioc *ioc, bool force)
-       return true;
- }
-+static bool ioc_refresh_params(struct ioc *ioc, bool force)
-+{
-+      return ioc_refresh_params_disk(ioc, force, ioc->rqos.disk);
-+}
-+
- /*
-  * When an iocg accumulates too much vtime or gets deactivated, we throw away
-  * some vtime, which lowers the overall device utilization. As the exact amount
-@@ -2884,7 +2898,7 @@ static int blk_iocost_init(struct gendisk *disk)
-       spin_lock_irq(&ioc->lock);
-       ioc->autop_idx = AUTOP_INVALID;
--      ioc_refresh_params(ioc, true);
-+      ioc_refresh_params_disk(ioc, true, disk);
-       spin_unlock_irq(&ioc->lock);
-       /*
--- 
-2.43.0
-
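
The fix above follows a common initialization pattern: when a back-pointer such
as ioc->rqos.disk is only wired up later, the helper takes the dependency as an
explicit argument instead of reaching through the object. Below is a minimal,
self-contained userspace sketch of that pattern; the names and structures are
illustrative only, not the kernel API.

/*
 * Hypothetical userspace sketch (not kernel code) of the pattern used by
 * ioc_refresh_params_disk(): during init the back-pointer (rqos.disk here)
 * is not set up yet, so the helper takes the disk as an explicit argument
 * instead of reading it through the object.
 */
#include <stdio.h>
#include <stddef.h>

struct disk { int nonrot; };

struct rqos { struct disk *disk; };

struct ioc { struct rqos rqos; int autop_idx; };

/* Works both before and after ioc->rqos.disk is wired up. */
static int refresh_params_disk(struct ioc *ioc, struct disk *disk)
{
	ioc->autop_idx = disk->nonrot ? 1 : 0;
	return ioc->autop_idx;
}

/* Convenience wrapper for the common case where the back-pointer exists. */
static int refresh_params(struct ioc *ioc)
{
	return refresh_params_disk(ioc, ioc->rqos.disk);
}

int main(void)
{
	struct disk d = { .nonrot = 1 };
	struct ioc ioc = { .rqos = { .disk = NULL } };

	/* Init path: ioc.rqos.disk is still NULL, pass the disk explicitly. */
	printf("init idx=%d\n", refresh_params_disk(&ioc, &d));

	/* Later, after the equivalent of rq_qos_add() set the back-pointer. */
	ioc.rqos.disk = &d;
	printf("runtime idx=%d\n", refresh_params(&ioc));
	return 0;
}
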
diff --git a/queue-6.1/blk-rq-qos-constify-rq_qos_ops.patch b/queue-6.1/blk-rq-qos-constify-rq_qos_ops.patch
deleted file mode 100644 (file)
index 594a636..0000000
+++ /dev/null
@@ -1,103 +0,0 @@
-From aa235b97093a21478dc99fd9638fc62d88af5f17 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Feb 2023 16:03:55 +0100
-Subject: blk-rq-qos: constify rq_qos_ops
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 3963d84df7974b6687cb34bce3b9e0b2686f839c ]
-
-These op vectors are constant, so mark them const.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230203150400.3199230-15-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-iocost.c    | 2 +-
- block/blk-iolatency.c | 2 +-
- block/blk-rq-qos.c    | 2 +-
- block/blk-rq-qos.h    | 4 ++--
- block/blk-wbt.c       | 2 +-
- 5 files changed, 6 insertions(+), 6 deletions(-)
-
-diff --git a/block/blk-iocost.c b/block/blk-iocost.c
-index a8a7d2ce927b9..78958c5bece08 100644
---- a/block/blk-iocost.c
-+++ b/block/blk-iocost.c
-@@ -2836,7 +2836,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos)
-       kfree(ioc);
- }
--static struct rq_qos_ops ioc_rqos_ops = {
-+static const struct rq_qos_ops ioc_rqos_ops = {
-       .throttle = ioc_rqos_throttle,
-       .merge = ioc_rqos_merge,
-       .done_bio = ioc_rqos_done_bio,
-diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
-index c64cfec34ac37..b0f8550f87cd2 100644
---- a/block/blk-iolatency.c
-+++ b/block/blk-iolatency.c
-@@ -651,7 +651,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
-       kfree(blkiolat);
- }
--static struct rq_qos_ops blkcg_iolatency_ops = {
-+static const struct rq_qos_ops blkcg_iolatency_ops = {
-       .throttle = blkcg_iolatency_throttle,
-       .done_bio = blkcg_iolatency_done_bio,
-       .exit = blkcg_iolatency_exit,
-diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
-index 14bee1bd76136..8e83734cfe8db 100644
---- a/block/blk-rq-qos.c
-+++ b/block/blk-rq-qos.c
-@@ -296,7 +296,7 @@ void rq_qos_exit(struct request_queue *q)
- }
- int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
--              struct rq_qos_ops *ops)
-+              const struct rq_qos_ops *ops)
- {
-       struct request_queue *q = disk->queue;
-diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
-index 22552785aa31e..2b7b668479f71 100644
---- a/block/blk-rq-qos.h
-+++ b/block/blk-rq-qos.h
-@@ -25,7 +25,7 @@ struct rq_wait {
- };
- struct rq_qos {
--      struct rq_qos_ops *ops;
-+      const struct rq_qos_ops *ops;
-       struct request_queue *q;
-       enum rq_qos_id id;
-       struct rq_qos *next;
-@@ -86,7 +86,7 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
- }
- int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
--              struct rq_qos_ops *ops);
-+              const struct rq_qos_ops *ops);
- void rq_qos_del(struct rq_qos *rqos);
- typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index aec4e37c89c4a..d9398347b08d8 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -808,7 +808,7 @@ static const struct blk_mq_debugfs_attr wbt_debugfs_attrs[] = {
- };
- #endif
--static struct rq_qos_ops wbt_rqos_ops = {
-+static const struct rq_qos_ops wbt_rqos_ops = {
-       .throttle = wbt_wait,
-       .issue = wbt_issue,
-       .track = wbt_track,
--- 
-2.43.0
-
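
Marking the op vector const works because the table of function pointers is
never modified after it is defined; the object then only needs a pointer to
const. A small standalone C sketch of that shape, using demo names rather than
the real blk-rq-qos types:

#include <stdio.h>

struct qos_ops {
	void (*throttle)(const char *name);
	void (*exit)(const char *name);
};

struct qos {
	const struct qos_ops *ops;	/* pointer to a const op table */
	const char *name;
};

static void demo_throttle(const char *name) { printf("%s: throttle\n", name); }
static void demo_exit(const char *name)     { printf("%s: exit\n", name); }

/* The table itself lives in read-only data and is shared by all instances. */
static const struct qos_ops demo_ops = {
	.throttle = demo_throttle,
	.exit = demo_exit,
};

int main(void)
{
	struct qos q = { .ops = &demo_ops, .name = "demo" };

	q.ops->throttle(q.name);
	q.ops->exit(q.name);
	return 0;
}
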
diff --git a/queue-6.1/blk-rq-qos-make-rq_qos_add-and-rq_qos_del-more-usefu.patch b/queue-6.1/blk-rq-qos-make-rq_qos_add-and-rq_qos_del-more-usefu.patch
deleted file mode 100644 (file)
index 348877d..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-From 581958da857b8e9faf3303ba6ebc2f7e0b7a15fe Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Feb 2023 16:03:54 +0100
-Subject: blk-rq-qos: make rq_qos_add and rq_qos_del more useful
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit ce57b558604e68277d31ca5ce49ec4579a8618c5 ]
-
-Switch to passing a gendisk, and make rq_qos_add initialize all required
-fields and drop the no-longer-required q argument from rq_qos_del.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230203150400.3199230-14-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-iocost.c    | 13 +++----------
- block/blk-iolatency.c | 14 ++++----------
- block/blk-rq-qos.c    | 13 ++++++++++---
- block/blk-rq-qos.h    |  5 +++--
- block/blk-wbt.c       |  5 +----
- 5 files changed, 21 insertions(+), 29 deletions(-)
-
-diff --git a/block/blk-iocost.c b/block/blk-iocost.c
-index 72ca07f24b3c0..a8a7d2ce927b9 100644
---- a/block/blk-iocost.c
-+++ b/block/blk-iocost.c
-@@ -2847,9 +2847,7 @@ static struct rq_qos_ops ioc_rqos_ops = {
- static int blk_iocost_init(struct gendisk *disk)
- {
--      struct request_queue *q = disk->queue;
-       struct ioc *ioc;
--      struct rq_qos *rqos;
-       int i, cpu, ret;
-       ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
-@@ -2872,11 +2870,6 @@ static int blk_iocost_init(struct gendisk *disk)
-               local64_set(&ccs->rq_wait_ns, 0);
-       }
--      rqos = &ioc->rqos;
--      rqos->id = RQ_QOS_COST;
--      rqos->ops = &ioc_rqos_ops;
--      rqos->q = q;
--
-       spin_lock_init(&ioc->lock);
-       timer_setup(&ioc->timer, ioc_timer_fn, 0);
-       INIT_LIST_HEAD(&ioc->active_iocgs);
-@@ -2900,17 +2893,17 @@ static int blk_iocost_init(struct gendisk *disk)
-        * called before policy activation completion, can't assume that the
-        * target bio has an iocg associated and need to test for NULL iocg.
-        */
--      ret = rq_qos_add(q, rqos);
-+      ret = rq_qos_add(&ioc->rqos, disk, RQ_QOS_COST, &ioc_rqos_ops);
-       if (ret)
-               goto err_free_ioc;
--      ret = blkcg_activate_policy(q, &blkcg_policy_iocost);
-+      ret = blkcg_activate_policy(disk->queue, &blkcg_policy_iocost);
-       if (ret)
-               goto err_del_qos;
-       return 0;
- err_del_qos:
--      rq_qos_del(q, rqos);
-+      rq_qos_del(&ioc->rqos);
- err_free_ioc:
-       free_percpu(ioc->pcpu_stat);
-       kfree(ioc);
-diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
-index 571fa95aafe96..c64cfec34ac37 100644
---- a/block/blk-iolatency.c
-+++ b/block/blk-iolatency.c
-@@ -758,24 +758,18 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
- int blk_iolatency_init(struct gendisk *disk)
- {
--      struct request_queue *q = disk->queue;
-       struct blk_iolatency *blkiolat;
--      struct rq_qos *rqos;
-       int ret;
-       blkiolat = kzalloc(sizeof(*blkiolat), GFP_KERNEL);
-       if (!blkiolat)
-               return -ENOMEM;
--      rqos = &blkiolat->rqos;
--      rqos->id = RQ_QOS_LATENCY;
--      rqos->ops = &blkcg_iolatency_ops;
--      rqos->q = q;
--
--      ret = rq_qos_add(q, rqos);
-+      ret = rq_qos_add(&blkiolat->rqos, disk, RQ_QOS_LATENCY,
-+                       &blkcg_iolatency_ops);
-       if (ret)
-               goto err_free;
--      ret = blkcg_activate_policy(q, &blkcg_policy_iolatency);
-+      ret = blkcg_activate_policy(disk->queue, &blkcg_policy_iolatency);
-       if (ret)
-               goto err_qos_del;
-@@ -785,7 +779,7 @@ int blk_iolatency_init(struct gendisk *disk)
-       return 0;
- err_qos_del:
--      rq_qos_del(q, rqos);
-+      rq_qos_del(&blkiolat->rqos);
- err_free:
-       kfree(blkiolat);
-       return ret;
-diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
-index aae98dcb01ebe..14bee1bd76136 100644
---- a/block/blk-rq-qos.c
-+++ b/block/blk-rq-qos.c
-@@ -295,8 +295,15 @@ void rq_qos_exit(struct request_queue *q)
-       }
- }
--int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
-+int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
-+              struct rq_qos_ops *ops)
- {
-+      struct request_queue *q = disk->queue;
-+
-+      rqos->q = q;
-+      rqos->id = id;
-+      rqos->ops = ops;
-+
-       /*
-        * No IO can be in-flight when adding rqos, so freeze queue, which
-        * is fine since we only support rq_qos for blk-mq queue.
-@@ -326,11 +333,11 @@ int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
-       spin_unlock_irq(&q->queue_lock);
-       blk_mq_unfreeze_queue(q);
-       return -EBUSY;
--
- }
--void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
-+void rq_qos_del(struct rq_qos *rqos)
- {
-+      struct request_queue *q = rqos->q;
-       struct rq_qos **cur;
-       /*
-diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
-index 805eee8b031d0..22552785aa31e 100644
---- a/block/blk-rq-qos.h
-+++ b/block/blk-rq-qos.h
-@@ -85,8 +85,9 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
-       init_waitqueue_head(&rq_wait->wait);
- }
--int rq_qos_add(struct request_queue *q, struct rq_qos *rqos);
--void rq_qos_del(struct request_queue *q, struct rq_qos *rqos);
-+int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
-+              struct rq_qos_ops *ops);
-+void rq_qos_del(struct rq_qos *rqos);
- typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
- typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index 95bec9244e9f3..aec4e37c89c4a 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -842,9 +842,6 @@ int wbt_init(struct gendisk *disk)
-       for (i = 0; i < WBT_NUM_RWQ; i++)
-               rq_wait_init(&rwb->rq_wait[i]);
--      rwb->rqos.id = RQ_QOS_WBT;
--      rwb->rqos.ops = &wbt_rqos_ops;
--      rwb->rqos.q = q;
-       rwb->last_comp = rwb->last_issue = jiffies;
-       rwb->win_nsec = RWB_WINDOW_NSEC;
-       rwb->enable_state = WBT_STATE_ON_DEFAULT;
-@@ -857,7 +854,7 @@ int wbt_init(struct gendisk *disk)
-       /*
-        * Assign rwb and add the stats callback.
-        */
--      ret = rq_qos_add(q, &rwb->rqos);
-+      ret = rq_qos_add(&rwb->rqos, disk, RQ_QOS_WBT, &wbt_rqos_ops);
-       if (ret)
-               goto err_free;
--- 
-2.43.0
-
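
The reworked rq_qos_add() centralizes the field setup that every caller
previously duplicated: the registration helper receives the id and ops, fills
in the object itself, and rejects duplicate ids. A rough userspace sketch of
that API shape follows, with hypothetical names standing in for the kernel
ones.

#include <stdio.h>
#include <stddef.h>

struct qos;
struct queue { struct qos *head; };
struct ops { const char *name; };

struct qos {
	struct queue *q;
	const struct ops *ops;
	int id;
	struct qos *next;
};

static int qos_add(struct qos *qos, struct queue *q, int id,
		   const struct ops *ops)
{
	struct qos *cur;

	/* Refuse duplicate ids, mirroring the -EBUSY case in rq_qos_add(). */
	for (cur = q->head; cur; cur = cur->next)
		if (cur->id == id)
			return -1;

	/* The helper, not the caller, initializes the object. */
	qos->q = q;
	qos->id = id;
	qos->ops = ops;
	qos->next = q->head;
	q->head = qos;
	return 0;
}

int main(void)
{
	struct queue q = { .head = NULL };
	static const struct ops wbt_like = { .name = "wbt-like" };
	struct qos a = { 0 }, b = { 0 };

	printf("first add: %d\n", qos_add(&a, &q, 1, &wbt_like));
	printf("duplicate id: %d\n", qos_add(&b, &q, 1, &wbt_like));
	return 0;
}
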
diff --git a/queue-6.1/blk-rq-qos-move-rq_qos_add-and-rq_qos_del-out-of-lin.patch b/queue-6.1/blk-rq-qos-move-rq_qos_add-and-rq_qos_del-out-of-lin.patch
deleted file mode 100644 (file)
index 6669d21..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-From 99215e8e45084576ff46f9ed9e23f06d152f879c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Feb 2023 16:03:53 +0100
-Subject: blk-rq-qos: move rq_qos_add and rq_qos_del out of line
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit b494f9c566ba5fe2cc8abe67fdeb0332c6b48d4b ]
-
-These two functions are rather large and not in a fast path, so move
-them out of line.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230203150400.3199230-13-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-rq-qos.c | 60 +++++++++++++++++++++++++++++++++++++++++++++
- block/blk-rq-qos.h | 61 ++--------------------------------------------
- 2 files changed, 62 insertions(+), 59 deletions(-)
-
-diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
-index 88f0fe7dcf545..aae98dcb01ebe 100644
---- a/block/blk-rq-qos.c
-+++ b/block/blk-rq-qos.c
-@@ -294,3 +294,63 @@ void rq_qos_exit(struct request_queue *q)
-               rqos->ops->exit(rqos);
-       }
- }
-+
-+int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
-+{
-+      /*
-+       * No IO can be in-flight when adding rqos, so freeze queue, which
-+       * is fine since we only support rq_qos for blk-mq queue.
-+       *
-+       * Reuse ->queue_lock for protecting against other concurrent
-+       * rq_qos adding/deleting
-+       */
-+      blk_mq_freeze_queue(q);
-+
-+      spin_lock_irq(&q->queue_lock);
-+      if (rq_qos_id(q, rqos->id))
-+              goto ebusy;
-+      rqos->next = q->rq_qos;
-+      q->rq_qos = rqos;
-+      spin_unlock_irq(&q->queue_lock);
-+
-+      blk_mq_unfreeze_queue(q);
-+
-+      if (rqos->ops->debugfs_attrs) {
-+              mutex_lock(&q->debugfs_mutex);
-+              blk_mq_debugfs_register_rqos(rqos);
-+              mutex_unlock(&q->debugfs_mutex);
-+      }
-+
-+      return 0;
-+ebusy:
-+      spin_unlock_irq(&q->queue_lock);
-+      blk_mq_unfreeze_queue(q);
-+      return -EBUSY;
-+
-+}
-+
-+void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
-+{
-+      struct rq_qos **cur;
-+
-+      /*
-+       * See comment in rq_qos_add() about freezing queue & using
-+       * ->queue_lock.
-+       */
-+      blk_mq_freeze_queue(q);
-+
-+      spin_lock_irq(&q->queue_lock);
-+      for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
-+              if (*cur == rqos) {
-+                      *cur = rqos->next;
-+                      break;
-+              }
-+      }
-+      spin_unlock_irq(&q->queue_lock);
-+
-+      blk_mq_unfreeze_queue(q);
-+
-+      mutex_lock(&q->debugfs_mutex);
-+      blk_mq_debugfs_unregister_rqos(rqos);
-+      mutex_unlock(&q->debugfs_mutex);
-+}
-diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
-index 1ef1f7d4bc3cb..805eee8b031d0 100644
---- a/block/blk-rq-qos.h
-+++ b/block/blk-rq-qos.h
-@@ -85,65 +85,8 @@ static inline void rq_wait_init(struct rq_wait *rq_wait)
-       init_waitqueue_head(&rq_wait->wait);
- }
--static inline int rq_qos_add(struct request_queue *q, struct rq_qos *rqos)
--{
--      /*
--       * No IO can be in-flight when adding rqos, so freeze queue, which
--       * is fine since we only support rq_qos for blk-mq queue.
--       *
--       * Reuse ->queue_lock for protecting against other concurrent
--       * rq_qos adding/deleting
--       */
--      blk_mq_freeze_queue(q);
--
--      spin_lock_irq(&q->queue_lock);
--      if (rq_qos_id(q, rqos->id))
--              goto ebusy;
--      rqos->next = q->rq_qos;
--      q->rq_qos = rqos;
--      spin_unlock_irq(&q->queue_lock);
--
--      blk_mq_unfreeze_queue(q);
--
--      if (rqos->ops->debugfs_attrs) {
--              mutex_lock(&q->debugfs_mutex);
--              blk_mq_debugfs_register_rqos(rqos);
--              mutex_unlock(&q->debugfs_mutex);
--      }
--
--      return 0;
--ebusy:
--      spin_unlock_irq(&q->queue_lock);
--      blk_mq_unfreeze_queue(q);
--      return -EBUSY;
--
--}
--
--static inline void rq_qos_del(struct request_queue *q, struct rq_qos *rqos)
--{
--      struct rq_qos **cur;
--
--      /*
--       * See comment in rq_qos_add() about freezing queue & using
--       * ->queue_lock.
--       */
--      blk_mq_freeze_queue(q);
--
--      spin_lock_irq(&q->queue_lock);
--      for (cur = &q->rq_qos; *cur; cur = &(*cur)->next) {
--              if (*cur == rqos) {
--                      *cur = rqos->next;
--                      break;
--              }
--      }
--      spin_unlock_irq(&q->queue_lock);
--
--      blk_mq_unfreeze_queue(q);
--
--      mutex_lock(&q->debugfs_mutex);
--      blk_mq_debugfs_unregister_rqos(rqos);
--      mutex_unlock(&q->debugfs_mutex);
--}
-+int rq_qos_add(struct request_queue *q, struct rq_qos *rqos);
-+void rq_qos_del(struct request_queue *q, struct rq_qos *rqos);
- typedef bool (acquire_inflight_cb_t)(struct rq_wait *rqw, void *private_data);
- typedef void (cleanup_cb_t)(struct rq_wait *rqw, void *private_data);
--- 
-2.43.0
-
diff --git a/queue-6.1/blk-rq-qos-store-a-gendisk-instead-of-request_queue-.patch b/queue-6.1/blk-rq-qos-store-a-gendisk-instead-of-request_queue-.patch
deleted file mode 100644 (file)
index 77370a2..0000000
+++ /dev/null
@@ -1,283 +0,0 @@
-From 8d1a0d757f1cabbee1a542c21443aefc9746b42d Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Feb 2023 16:03:56 +0100
-Subject: blk-rq-qos: store a gendisk instead of request_queue in struct rq_qos
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit ba91c849fa50dbc6519cf7808177b3a9b7f6bc97 ]
-
-This is what about half of the users already want, and it's only going to
-grow more.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230203150400.3199230-16-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-iocost.c     | 12 ++++++------
- block/blk-iolatency.c  | 14 +++++++-------
- block/blk-mq-debugfs.c | 10 ++++------
- block/blk-rq-qos.c     |  4 ++--
- block/blk-rq-qos.h     |  2 +-
- block/blk-wbt.c        | 16 +++++++---------
- 6 files changed, 27 insertions(+), 31 deletions(-)
-
-diff --git a/block/blk-iocost.c b/block/blk-iocost.c
-index 78958c5bece08..ab5830ba23e0f 100644
---- a/block/blk-iocost.c
-+++ b/block/blk-iocost.c
-@@ -670,7 +670,7 @@ static struct ioc *q_to_ioc(struct request_queue *q)
- static const char __maybe_unused *ioc_name(struct ioc *ioc)
- {
--      struct gendisk *disk = ioc->rqos.q->disk;
-+      struct gendisk *disk = ioc->rqos.disk;
-       if (!disk)
-               return "<unknown>";
-@@ -809,11 +809,11 @@ static int ioc_autop_idx(struct ioc *ioc)
-       u64 now_ns;
-       /* rotational? */
--      if (!blk_queue_nonrot(ioc->rqos.q))
-+      if (!blk_queue_nonrot(ioc->rqos.disk->queue))
-               return AUTOP_HDD;
-       /* handle SATA SSDs w/ broken NCQ */
--      if (blk_queue_depth(ioc->rqos.q) == 1)
-+      if (blk_queue_depth(ioc->rqos.disk->queue) == 1)
-               return AUTOP_SSD_QD1;
-       /* use one of the normal ssd sets */
-@@ -2653,7 +2653,7 @@ static void ioc_rqos_throttle(struct rq_qos *rqos, struct bio *bio)
-       if (use_debt) {
-               iocg_incur_debt(iocg, abs_cost, &now);
-               if (iocg_kick_delay(iocg, &now))
--                      blkcg_schedule_throttle(rqos->q->disk,
-+                      blkcg_schedule_throttle(rqos->disk,
-                                       (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
-               iocg_unlock(iocg, ioc_locked, &flags);
-               return;
-@@ -2754,7 +2754,7 @@ static void ioc_rqos_merge(struct rq_qos *rqos, struct request *rq,
-       if (likely(!list_empty(&iocg->active_list))) {
-               iocg_incur_debt(iocg, abs_cost, &now);
-               if (iocg_kick_delay(iocg, &now))
--                      blkcg_schedule_throttle(rqos->q->disk,
-+                      blkcg_schedule_throttle(rqos->disk,
-                                       (bio->bi_opf & REQ_SWAP) == REQ_SWAP);
-       } else {
-               iocg_commit_bio(iocg, bio, abs_cost, cost);
-@@ -2825,7 +2825,7 @@ static void ioc_rqos_exit(struct rq_qos *rqos)
- {
-       struct ioc *ioc = rqos_to_ioc(rqos);
--      blkcg_deactivate_policy(rqos->q, &blkcg_policy_iocost);
-+      blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iocost);
-       spin_lock_irq(&ioc->lock);
-       ioc->running = IOC_STOP;
-diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
-index b0f8550f87cd2..268e6653b5a62 100644
---- a/block/blk-iolatency.c
-+++ b/block/blk-iolatency.c
-@@ -292,7 +292,7 @@ static void __blkcg_iolatency_throttle(struct rq_qos *rqos,
-       unsigned use_delay = atomic_read(&lat_to_blkg(iolat)->use_delay);
-       if (use_delay)
--              blkcg_schedule_throttle(rqos->q->disk, use_memdelay);
-+              blkcg_schedule_throttle(rqos->disk, use_memdelay);
-       /*
-        * To avoid priority inversions we want to just take a slot if we are
-@@ -330,7 +330,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
-                               struct child_latency_info *lat_info,
-                               bool up)
- {
--      unsigned long qd = blkiolat->rqos.q->nr_requests;
-+      unsigned long qd = blkiolat->rqos.disk->queue->nr_requests;
-       unsigned long scale = scale_amount(qd, up);
-       unsigned long old = atomic_read(&lat_info->scale_cookie);
-       unsigned long max_scale = qd << 1;
-@@ -370,7 +370,7 @@ static void scale_cookie_change(struct blk_iolatency *blkiolat,
-  */
- static void scale_change(struct iolatency_grp *iolat, bool up)
- {
--      unsigned long qd = iolat->blkiolat->rqos.q->nr_requests;
-+      unsigned long qd = iolat->blkiolat->rqos.disk->queue->nr_requests;
-       unsigned long scale = scale_amount(qd, up);
-       unsigned long old = iolat->rq_depth.max_depth;
-@@ -647,7 +647,7 @@ static void blkcg_iolatency_exit(struct rq_qos *rqos)
-       del_timer_sync(&blkiolat->timer);
-       flush_work(&blkiolat->enable_work);
--      blkcg_deactivate_policy(rqos->q, &blkcg_policy_iolatency);
-+      blkcg_deactivate_policy(rqos->disk->queue, &blkcg_policy_iolatency);
-       kfree(blkiolat);
- }
-@@ -666,7 +666,7 @@ static void blkiolatency_timer_fn(struct timer_list *t)
-       rcu_read_lock();
-       blkg_for_each_descendant_pre(blkg, pos_css,
--                                   blkiolat->rqos.q->root_blkg) {
-+                                   blkiolat->rqos.disk->queue->root_blkg) {
-               struct iolatency_grp *iolat;
-               struct child_latency_info *lat_info;
-               unsigned long flags;
-@@ -750,9 +750,9 @@ static void blkiolatency_enable_work_fn(struct work_struct *work)
-        */
-       enabled = atomic_read(&blkiolat->enable_cnt);
-       if (enabled != blkiolat->enabled) {
--              blk_mq_freeze_queue(blkiolat->rqos.q);
-+              blk_mq_freeze_queue(blkiolat->rqos.disk->queue);
-               blkiolat->enabled = enabled;
--              blk_mq_unfreeze_queue(blkiolat->rqos.q);
-+              blk_mq_unfreeze_queue(blkiolat->rqos.disk->queue);
-       }
- }
-diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
-index 7675e663df365..c152276736832 100644
---- a/block/blk-mq-debugfs.c
-+++ b/block/blk-mq-debugfs.c
-@@ -813,9 +813,9 @@ static const char *rq_qos_id_to_name(enum rq_qos_id id)
- void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
- {
--      lockdep_assert_held(&rqos->q->debugfs_mutex);
-+      lockdep_assert_held(&rqos->disk->queue->debugfs_mutex);
--      if (!rqos->q->debugfs_dir)
-+      if (!rqos->disk->queue->debugfs_dir)
-               return;
-       debugfs_remove_recursive(rqos->debugfs_dir);
-       rqos->debugfs_dir = NULL;
-@@ -823,7 +823,7 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
- void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
- {
--      struct request_queue *q = rqos->q;
-+      struct request_queue *q = rqos->disk->queue;
-       const char *dir_name = rq_qos_id_to_name(rqos->id);
-       lockdep_assert_held(&q->debugfs_mutex);
-@@ -835,9 +835,7 @@ void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
-               q->rqos_debugfs_dir = debugfs_create_dir("rqos",
-                                                        q->debugfs_dir);
--      rqos->debugfs_dir = debugfs_create_dir(dir_name,
--                                             rqos->q->rqos_debugfs_dir);
--
-+      rqos->debugfs_dir = debugfs_create_dir(dir_name, q->rqos_debugfs_dir);
-       debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
- }
-diff --git a/block/blk-rq-qos.c b/block/blk-rq-qos.c
-index 8e83734cfe8db..d8cc820a365e3 100644
---- a/block/blk-rq-qos.c
-+++ b/block/blk-rq-qos.c
-@@ -300,7 +300,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
- {
-       struct request_queue *q = disk->queue;
--      rqos->q = q;
-+      rqos->disk = disk;
-       rqos->id = id;
-       rqos->ops = ops;
-@@ -337,7 +337,7 @@ int rq_qos_add(struct rq_qos *rqos, struct gendisk *disk, enum rq_qos_id id,
- void rq_qos_del(struct rq_qos *rqos)
- {
--      struct request_queue *q = rqos->q;
-+      struct request_queue *q = rqos->disk->queue;
-       struct rq_qos **cur;
-       /*
-diff --git a/block/blk-rq-qos.h b/block/blk-rq-qos.h
-index 2b7b668479f71..b02a1a3d33a89 100644
---- a/block/blk-rq-qos.h
-+++ b/block/blk-rq-qos.h
-@@ -26,7 +26,7 @@ struct rq_wait {
- struct rq_qos {
-       const struct rq_qos_ops *ops;
--      struct request_queue *q;
-+      struct gendisk *disk;
-       enum rq_qos_id id;
-       struct rq_qos *next;
- #ifdef CONFIG_BLK_DEBUG_FS
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index d9398347b08d8..e9206b1406e76 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -98,7 +98,7 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
-  */
- static bool wb_recent_wait(struct rq_wb *rwb)
- {
--      struct bdi_writeback *wb = &rwb->rqos.q->disk->bdi->wb;
-+      struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
-       return time_before(jiffies, wb->dirty_sleep + HZ);
- }
-@@ -235,7 +235,7 @@ enum {
- static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
- {
--      struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
-+      struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
-       struct rq_depth *rqd = &rwb->rq_depth;
-       u64 thislat;
-@@ -288,7 +288,7 @@ static int latency_exceeded(struct rq_wb *rwb, struct blk_rq_stat *stat)
- static void rwb_trace_step(struct rq_wb *rwb, const char *msg)
- {
--      struct backing_dev_info *bdi = rwb->rqos.q->disk->bdi;
-+      struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
-       struct rq_depth *rqd = &rwb->rq_depth;
-       trace_wbt_step(bdi, msg, rqd->scale_step, rwb->cur_win_nsec,
-@@ -358,13 +358,12 @@ static void wb_timer_fn(struct blk_stat_callback *cb)
-       unsigned int inflight = wbt_inflight(rwb);
-       int status;
--      if (!rwb->rqos.q->disk)
-+      if (!rwb->rqos.disk)
-               return;
-       status = latency_exceeded(rwb, cb->stat);
--      trace_wbt_timer(rwb->rqos.q->disk->bdi, status, rqd->scale_step,
--                      inflight);
-+      trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight);
-       /*
-        * If we exceeded the latency target, step down. If we did not,
-@@ -689,16 +688,15 @@ static int wbt_data_dir(const struct request *rq)
- static void wbt_queue_depth_changed(struct rq_qos *rqos)
- {
--      RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->q);
-+      RQWB(rqos)->rq_depth.queue_depth = blk_queue_depth(rqos->disk->queue);
-       wbt_update_limits(RQWB(rqos));
- }
- static void wbt_exit(struct rq_qos *rqos)
- {
-       struct rq_wb *rwb = RQWB(rqos);
--      struct request_queue *q = rqos->q;
--      blk_stat_remove_callback(q, rwb->cb);
-+      blk_stat_remove_callback(rqos->disk->queue, rwb->cb);
-       blk_stat_free_callback(rwb->cb);
-       kfree(rwb);
- }
--- 
-2.43.0
-
diff --git a/queue-6.1/blk-wbt-don-t-enable-throttling-if-default-elevator-.patch b/queue-6.1/blk-wbt-don-t-enable-throttling-if-default-elevator-.patch
deleted file mode 100644 (file)
index 3d91bef..0000000
+++ /dev/null
@@ -1,114 +0,0 @@
-From 51938e4e6ade6005901b700cfe6ecdd7481af216 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 19 Oct 2022 20:15:18 +0800
-Subject: blk-wbt: don't enable throttling if default elevator is bfq
-
-From: Yu Kuai <yukuai3@huawei.com>
-
-[ Upstream commit 671fae5e51297fc76b3758ca2edd514858734a6a ]
-
-Commit b5dc5d4d1f4f ("block,bfq: Disable writeback throttling") tries to
-disable wbt for bfq by calling wbt_disable_default() in
-bfq_init_queue(). However, wbt is still enabled if the default
-elevator is bfq:
-
-device_add_disk
- elevator_init_mq
-  bfq_init_queue
-   wbt_disable_default -> does nothing
-
- blk_register_queue
-  wbt_enable_default -> wbt is enabled
-
-Fix the problem by adding a new flag, ELEVATOR_FLAG_DISABLE_WBT: bfq
-sets the flag in bfq_init_queue(), and a subsequent wbt_enable_default()
-won't enable wbt while the flag is set.
-
-Signed-off-by: Yu Kuai <yukuai3@huawei.com>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20221019121518.3865235-7-yukuai1@huaweicloud.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/bfq-iosched.c |  2 ++
- block/blk-wbt.c     | 11 ++++++++---
- block/elevator.h    |  3 ++-
- 3 files changed, 12 insertions(+), 4 deletions(-)
-
-diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
-index 52eb79d60a3f3..e4699291aee23 100644
---- a/block/bfq-iosched.c
-+++ b/block/bfq-iosched.c
-@@ -7059,6 +7059,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
- #endif
-       blk_stat_disable_accounting(bfqd->queue);
-+      clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags);
-       wbt_enable_default(bfqd->queue);
-       kfree(bfqd);
-@@ -7204,6 +7205,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
-       /* We dispatch from request queue wide instead of hw queue */
-       blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
-+      set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags);
-       wbt_disable_default(q);
-       blk_stat_enable_accounting(q);
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index c5a8c10028a08..afb1782b4255e 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -27,6 +27,7 @@
- #include "blk-wbt.h"
- #include "blk-rq-qos.h"
-+#include "elevator.h"
- #define CREATE_TRACE_POINTS
- #include <trace/events/wbt.h>
-@@ -638,11 +639,15 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
-  */
- void wbt_enable_default(struct request_queue *q)
- {
--      struct rq_qos *rqos = wbt_rq_qos(q);
-+      struct rq_qos *rqos;
-+      bool disable_flag = q->elevator &&
-+                  test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
-       /* Throttling already enabled? */
-+      rqos = wbt_rq_qos(q);
-       if (rqos) {
--              if (RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
-+              if (!disable_flag &&
-+                  RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
-                       RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
-               return;
-       }
-@@ -651,7 +656,7 @@ void wbt_enable_default(struct request_queue *q)
-       if (!blk_queue_registered(q))
-               return;
--      if (queue_is_mq(q))
-+      if (queue_is_mq(q) && !disable_flag)
-               wbt_init(q);
- }
- EXPORT_SYMBOL_GPL(wbt_enable_default);
-diff --git a/block/elevator.h b/block/elevator.h
-index ed574bf3e629e..75382471222d1 100644
---- a/block/elevator.h
-+++ b/block/elevator.h
-@@ -104,7 +104,8 @@ struct elevator_queue
-       DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
- };
--#define ELEVATOR_FLAG_REGISTERED 0
-+#define ELEVATOR_FLAG_REGISTERED      0
-+#define ELEVATOR_FLAG_DISABLE_WBT     1
- /*
-  * block elevator interface
--- 
-2.43.0
-
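
The flag works because elevator initialization runs before queue registration,
so the later wbt_enable_default() call can consult state the elevator recorded
earlier. The following is an illustrative userspace sketch of that ordering,
with simplified stand-in types rather than kernel code.

#include <stdio.h>
#include <stdbool.h>

#define FLAG_DISABLE_WBT (1UL << 1)

struct elevator { unsigned long flags; };
struct queue { struct elevator *elevator; bool wbt_enabled; };

static void bfq_like_init_queue(struct queue *q, struct elevator *e)
{
	q->elevator = e;
	e->flags |= FLAG_DISABLE_WBT;	/* remember the opt-out */
	q->wbt_enabled = false;		/* wbt_disable_default() equivalent */
}

static void wbt_enable_default_like(struct queue *q)
{
	if (q->elevator && (q->elevator->flags & FLAG_DISABLE_WBT))
		return;			/* the elevator asked wbt to stay off */
	q->wbt_enabled = true;
}

int main(void)
{
	struct elevator bfq = { 0 };
	struct queue q = { 0 };

	bfq_like_init_queue(&q, &bfq);	/* device_add_disk -> elevator_init_mq */
	wbt_enable_default_like(&q);	/* blk_register_queue */
	printf("wbt enabled with bfq: %d\n", q.wbt_enabled);
	return 0;
}
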
diff --git a/queue-6.1/blk-wbt-fix-detection-of-dirty-throttled-tasks.patch b/queue-6.1/blk-wbt-fix-detection-of-dirty-throttled-tasks.patch
deleted file mode 100644 (file)
index c065067..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-From ecf5ea95f5102ff71cf1675020f9bff184b40208 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 23 Jan 2024 18:58:26 +0100
-Subject: blk-wbt: Fix detection of dirty-throttled tasks
-
-From: Jan Kara <jack@suse.cz>
-
-[ Upstream commit f814bdda774c183b0cc15ec8f3b6e7c6f4527ba5 ]
-
-The detection of dirty-throttled tasks in blk-wbt has been subtly broken
-since its beginning in 2016. Namely, if we are doing cgroup writeback and
-the throttled task is not in the root cgroup, balance_dirty_pages() will
-set dirty_sleep for the non-root bdi_writeback structure. However,
-blk-wbt checks dirty_sleep only in the root cgroup bdi_writeback
-structure. Thus detection of recently throttled tasks does not work in
-this case (we noticed this when we switched to cgroup v2 and writeback
-was suddenly slow).
-
-Since blk-wbt has no easy way to get to proper bdi_writeback and
-furthermore its intention has always been to work on the whole device
-rather than on individual cgroups, just move the dirty_sleep timestamp
-from bdi_writeback to backing_dev_info. That fixes the checking for
-recently throttled task and saves memory for everybody as a bonus.
-
-CC: stable@vger.kernel.org
-Fixes: b57d74aff9ab ("writeback: track if we're sleeping on progress in balance_dirty_pages()")
-Signed-off-by: Jan Kara <jack@suse.cz>
-Link: https://lore.kernel.org/r/20240123175826.21452-1-jack@suse.cz
-[axboe: fixup indentation errors]
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-wbt.c                  | 4 ++--
- include/linux/backing-dev-defs.h | 7 +++++--
- mm/backing-dev.c                 | 2 +-
- mm/page-writeback.c              | 2 +-
- 4 files changed, 9 insertions(+), 6 deletions(-)
-
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index e9206b1406e76..fcacdff8af93b 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -98,9 +98,9 @@ static void wb_timestamp(struct rq_wb *rwb, unsigned long *var)
-  */
- static bool wb_recent_wait(struct rq_wb *rwb)
- {
--      struct bdi_writeback *wb = &rwb->rqos.disk->bdi->wb;
-+      struct backing_dev_info *bdi = rwb->rqos.disk->bdi;
--      return time_before(jiffies, wb->dirty_sleep + HZ);
-+      return time_before(jiffies, bdi->last_bdp_sleep + HZ);
- }
- static inline struct rq_wait *get_rq_wait(struct rq_wb *rwb,
-diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
-index ae12696ec492c..2ad261082bba5 100644
---- a/include/linux/backing-dev-defs.h
-+++ b/include/linux/backing-dev-defs.h
-@@ -141,8 +141,6 @@ struct bdi_writeback {
-       struct delayed_work dwork;      /* work item used for writeback */
-       struct delayed_work bw_dwork;   /* work item used for bandwidth estimate */
--      unsigned long dirty_sleep;      /* last wait */
--
-       struct list_head bdi_node;      /* anchored at bdi->wb_list */
- #ifdef CONFIG_CGROUP_WRITEBACK
-@@ -179,6 +177,11 @@ struct backing_dev_info {
-        * any dirty wbs, which is depended upon by bdi_has_dirty().
-        */
-       atomic_long_t tot_write_bandwidth;
-+      /*
-+       * Jiffies when last process was dirty throttled on this bdi. Used by
-+       * blk-wbt.
-+       */
-+      unsigned long last_bdp_sleep;
-       struct bdi_writeback wb;  /* the root writeback info for this bdi */
-       struct list_head wb_list; /* list of all wbs */
-diff --git a/mm/backing-dev.c b/mm/backing-dev.c
-index bf5525c2e561a..c070ff9ef9cf3 100644
---- a/mm/backing-dev.c
-+++ b/mm/backing-dev.c
-@@ -305,7 +305,6 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
-       INIT_LIST_HEAD(&wb->work_list);
-       INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
-       INIT_DELAYED_WORK(&wb->bw_dwork, wb_update_bandwidth_workfn);
--      wb->dirty_sleep = jiffies;
-       err = fprop_local_init_percpu(&wb->completions, gfp);
-       if (err)
-@@ -793,6 +792,7 @@ int bdi_init(struct backing_dev_info *bdi)
-       INIT_LIST_HEAD(&bdi->bdi_list);
-       INIT_LIST_HEAD(&bdi->wb_list);
-       init_waitqueue_head(&bdi->wb_waitq);
-+      bdi->last_bdp_sleep = jiffies;
-       return cgwb_bdi_init(bdi);
- }
-diff --git a/mm/page-writeback.c b/mm/page-writeback.c
-index d3e9d12860b9f..9046d1f1b408e 100644
---- a/mm/page-writeback.c
-+++ b/mm/page-writeback.c
-@@ -1809,7 +1809,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
-                       break;
-               }
-               __set_current_state(TASK_KILLABLE);
--              wb->dirty_sleep = now;
-+              bdi->last_bdp_sleep = jiffies;
-               io_schedule_timeout(pause);
-               current->dirty_paused_when = now + pause;
--- 
-2.43.0
-
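
With the timestamp moved to backing_dev_info, the "recently throttled" test
becomes a single per-device comparison that any throttled writer updates,
regardless of which cgroup it writes from. Here is a self-contained sketch of
that check, using seconds in place of jiffies; it is illustrative only, not
the kernel implementation.

#include <stdio.h>
#include <stdbool.h>
#include <time.h>

#define RECENT_WINDOW_SEC 1	/* stands in for the HZ window in blk-wbt */

struct backing_dev {
	time_t last_bdp_sleep;	/* per-device, regardless of cgroup */
};

/* balance_dirty_pages() equivalent: any throttled writer updates the bdi. */
static void record_dirty_throttle(struct backing_dev *bdi)
{
	bdi->last_bdp_sleep = time(NULL);
}

/* wb_recent_wait() equivalent used by the wbt heuristics. */
static bool recently_throttled(const struct backing_dev *bdi)
{
	return time(NULL) < bdi->last_bdp_sleep + RECENT_WINDOW_SEC;
}

int main(void)
{
	struct backing_dev bdi = { .last_bdp_sleep = 0 };

	printf("before throttle: %d\n", recently_throttled(&bdi));
	record_dirty_throttle(&bdi);	/* e.g. from a non-root cgroup writer */
	printf("right after throttle: %d\n", recently_throttled(&bdi));
	return 0;
}
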
diff --git a/queue-6.1/blk-wbt-fix-that-wbt-can-t-be-disabled-by-default.patch b/queue-6.1/blk-wbt-fix-that-wbt-can-t-be-disabled-by-default.patch
deleted file mode 100644 (file)
index 487c08f..0000000
+++ /dev/null
@@ -1,65 +0,0 @@
-From 5376a7667a1f2430589c3b2f5f0bccafd1dd761b Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 22 May 2023 20:18:54 +0800
-Subject: blk-wbt: fix that wbt can't be disabled by default
-
-From: Yu Kuai <yukuai3@huawei.com>
-
-[ Upstream commit 8a2b20a997a3779ae9fcae268f2959eb82ec05a1 ]
-
-commit b11d31ae01e6 ("blk-wbt: remove unnecessary check in
-wbt_enable_default()") removed the check for CONFIG_BLK_WBT_MQ by
-mistake; that option controls whether wbt is enabled by default.
-
-Fix the problem by adding the check back. This patch also does a little
-cleanup to make the related code more readable.
-
-Fixes: b11d31ae01e6 ("blk-wbt: remove unnecessary check in wbt_enable_default()")
-Reported-by: Lukas Bulwahn <lukas.bulwahn@gmail.com>
-Link: https://lore.kernel.org/lkml/CAKXUXMzfKq_J9nKHGyr5P5rvUETY4B-fxoQD4sO+NYjFOfVtZA@mail.gmail.com/t/
-Signed-off-by: Yu Kuai <yukuai3@huawei.com>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20230522121854.2928880-1-yukuai1@huaweicloud.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-wbt.c | 12 +++++++-----
- 1 file changed, 7 insertions(+), 5 deletions(-)
-
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index fcacdff8af93b..526fb12c3e4cf 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -640,14 +640,16 @@ void wbt_enable_default(struct gendisk *disk)
- {
-       struct request_queue *q = disk->queue;
-       struct rq_qos *rqos;
--      bool disable_flag = q->elevator &&
--                  test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
-+      bool enable = IS_ENABLED(CONFIG_BLK_WBT_MQ);
-+
-+      if (q->elevator &&
-+          test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags))
-+              enable = false;
-       /* Throttling already enabled? */
-       rqos = wbt_rq_qos(q);
-       if (rqos) {
--              if (!disable_flag &&
--                  RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
-+              if (enable && RQWB(rqos)->enable_state == WBT_STATE_OFF_DEFAULT)
-                       RQWB(rqos)->enable_state = WBT_STATE_ON_DEFAULT;
-               return;
-       }
-@@ -656,7 +658,7 @@ void wbt_enable_default(struct gendisk *disk)
-       if (!blk_queue_registered(q))
-               return;
--      if (queue_is_mq(q) && !disable_flag)
-+      if (queue_is_mq(q) && enable)
-               wbt_init(disk);
- }
- EXPORT_SYMBOL_GPL(wbt_enable_default);
--- 
-2.43.0
-
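
The restored logic combines a compile-time default with the elevator's runtime
opt-out: start from the build option, then clear the decision if the elevator
set ELEVATOR_FLAG_DISABLE_WBT. A hedged userspace sketch of that decision is
below; CONFIG_DEMO_WBT is a stand-in macro, not a real kernel config symbol.

#include <stdio.h>
#include <stdbool.h>

#ifndef CONFIG_DEMO_WBT
#define CONFIG_DEMO_WBT 1	/* pretend the option is built in */
#endif

static bool should_enable_wbt(bool elevator_disables_wbt)
{
	bool enable = CONFIG_DEMO_WBT;	/* IS_ENABLED()-style default */

	if (elevator_disables_wbt)
		enable = false;		/* ELEVATOR_FLAG_DISABLE_WBT case */
	return enable;
}

int main(void)
{
	printf("default elevator: %d\n", should_enable_wbt(false));
	printf("bfq elevator:     %d\n", should_enable_wbt(true));
	return 0;
}
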
diff --git a/queue-6.1/blk-wbt-pass-a-gendisk-to-wbt_-enable-disable-_defau.patch b/queue-6.1/blk-wbt-pass-a-gendisk-to-wbt_-enable-disable-_defau.patch
deleted file mode 100644 (file)
index 93821b3..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-From 3cab63f95634875a1501abbda551e69098f6c978 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Feb 2023 16:03:49 +0100
-Subject: blk-wbt: pass a gendisk to wbt_{enable,disable}_default
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 04aad37be1a88de6a1919996a615437ac74de479 ]
-
-Pass a gendisk to wbt_enable_default and wbt_disable_default to
-prepare for phasing out usage of the request_queue in the blk-cgroup
-code.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230203150400.3199230-9-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/bfq-iosched.c | 4 ++--
- block/blk-iocost.c  | 4 ++--
- block/blk-sysfs.c   | 2 +-
- block/blk-wbt.c     | 7 ++++---
- block/blk-wbt.h     | 8 ++++----
- 5 files changed, 13 insertions(+), 12 deletions(-)
-
-diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
-index e4699291aee23..84b4763b2b223 100644
---- a/block/bfq-iosched.c
-+++ b/block/bfq-iosched.c
-@@ -7060,7 +7060,7 @@ static void bfq_exit_queue(struct elevator_queue *e)
-       blk_stat_disable_accounting(bfqd->queue);
-       clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags);
--      wbt_enable_default(bfqd->queue);
-+      wbt_enable_default(bfqd->queue->disk);
-       kfree(bfqd);
- }
-@@ -7206,7 +7206,7 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
-       blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);
-       set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags);
--      wbt_disable_default(q);
-+      wbt_disable_default(q->disk);
-       blk_stat_enable_accounting(q);
-       return 0;
-diff --git a/block/blk-iocost.c b/block/blk-iocost.c
-index 3788774a7b729..72ca07f24b3c0 100644
---- a/block/blk-iocost.c
-+++ b/block/blk-iocost.c
-@@ -3281,11 +3281,11 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
-               blk_stat_enable_accounting(disk->queue);
-               blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
-               ioc->enabled = true;
--              wbt_disable_default(disk->queue);
-+              wbt_disable_default(disk);
-       } else {
-               blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, disk->queue);
-               ioc->enabled = false;
--              wbt_enable_default(disk->queue);
-+              wbt_enable_default(disk);
-       }
-       if (user) {
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index a82bdec923b21..c59c4d3ee7a27 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -837,7 +837,7 @@ int blk_register_queue(struct gendisk *disk)
-               goto put_dev;
-       blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
--      wbt_enable_default(q);
-+      wbt_enable_default(disk);
-       blk_throtl_register(disk);
-       /* Now everything is ready and send out KOBJ_ADD uevent */
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index afb1782b4255e..8d4f075f13e2f 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -637,8 +637,9 @@ void wbt_set_write_cache(struct request_queue *q, bool write_cache_on)
- /*
-  * Enable wbt if defaults are configured that way
-  */
--void wbt_enable_default(struct request_queue *q)
-+void wbt_enable_default(struct gendisk *disk)
- {
-+      struct request_queue *q = disk->queue;
-       struct rq_qos *rqos;
-       bool disable_flag = q->elevator &&
-                   test_bit(ELEVATOR_FLAG_DISABLE_WBT, &q->elevator->flags);
-@@ -705,9 +706,9 @@ static void wbt_exit(struct rq_qos *rqos)
- /*
-  * Disable wbt, if enabled by default.
-  */
--void wbt_disable_default(struct request_queue *q)
-+void wbt_disable_default(struct gendisk *disk)
- {
--      struct rq_qos *rqos = wbt_rq_qos(q);
-+      struct rq_qos *rqos = wbt_rq_qos(disk->queue);
-       struct rq_wb *rwb;
-       if (!rqos)
-               return;
-diff --git a/block/blk-wbt.h b/block/blk-wbt.h
-index 7e44eccc676dd..58c226fe33d48 100644
---- a/block/blk-wbt.h
-+++ b/block/blk-wbt.h
-@@ -89,8 +89,8 @@ static inline unsigned int wbt_inflight(struct rq_wb *rwb)
- #ifdef CONFIG_BLK_WBT
- int wbt_init(struct request_queue *);
--void wbt_disable_default(struct request_queue *);
--void wbt_enable_default(struct request_queue *);
-+void wbt_disable_default(struct gendisk *disk);
-+void wbt_enable_default(struct gendisk *disk);
- u64 wbt_get_min_lat(struct request_queue *q);
- void wbt_set_min_lat(struct request_queue *q, u64 val);
-@@ -105,10 +105,10 @@ static inline int wbt_init(struct request_queue *q)
- {
-       return -EINVAL;
- }
--static inline void wbt_disable_default(struct request_queue *q)
-+static inline void wbt_disable_default(struct gendisk *disk)
- {
- }
--static inline void wbt_enable_default(struct request_queue *q)
-+static inline void wbt_enable_default(struct gendisk *disk)
- {
- }
- static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
--- 
-2.43.0
-
diff --git a/queue-6.1/blk-wbt-pass-a-gendisk-to-wbt_init.patch b/queue-6.1/blk-wbt-pass-a-gendisk-to-wbt_init.patch
deleted file mode 100644 (file)
index c2e6a0a..0000000
+++ /dev/null
@@ -1,87 +0,0 @@
-From 64436d303bf9f3e4b615121498533f1e7b068e19 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 3 Feb 2023 16:03:50 +0100
-Subject: blk-wbt: pass a gendisk to wbt_init
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 958f29654747a54f2272eb478e493eb97f492e06 ]
-
-Pass a gendisk to wbt_init to prepare for phasing out usage of the
-request_queue in the blk-cgroup code.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Andreas Herrmann <aherrmann@suse.de>
-Acked-by: Tejun Heo <tj@kernel.org>
-Link: https://lore.kernel.org/r/20230203150400.3199230-10-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-sysfs.c | 2 +-
- block/blk-wbt.c   | 5 +++--
- block/blk-wbt.h   | 4 ++--
- 3 files changed, 6 insertions(+), 5 deletions(-)
-
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index c59c4d3ee7a27..31f53ef01982d 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -488,7 +488,7 @@ static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
-       rqos = wbt_rq_qos(q);
-       if (!rqos) {
--              ret = wbt_init(q);
-+              ret = wbt_init(q->disk);
-               if (ret)
-                       return ret;
-       }
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index 8d4f075f13e2f..95bec9244e9f3 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -658,7 +658,7 @@ void wbt_enable_default(struct gendisk *disk)
-               return;
-       if (queue_is_mq(q) && !disable_flag)
--              wbt_init(q);
-+              wbt_init(disk);
- }
- EXPORT_SYMBOL_GPL(wbt_enable_default);
-@@ -822,8 +822,9 @@ static struct rq_qos_ops wbt_rqos_ops = {
- #endif
- };
--int wbt_init(struct request_queue *q)
-+int wbt_init(struct gendisk *disk)
- {
-+      struct request_queue *q = disk->queue;
-       struct rq_wb *rwb;
-       int i;
-       int ret;
-diff --git a/block/blk-wbt.h b/block/blk-wbt.h
-index 58c226fe33d48..8170439b89d6e 100644
---- a/block/blk-wbt.h
-+++ b/block/blk-wbt.h
-@@ -88,7 +88,7 @@ static inline unsigned int wbt_inflight(struct rq_wb *rwb)
- #ifdef CONFIG_BLK_WBT
--int wbt_init(struct request_queue *);
-+int wbt_init(struct gendisk *disk);
- void wbt_disable_default(struct gendisk *disk);
- void wbt_enable_default(struct gendisk *disk);
-@@ -101,7 +101,7 @@ u64 wbt_default_latency_nsec(struct request_queue *);
- #else
--static inline int wbt_init(struct request_queue *q)
-+static inline int wbt_init(struct gendisk *disk)
- {
-       return -EINVAL;
- }
--- 
-2.43.0
-
diff --git a/queue-6.1/blk-wbt-remove-unnecessary-check-in-wbt_enable_defau.patch b/queue-6.1/blk-wbt-remove-unnecessary-check-in-wbt_enable_defau.patch
deleted file mode 100644 (file)
index b656839..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-From 631dc45a1e1ca5721ec23d80d60381e818e3c409 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 19 Oct 2022 20:15:14 +0800
-Subject: blk-wbt: remove unnecessary check in wbt_enable_default()
-
-From: Yu Kuai <yukuai3@huawei.com>
-
-[ Upstream commit b11d31ae01e6b0762b28e645ad6718a12faa8d14 ]
-
-If CONFIG_BLK_WBT_MQ is disabled, wbt_init() won't do anything.
-
-Signed-off-by: Yu Kuai <yukuai3@huawei.com>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20221019121518.3865235-3-yukuai1@huaweicloud.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-wbt.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/block/blk-wbt.c b/block/blk-wbt.c
-index c293e08b301ff..c5a8c10028a08 100644
---- a/block/blk-wbt.c
-+++ b/block/blk-wbt.c
-@@ -651,7 +651,7 @@ void wbt_enable_default(struct request_queue *q)
-       if (!blk_queue_registered(q))
-               return;
--      if (queue_is_mq(q) && IS_ENABLED(CONFIG_BLK_WBT_MQ))
-+      if (queue_is_mq(q))
-               wbt_init(q);
- }
- EXPORT_SYMBOL_GPL(wbt_enable_default);
--- 
-2.43.0
-
diff --git a/queue-6.1/elevator-add-new-field-flags-in-struct-elevator_queu.patch b/queue-6.1/elevator-add-new-field-flags-in-struct-elevator_queu.patch
deleted file mode 100644 (file)
index 864e5a1..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-From 85ad0276e21822aca9c6a80d8a03247daa354d1c Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 19 Oct 2022 20:15:17 +0800
-Subject: elevator: add new field flags in struct elevator_queue
-
-From: Yu Kuai <yukuai3@huawei.com>
-
-[ Upstream commit 181d06637451b5348d746039478e71fa53dfbff6 ]
-
-There is currently only one flag, indicating that the elevator is
-registered. Prepare to add a flag to disable wbt if the default elevator is bfq.
-
-Signed-off-by: Yu Kuai <yukuai3@huawei.com>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20221019121518.3865235-6-yukuai1@huaweicloud.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/elevator.c | 6 ++----
- block/elevator.h | 4 +++-
- 2 files changed, 5 insertions(+), 5 deletions(-)
-
-diff --git a/block/elevator.c b/block/elevator.c
-index 20e70fd3f77f9..9e12706e8d8cb 100644
---- a/block/elevator.c
-+++ b/block/elevator.c
-@@ -512,7 +512,7 @@ int elv_register_queue(struct request_queue *q, bool uevent)
-               if (uevent)
-                       kobject_uevent(&e->kobj, KOBJ_ADD);
--              e->registered = 1;
-+              set_bit(ELEVATOR_FLAG_REGISTERED, &e->flags);
-       }
-       return error;
- }
-@@ -523,11 +523,9 @@ void elv_unregister_queue(struct request_queue *q)
-       lockdep_assert_held(&q->sysfs_lock);
--      if (e && e->registered) {
-+      if (e && test_and_clear_bit(ELEVATOR_FLAG_REGISTERED, &e->flags)) {
-               kobject_uevent(&e->kobj, KOBJ_REMOVE);
-               kobject_del(&e->kobj);
--
--              e->registered = 0;
-       }
- }
-diff --git a/block/elevator.h b/block/elevator.h
-index 3f0593b3bf9d3..ed574bf3e629e 100644
---- a/block/elevator.h
-+++ b/block/elevator.h
-@@ -100,10 +100,12 @@ struct elevator_queue
-       void *elevator_data;
-       struct kobject kobj;
-       struct mutex sysfs_lock;
--      unsigned int registered:1;
-+      unsigned long flags;
-       DECLARE_HASHTABLE(hash, ELV_HASH_BITS);
- };
-+#define ELEVATOR_FLAG_REGISTERED 0
-+
- /*
-  * block elevator interface
-  */
--- 
-2.43.0
-
diff --git a/queue-6.1/elevator-remove-redundant-code-in-elv_unregister_que.patch b/queue-6.1/elevator-remove-redundant-code-in-elv_unregister_que.patch
deleted file mode 100644 (file)
index ede298a..0000000
+++ /dev/null
@@ -1,39 +0,0 @@
-From efa75e7a472dd9a1c9519c1cf50e37d2b5d3ca47 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Wed, 19 Oct 2022 20:15:13 +0800
-Subject: elevator: remove redundant code in elv_unregister_queue()
-
-From: Yu Kuai <yukuai3@huawei.com>
-
-[ Upstream commit 6d9f4cf125585ebf0718abcf5ce9ca898877c6d2 ]
-
-"elevator_queue *e" is already declared and initialized in the beginning
-of elv_unregister_queue().
-
-Signed-off-by: Yu Kuai <yukuai3@huawei.com>
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Eric Biggers <ebiggers@google.com>
-Link: https://lore.kernel.org/r/20221019121518.3865235-2-yukuai1@huaweicloud.com
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: f814bdda774c ("blk-wbt: Fix detection of dirty-throttled tasks")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/elevator.c | 2 --
- 1 file changed, 2 deletions(-)
-
-diff --git a/block/elevator.c b/block/elevator.c
-index bd71f0fc4e4b6..20e70fd3f77f9 100644
---- a/block/elevator.c
-+++ b/block/elevator.c
-@@ -524,8 +524,6 @@ void elv_unregister_queue(struct request_queue *q)
-       lockdep_assert_held(&q->sysfs_lock);
-       if (e && e->registered) {
--              struct elevator_queue *e = q->elevator;
--
-               kobject_uevent(&e->kobj, KOBJ_REMOVE);
-               kobject_del(&e->kobj);
--- 
-2.43.0
-
diff --git a/queue-6.1/series b/queue-6.1/series
index 9b3a6f9597bda41ca237692e76c646e4238268e6..7c501252d09bef903e66edcd8eca603dc196de78 100644 (file)
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -45,18 +45,6 @@ documentation-hw-vuln-add-documentation-for-rfds.patch
 x86-rfds-mitigate-register-file-data-sampling-rfds.patch
 kvm-x86-export-rfds_no-and-rfds_clear-to-guests.patch
 selftests-mptcp-decrease-bw-in-simult-flows.patch
-blk-iocost-disable-writeback-throttling.patch
-elevator-remove-redundant-code-in-elv_unregister_que.patch
-blk-wbt-remove-unnecessary-check-in-wbt_enable_defau.patch
-elevator-add-new-field-flags-in-struct-elevator_queu.patch
-blk-wbt-don-t-enable-throttling-if-default-elevator-.patch
-blk-wbt-pass-a-gendisk-to-wbt_-enable-disable-_defau.patch
-blk-wbt-pass-a-gendisk-to-wbt_init.patch
-blk-rq-qos-move-rq_qos_add-and-rq_qos_del-out-of-lin.patch
-blk-rq-qos-make-rq_qos_add-and-rq_qos_del-more-usefu.patch
-blk-rq-qos-constify-rq_qos_ops.patch
-blk-rq-qos-store-a-gendisk-instead-of-request_queue-.patch
-blk-wbt-fix-detection-of-dirty-throttled-tasks.patch
 drm-amd-display-wrong-colorimetry-workaround.patch
 drm-amd-display-fix-mst-null-ptr-for-rv.patch
 getrusage-add-the-signal_struct-sig-local-variable.patch
@@ -65,5 +53,3 @@ getrusage-use-__for_each_thread.patch
 getrusage-use-sig-stats_lock-rather-than-lock_task_s.patch
 fs-proc-do_task_stat-use-__for_each_thread.patch
 fs-proc-do_task_stat-use-sig-stats_lock-to-gather-th.patch
-blk-wbt-fix-that-wbt-can-t-be-disabled-by-default.patch
-blk-iocost-pass-gendisk-to-ioc_refresh_params.patch