From cdd86c17d2e3bb1b2f119ee722c632e2f7ef3d30 Mon Sep 17 00:00:00 2001
From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Date: Sat, 11 Mar 2023 10:34:32 +0100
Subject: Revert "blk-cgroup: synchronize pd_free_fn() from blkg_free_workfn() and blkcg_deactivate_policy()"

From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

This reverts commit bfe46d2efe46c5c952f982e2ca94fe2ec5e58e2a which is
commit f1c006f1c6850c14040f8337753a63119bba39b9 upstream.

It is reported to cause problems, as only 2 of the 3 patches in the
series were applied to the stable branches.

Reported-by: Mike Cloaked <mike.cloaked@gmail.com>
Reported-by: Eric Biggers <ebiggers@kernel.org>
Cc: Yu Kuai <yukuai3@huawei.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Sasha Levin <sashal@kernel.org>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217174
Link: https://lore.kernel.org/r/ZAuPkCn49urWBN5P@sol.localdomain
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 block/blk-cgroup.c     | 35 ++++++-----------------------------
 include/linux/blkdev.h |  1 -
 2 files changed, 6 insertions(+), 30 deletions(-)

--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -118,32 +118,16 @@ static void blkg_free_workfn(struct work
struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
- struct request_queue *q = blkg->q;
- * pd_free_fn() can also be called from blkcg_deactivate_policy(),
- * in order to make sure pd_free_fn() is called in order, the deletion
- * of the list blkg->q_node is delayed to here from blkg_destroy(), and
- * blkcg_mutex is used to synchronize blkg_free_workfn() and
- * blkcg_deactivate_policy().
- mutex_lock(&q->blkcg_mutex);
for (i = 0; i < BLKCG_MAX_POLS; i++)
blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
blkg_put(blkg->parent);
- list_del_init(&blkg->q_node);
- mutex_unlock(&q->blkcg_mutex);
+ blk_put_queue(blkg->q);
free_percpu(blkg->iostat_cpu);
percpu_ref_exit(&blkg->refcnt);
@@ -474,14 +458,9 @@ static void blkg_destroy(struct blkcg_gq
lockdep_assert_held(&blkg->q->queue_lock);
lockdep_assert_held(&blkcg->lock);
- * blkg stays on the queue list until blkg_free_workfn(), see details in
- * blkg_free_workfn(), hence this function can be called from
- * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
- * blkg_free_workfn().
- if (hlist_unhashed(&blkg->blkcg_node))
+ /* Something wrong if we are trying to remove same group twice */
+ WARN_ON_ONCE(list_empty(&blkg->q_node));
+ WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -493,6 +472,7 @@ static void blkg_destroy(struct blkcg_gq
radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
+ list_del_init(&blkg->q_node);
hlist_del_init_rcu(&blkg->blkcg_node);
@@ -1293,7 +1273,6 @@ int blkcg_init_disk(struct gendisk *disk
INIT_LIST_HEAD(&q->blkg_list);
- mutex_init(&q->blkcg_mutex);
new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
@@ -1531,7 +1510,6 @@ void blkcg_deactivate_policy(struct requ
blk_mq_freeze_queue(q);
- mutex_lock(&q->blkcg_mutex);
spin_lock_irq(&q->queue_lock);
__clear_bit(pol->plid, q->blkcg_pols);
@@ -1550,7 +1528,6 @@ void blkcg_deactivate_policy(struct requ
spin_unlock_irq(&q->queue_lock);
- mutex_unlock(&q->blkcg_mutex);
blk_mq_unfreeze_queue(q);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -484,7 +484,6 @@ struct request_queue {
DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
struct blkcg_gq *root_blkg;
struct list_head blkg_list;
- struct mutex blkcg_mutex;
struct queue_limits limits;