From 224749be6c23efe7fb8a030854f4fc5d1dd813b3 Mon Sep 17 00:00:00 2001
From: Ming Lei <ming.lei@redhat.com>
Date: Wed, 18 Dec 2024 18:16:14 +0800
Subject: block: Revert "block: Fix potential deadlock while freezing queue and acquiring sysfs_lock"

From: Ming Lei <ming.lei@redhat.com>

commit 224749be6c23efe7fb8a030854f4fc5d1dd813b3 upstream.

This reverts commit be26ba96421ab0a8fa2055ccf7db7832a13c44d2.

Commit be26ba96421a ("block: Fix potential deadlock while freezing queue and
acquiring sysfs_lock") actually reverts commit 22465bbac53c ("blk-mq: move cpuhp
callback registering out of q->sysfs_lock") and brings back the original
resctrl lockdep warning.

So revert it; the issue needs to be fixed in another way.
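
For reference, this revert restores the freeze-before-lock ordering in
queue_attr_store(): the queue is frozen before q->sysfs_lock is taken and
unfrozen after it is released. A minimal sketch of the resulting path,
mirroring the blk-sysfs.c hunk below:

	blk_mq_freeze_queue(q);
	mutex_lock(&q->sysfs_lock);
	res = entry->store(disk, page, length);
	mutex_unlock(&q->sysfs_lock);
	blk_mq_unfreeze_queue(q);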

Cc: Nilay Shroff <nilay@linux.ibm.com>
Fixes: be26ba96421a ("block: Fix potential deadlock while freezing queue and acquiring sysfs_lock")
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20241218101617.3275704-2-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 block/blk-mq-sysfs.c |   16 ++++++++++------
 block/blk-mq.c       |   29 +++++++++++------------------
 block/blk-sysfs.c    |    4 ++--
 3 files changed, 23 insertions(+), 26 deletions(-)

--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -275,13 +275,15 @@ void blk_mq_sysfs_unregister_hctxs(struc
 	struct blk_mq_hw_ctx *hctx;
 	unsigned long i;
 
-	lockdep_assert_held(&q->sysfs_dir_lock);
-
+	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)
-		return;
+		goto unlock;
 
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
+
+unlock:
+	mutex_unlock(&q->sysfs_dir_lock);
 }
 
 int blk_mq_sysfs_register_hctxs(struct request_queue *q)
@@ -290,10 +292,9 @@ int blk_mq_sysfs_register_hctxs(struct r
 	unsigned long i;
 	int ret = 0;
 
-	lockdep_assert_held(&q->sysfs_dir_lock);
-
+	mutex_lock(&q->sysfs_dir_lock);
 	if (!q->mq_sysfs_init_done)
-		return ret;
+		goto unlock;
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
@@ -301,5 +302,8 @@ int blk_mq_sysfs_register_hctxs(struct r
 			break;
 	}
 
+unlock:
+	mutex_unlock(&q->sysfs_dir_lock);
+
 	return ret;
 }
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -4462,8 +4462,7 @@ static void blk_mq_realloc_hw_ctxs(struc
 	unsigned long i, j;
 
 	/* protect against switching io scheduler */
-	lockdep_assert_held(&q->sysfs_lock);
-
+	mutex_lock(&q->sysfs_lock);
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		int old_node;
 		int node = blk_mq_get_hctx_node(set, i);
@@ -4496,6 +4495,7 @@ static void blk_mq_realloc_hw_ctxs(struc
 
 	xa_for_each_start(&q->hctx_table, j, hctx, j)
 		blk_mq_exit_hctx(q, set, hctx, j);
+	mutex_unlock(&q->sysfs_lock);
 
 	/* unregister cpuhp callbacks for exited hctxs */
 	blk_mq_remove_hw_queues_cpuhp(q);
@@ -4527,14 +4527,10 @@ int blk_mq_init_allocated_queue(struct b
 
 	xa_init(&q->hctx_table);
 
-	mutex_lock(&q->sysfs_lock);
-
 	blk_mq_realloc_hw_ctxs(set, q);
 	if (!q->nr_hw_queues)
 		goto err_hctxs;
 
-	mutex_unlock(&q->sysfs_lock);
-
 	INIT_WORK(&q->timeout_work, blk_mq_timeout_work);
 	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
@@ -4553,7 +4549,6 @@ int blk_mq_init_allocated_queue(struct b
 	return 0;
 
 err_hctxs:
-	mutex_unlock(&q->sysfs_lock);
 	blk_mq_release(q);
 err_exit:
 	q->mq_ops = NULL;
@@ -4934,12 +4929,12 @@ static bool blk_mq_elv_switch_none(struc
 		return false;
 
 	/* q->elevator needs protection from ->sysfs_lock */
-	lockdep_assert_held(&q->sysfs_lock);
+	mutex_lock(&q->sysfs_lock);
 
 	/* the check has to be done with holding sysfs_lock */
 	if (!q->elevator) {
 		kfree(qe);
-		goto out;
+		goto unlock;
 	}
 
 	INIT_LIST_HEAD(&qe->node);
@@ -4949,7 +4944,9 @@ static bool blk_mq_elv_switch_none(struc
 	__elevator_get(qe->type);
 	list_add(&qe->node, head);
 	elevator_disable(q);
-out:
+unlock:
+	mutex_unlock(&q->sysfs_lock);
+
 	return true;
 }
 
@@ -4978,9 +4975,11 @@ static void blk_mq_elv_switch_back(struc
 	list_del(&qe->node);
 	kfree(qe);
 
+	mutex_lock(&q->sysfs_lock);
 	elevator_switch(q, t);
 	/* drop the reference acquired in blk_mq_elv_switch_none */
 	elevator_put(t);
+	mutex_unlock(&q->sysfs_lock);
 }
 
 static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set,
@@ -5000,11 +4999,8 @@ static void __blk_mq_update_nr_hw_queues
 	if (set->nr_maps == 1 && nr_hw_queues == set->nr_hw_queues)
 		return;
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
-		mutex_lock(&q->sysfs_dir_lock);
-		mutex_lock(&q->sysfs_lock);
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_freeze_queue(q);
-	}
 	/*
 	 * Switch IO scheduler to 'none', cleaning up the data associated
 	 * with the previous scheduler. We will switch back once we are done
@@ -5060,11 +5056,8 @@ switch_back:
 	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_elv_switch_back(&head, q);
 
-	list_for_each_entry(q, &set->tag_list, tag_set_list) {
+	list_for_each_entry(q, &set->tag_list, tag_set_list)
 		blk_mq_unfreeze_queue(q);
-		mutex_unlock(&q->sysfs_lock);
-		mutex_unlock(&q->sysfs_dir_lock);
-	}
 
 	/* Free the excess tags when nr_hw_queues shrink. */
 	for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++)
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -690,11 +690,11 @@ queue_attr_store(struct kobject *kobj, s
 		return res;
 	}
 
-	mutex_lock(&q->sysfs_lock);
 	blk_mq_freeze_queue(q);
+	mutex_lock(&q->sysfs_lock);
 	res = entry->store(disk, page, length);
-	blk_mq_unfreeze_queue(q);
 	mutex_unlock(&q->sysfs_lock);
+	blk_mq_unfreeze_queue(q);
 	return res;
 }

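
For reference, a sketch of blk_mq_sysfs_unregister_hctxs() as it reads with
this revert applied, reconstructed from the first hunk above (the truncated
signature in the hunk header is completed by analogy with the sibling
blk_mq_sysfs_register_hctxs(), so treat it as an assumption):

	void blk_mq_sysfs_unregister_hctxs(struct request_queue *q)
	{
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		/* take the lock here again, rather than asserting the caller holds it */
		mutex_lock(&q->sysfs_dir_lock);
		if (!q->mq_sysfs_init_done)
			goto unlock;

		queue_for_each_hw_ctx(q, hctx, i)
			blk_mq_unregister_hctx(hctx);

	unlock:
		mutex_unlock(&q->sysfs_dir_lock);
	}

Because the helper once again takes q->sysfs_dir_lock itself,
__blk_mq_update_nr_hw_queues() no longer holds the two sysfs mutexes across
blk_mq_freeze_queue(), which is the lock nesting the reverted commit had
reintroduced.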