// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

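/*
 * Release handler for the per-queue "mq" kobject: frees the percpu
 * software queue contexts and the container that owns them.
 */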
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

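/*
 * Release handler for a software queue context. Drops the reference on
 * the parent ctxs kobject taken in blk_mq_sysfs_init(), so the ctxs
 * container above goes away only after the last ctx does.
 */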
static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

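/*
 * Final teardown of a hardware queue, run when its kobject refcount
 * drops to zero. This is where the hctx itself and everything hanging
 * off it (flush queue, ctx map, cpumask, ctx array) are freed.
 */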
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	cancel_delayed_work_sync(&hctx->run_work);

	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

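/*
 * Typed wrappers around struct attribute: one flavour for software queue
 * (ctx) attributes, one for hardware queue (hctx) attributes.
 */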
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

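/*
 * Dispatch a generic sysfs read to the entry's typed ->show() callback.
 * Access is serialized with q->sysfs_lock and refused with -ENOENT once
 * the queue is dying.
 */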
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

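/*
 * Write-side counterpart of blk_mq_sysfs_show(), with the same locking
 * and dying-queue checks.
 */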
static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

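/* hctx counterparts of the two dispatch helpers above. */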
static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

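/*
 * Attribute implementations: the tag counts come straight from the
 * hctx's tag set.
 */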
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

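/*
 * Emit the comma-separated list of CPUs served by this hardware queue.
 * Like all sysfs ->show() output this assumes the formatted list fits
 * in the single PAGE_SIZE buffer.
 */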
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
ATTRIBUTE_GROUPS(default_hw_ctx);

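/*
 * Tie the dispatch helpers and release handlers together: one kobj_type
 * per kobject flavour (mq dir, software queue, hardware queue).
 */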
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_groups = default_hw_ctx_groups,
	.release	= blk_mq_hw_sysfs_release,
};

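/*
 * Remove a hardware queue and its software queues from sysfs. The
 * kobjects stay initialized, so the queue can be re-registered later.
 */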
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

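/*
 * Add a hardware queue under the per-queue "mq" directory, plus one
 * "cpuN" entry for each software queue mapped to it.
 */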
static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

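/*
 * Tear down the whole mq sysfs hierarchy for a queue and drop the device
 * reference taken at registration time. Caller must hold q->sysfs_lock.
 */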
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

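/*
 * Undo blk_mq_sysfs_init(): drop the reference each software queue
 * context holds, then the base reference on the mq kobject itself.
 */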
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

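/*
 * Initialize the mq kobject and every software queue context kobject.
 * Each ctx takes a reference on mq_kobj, so blk_mq_sysfs_release()
 * cannot run before the last ctx has been released.
 */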
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

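/*
 * Register the "mq" directory and all hardware queues for a queue. On
 * failure, unwinds the hardware queues registered so far and removes the
 * "mq" directory again. Caller must hold q->sysfs_lock.
 */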
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

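/* Locked wrapper around __blk_mq_register_dev(). */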
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}

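/*
 * Temporarily remove the hardware queues from sysfs, e.g. while the
 * queue's set of hardware contexts is being updated. A no-op until the
 * initial registration has completed.
 */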
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}

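/* Re-register all hardware queues after blk_mq_sysfs_unregister(). */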
int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}