]>
Commit | Line | Data |
---|---|---|
320ae51f JA |
1 | #include <linux/kernel.h> |
2 | #include <linux/module.h> | |
3 | #include <linux/backing-dev.h> | |
4 | #include <linux/bio.h> | |
5 | #include <linux/blkdev.h> | |
6 | #include <linux/mm.h> | |
7 | #include <linux/init.h> | |
8 | #include <linux/slab.h> | |
9 | #include <linux/workqueue.h> | |
10 | #include <linux/smp.h> | |
11 | ||
12 | #include <linux/blk-mq.h> | |
94ba80af | 13 | #include "blk.h" |
320ae51f JA |
14 | #include "blk-mq.h" |
15 | #include "blk-mq-tag.h" | |
16 | ||
17 | static void blk_mq_sysfs_release(struct kobject *kobj) | |
18 | { | |
1db4909e ML |
19 | struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj); |
20 | ||
21 | free_percpu(ctxs->queue_ctx); | |
22 | kfree(ctxs); | |
23 | } | |
24 | ||
25 | static void blk_mq_ctx_sysfs_release(struct kobject *kobj) | |
26 | { | |
27 | struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj); | |
28 | ||
29 | /* ctx->ctxs won't be released until all ctx are freed */ | |
30 | kobject_put(&ctx->ctxs->kobj); | |
320ae51f JA |
31 | } |
32 | ||
/*
 * Final teardown of a hardware queue context, invoked when its kobject
 * refcount reaches zero.  Frees everything the hctx owns, then the hctx.
 */
static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);

	/* the SRCU domain is only allocated for blocking hw queues */
	if (hctx->flags & BLK_MQ_F_BLOCKING)
		cleanup_srcu_struct(hctx->srcu);
	blk_free_flush_queue(hctx->fq);
	sbitmap_free(&hctx->ctx_map);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}
46 | ||
/*
 * Attribute descriptor for a software (per-cpu) queue context: couples a
 * sysfs attribute with typed show/store callbacks.
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

/* Same idea, but for a hardware queue context. */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
58 | ||
59 | static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr, | |
60 | char *page) | |
61 | { | |
62 | struct blk_mq_ctx_sysfs_entry *entry; | |
63 | struct blk_mq_ctx *ctx; | |
64 | struct request_queue *q; | |
65 | ssize_t res; | |
66 | ||
67 | entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); | |
68 | ctx = container_of(kobj, struct blk_mq_ctx, kobj); | |
69 | q = ctx->queue; | |
70 | ||
71 | if (!entry->show) | |
72 | return -EIO; | |
73 | ||
74 | res = -ENOENT; | |
75 | mutex_lock(&q->sysfs_lock); | |
76 | if (!blk_queue_dying(q)) | |
77 | res = entry->show(ctx, page); | |
78 | mutex_unlock(&q->sysfs_lock); | |
79 | return res; | |
80 | } | |
81 | ||
82 | static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr, | |
83 | const char *page, size_t length) | |
84 | { | |
85 | struct blk_mq_ctx_sysfs_entry *entry; | |
86 | struct blk_mq_ctx *ctx; | |
87 | struct request_queue *q; | |
88 | ssize_t res; | |
89 | ||
90 | entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr); | |
91 | ctx = container_of(kobj, struct blk_mq_ctx, kobj); | |
92 | q = ctx->queue; | |
93 | ||
94 | if (!entry->store) | |
95 | return -EIO; | |
96 | ||
97 | res = -ENOENT; | |
98 | mutex_lock(&q->sysfs_lock); | |
99 | if (!blk_queue_dying(q)) | |
100 | res = entry->store(ctx, page, length); | |
101 | mutex_unlock(&q->sysfs_lock); | |
102 | return res; | |
103 | } | |
104 | ||
105 | static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj, | |
106 | struct attribute *attr, char *page) | |
107 | { | |
108 | struct blk_mq_hw_ctx_sysfs_entry *entry; | |
109 | struct blk_mq_hw_ctx *hctx; | |
110 | struct request_queue *q; | |
111 | ssize_t res; | |
112 | ||
113 | entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); | |
114 | hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); | |
115 | q = hctx->queue; | |
116 | ||
117 | if (!entry->show) | |
118 | return -EIO; | |
119 | ||
120 | res = -ENOENT; | |
121 | mutex_lock(&q->sysfs_lock); | |
122 | if (!blk_queue_dying(q)) | |
123 | res = entry->show(hctx, page); | |
124 | mutex_unlock(&q->sysfs_lock); | |
125 | return res; | |
126 | } | |
127 | ||
128 | static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj, | |
129 | struct attribute *attr, const char *page, | |
130 | size_t length) | |
131 | { | |
132 | struct blk_mq_hw_ctx_sysfs_entry *entry; | |
133 | struct blk_mq_hw_ctx *hctx; | |
134 | struct request_queue *q; | |
135 | ssize_t res; | |
136 | ||
137 | entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr); | |
138 | hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj); | |
139 | q = hctx->queue; | |
140 | ||
141 | if (!entry->store) | |
142 | return -EIO; | |
143 | ||
144 | res = -ENOENT; | |
145 | mutex_lock(&q->sysfs_lock); | |
146 | if (!blk_queue_dying(q)) | |
147 | res = entry->store(hctx, page, length); | |
148 | mutex_unlock(&q->sysfs_lock); | |
149 | return res; | |
150 | } | |
151 | ||
/* "nr_tags": total number of tags in this hctx's tag set. */
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}
157 | ||
/* "nr_reserved_tags": tags set aside for reserved requests. */
static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}
163 | ||
676141e4 JA |
164 | static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) |
165 | { | |
cb2da43e | 166 | unsigned int i, first = 1; |
676141e4 JA |
167 | ssize_t ret = 0; |
168 | ||
cb2da43e | 169 | for_each_cpu(i, hctx->cpumask) { |
676141e4 JA |
170 | if (first) |
171 | ret += sprintf(ret + page, "%u", i); | |
172 | else | |
173 | ret += sprintf(ret + page, ", %u", i); | |
174 | ||
175 | first = 0; | |
176 | } | |
177 | ||
676141e4 JA |
178 | ret += sprintf(ret + page, "\n"); |
179 | return ret; | |
180 | } | |
181 | ||
/* ctx kobjects currently expose no attributes of their own. */
static struct attribute *default_ctx_attrs[] = {
	NULL,
};
185 | ||
/* Read-only hctx attributes exposed under <dev>/mq/<n>/ in sysfs. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

/* NULL-terminated table installed as the hctx ktype default attrs. */
static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};
205 | ||
/* Route sysfs reads/writes through the typed dispatchers above. */
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

/* ktype for the per-queue "mq" container kobject. */
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

/* ktype for software (per-cpu) queue contexts. */
static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_ctx_sysfs_release,
};

/* ktype for hardware queue contexts. */
static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_hw_sysfs_release,
};
232 | ||
ee3c5db0 | 233 | static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx) |
67aec14c JA |
234 | { |
235 | struct blk_mq_ctx *ctx; | |
236 | int i; | |
237 | ||
4593fdbe | 238 | if (!hctx->nr_ctx) |
67aec14c JA |
239 | return; |
240 | ||
241 | hctx_for_each_ctx(hctx, ctx, i) | |
242 | kobject_del(&ctx->kobj); | |
243 | ||
244 | kobject_del(&hctx->kobj); | |
245 | } | |
246 | ||
ee3c5db0 | 247 | static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx) |
67aec14c JA |
248 | { |
249 | struct request_queue *q = hctx->queue; | |
250 | struct blk_mq_ctx *ctx; | |
251 | int i, ret; | |
252 | ||
4593fdbe | 253 | if (!hctx->nr_ctx) |
67aec14c JA |
254 | return 0; |
255 | ||
1db4909e | 256 | ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num); |
67aec14c JA |
257 | if (ret) |
258 | return ret; | |
259 | ||
260 | hctx_for_each_ctx(hctx, ctx, i) { | |
261 | ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); | |
262 | if (ret) | |
263 | break; | |
264 | } | |
265 | ||
266 | return ret; | |
267 | } | |
268 | ||
/*
 * Remove the whole "mq" sysfs tree of @q from @dev and drop the device
 * reference taken at registration time.  Caller must hold q->sysfs_lock.
 */
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	/* pairs with the kobject_get(&dev->kobj) in __blk_mq_register_dev() */
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}
285 | ||
/* One-time kobject initialisation for a freshly allocated hctx. */
void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}
290 | ||
7ea5fe31 ML |
291 | void blk_mq_sysfs_deinit(struct request_queue *q) |
292 | { | |
293 | struct blk_mq_ctx *ctx; | |
294 | int cpu; | |
295 | ||
296 | for_each_possible_cpu(cpu) { | |
297 | ctx = per_cpu_ptr(q->queue_ctx, cpu); | |
298 | kobject_put(&ctx->kobj); | |
299 | } | |
1db4909e | 300 | kobject_put(q->mq_kobj); |
7ea5fe31 ML |
301 | } |
302 | ||
/*
 * Initialise the queue's "mq" kobject and one kobject per software
 * (per-cpu) context.  Each ctx takes a reference on mq_kobj so the
 * container outlives every ctx; the references are dropped in
 * blk_mq_ctx_sysfs_release().
 */
void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		/* get before init so the ref exists for the ctx's lifetime */
		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}
317 | ||
/*
 * Register the "mq" directory and every hardware context of @q under
 * @dev.  Caller must hold q->sysfs_lock.  On failure all hctxs added so
 * far are removed again and the device reference is dropped.
 */
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	/* kobject_get() pairs with kobject_put() on unregister/error */
	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	/* unwind only the hctxs that registered successfully */
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}
352 | ||
/* Locked wrapper around __blk_mq_register_dev(). */
int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}
67aec14c JA |
363 | |
364 | void blk_mq_sysfs_unregister(struct request_queue *q) | |
365 | { | |
366 | struct blk_mq_hw_ctx *hctx; | |
367 | int i; | |
368 | ||
2d0364c8 | 369 | mutex_lock(&q->sysfs_lock); |
4593fdbe | 370 | if (!q->mq_sysfs_init_done) |
2d0364c8 | 371 | goto unlock; |
4593fdbe | 372 | |
67aec14c JA |
373 | queue_for_each_hw_ctx(q, hctx, i) |
374 | blk_mq_unregister_hctx(hctx); | |
2d0364c8 BVA |
375 | |
376 | unlock: | |
377 | mutex_unlock(&q->sysfs_lock); | |
67aec14c JA |
378 | } |
379 | ||
380 | int blk_mq_sysfs_register(struct request_queue *q) | |
381 | { | |
382 | struct blk_mq_hw_ctx *hctx; | |
383 | int i, ret = 0; | |
384 | ||
2d0364c8 | 385 | mutex_lock(&q->sysfs_lock); |
4593fdbe | 386 | if (!q->mq_sysfs_init_done) |
2d0364c8 | 387 | goto unlock; |
4593fdbe | 388 | |
67aec14c JA |
389 | queue_for_each_hw_ctx(q, hctx, i) { |
390 | ret = blk_mq_register_hctx(hctx); | |
391 | if (ret) | |
392 | break; | |
393 | } | |
394 | ||
2d0364c8 BVA |
395 | unlock: |
396 | mutex_unlock(&q->sysfs_lock); | |
397 | ||
67aec14c JA |
398 | return ret; |
399 | } |