/*
 * Copyright (C) 2017 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%lld, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

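/*
 * The poll_stat file reports polled I/O completion latency statistics: one
 * read/write pair of blk_rq_stat buckets per request size, starting at 512
 * bytes and doubling with each bucket.
 */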
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < BLK_MQ_POLL_STATS_BKTS/2; bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9+bucket));
		print_stat(m, &q->poll_stat[2*bucket+1]);
		seq_puts(m, "\n");
	}
	return 0;
}

static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start	= queue_requeue_list_start,
	.next	= queue_requeue_list_next,
	.stop	= queue_requeue_list_stop,
	.show	= blk_mq_debugfs_rq_show,
};

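/*
 * Print a set of flag bits as a '|'-separated list of names; bits that have
 * no entry in flag_name[] are printed by their bit number instead.
 */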
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(BIDI),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(NO_SG_MERGE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(FLUSH_NQ),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

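/*
 * Per-request-queue debugfs attributes; one file per entry is created in the
 * queue's debugfs directory.
 */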
static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(SG_MERGE),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define REQ_OP_NAME(name) [REQ_OP_##name] = #name
static const char *const op_name[] = {
	REQ_OP_NAME(READ),
	REQ_OP_NAME(WRITE),
	REQ_OP_NAME(FLUSH),
	REQ_OP_NAME(DISCARD),
	REQ_OP_NAME(SECURE_ERASE),
	REQ_OP_NAME(ZONE_RESET),
	REQ_OP_NAME(WRITE_SAME),
	REQ_OP_NAME(WRITE_ZEROES),
	REQ_OP_NAME(SCSI_IN),
	REQ_OP_NAME(SCSI_OUT),
	REQ_OP_NAME(DRV_IN),
	REQ_OP_NAME(DRV_OUT),
};
#undef REQ_OP_NAME

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(NOWAIT),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(COPY_USER),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

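/* Names for the request states reported in the per-request dump below. */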
static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE]		= "idle",
	[MQ_RQ_IN_FLIGHT]	= "in_flight",
	[MQ_RQ_COMPLETE]	= "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = rq->cmd_flags & REQ_OP_MASK;

	seq_printf(m, "%p {.op=", rq);
	if (op < ARRAY_SIZE(op_name) && op_name[op])
		seq_printf(m, "%s", op_name[op]);
	else
		seq_printf(m, "%d", op);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start	= hctx_dispatch_start,
	.next	= hctx_dispatch_next,
	.stop	= hctx_dispatch_stop,
	.show	= blk_mq_debugfs_rq_show,
};

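/*
 * Bundles the seq_file and the hardware queue so both can be passed through
 * blk_mq_tagset_busy_iter() to hctx_show_busy_rq().
 */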
struct show_busy_params {
	struct seq_file		*m;
	struct blk_mq_hw_ctx	*hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT]	= "default",
	[HCTX_TYPE_READ]	= "read",
	[HCTX_TYPE_POLL]	= "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

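/*
 * The tags/sched_tags attributes take q->sysfs_lock to serialize against
 * concurrent queue reconfiguration while the tag sets are being dumped.
 */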
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

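/*
 * Generate seq_file operations for iterating one per-CPU software queue list
 * (default, read or poll); ctx->lock is held across the whole iteration.
 */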
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start	= ctx_##name##_rq_list_start,				\
	.next	= ctx_##name##_rq_list_next,				\
	.stop	= ctx_##name##_rq_list_stop,				\
	.show	= blk_mq_debugfs_rq_show,				\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

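/*
 * Generic file operations for the attribute files created below: the
 * blk_mq_debugfs_attr is stored in the file's inode->i_private, while the
 * object being inspected (queue, hctx or ctx) lives in the i_private of the
 * parent directory's inode (see debugfs_create_files()).
 */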
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);
	else
		return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open		= blk_mq_debugfs_open,
	.read		= seq_read,
	.write		= blk_mq_debugfs_write,
	.llseek		= seq_lseek,
	.release	= blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

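/*
 * Create one debugfs file per attribute under 'parent'. 'data' (the queue,
 * hctx or ctx being exposed) is stashed in the directory inode so the generic
 * open/show/write helpers above can retrieve it.
 */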
static bool debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	d_inode(parent)->i_private = data;

	for (; attr->name; attr++) {
		if (!debugfs_create_file(attr->name, attr->mode, parent,
					 (void *)attr, &blk_mq_debugfs_fops))
			return false;
	}
	return true;
}

int blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	if (!blk_debugfs_root)
		return -ENOENT;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);
	if (!q->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->debugfs_dir, q,
				  blk_mq_debugfs_queue_attrs))
		goto err;

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
			goto err;
		if (q->elevator && !hctx->sched_debugfs_dir &&
		    blk_mq_debugfs_register_sched_hctx(q, hctx))
			goto err;
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}

	return 0;

err:
	blk_mq_debugfs_unregister(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

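/* Create the per-CPU "cpu<N>" directory and its software-queue attributes. */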
static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
				       struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
	if (!ctx_dir)
		return -ENOMEM;

	if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
		return -ENOMEM;

	return 0;
}

int blk_mq_debugfs_register_hctx(struct request_queue *q,
				 struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	if (!q->debugfs_dir)
		return -ENOENT;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
	if (!hctx->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->debugfs_dir, hctx,
				  blk_mq_debugfs_hctx_attrs))
		goto err;

	hctx_for_each_ctx(hctx, ctx, i) {
		if (blk_mq_debugfs_register_ctx(hctx, ctx))
			goto err;
	}

	return 0;

err:
	blk_mq_debugfs_unregister_hctx(hctx);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		if (blk_mq_debugfs_register_hctx(q, hctx))
			return -ENOMEM;
	}

	return 0;
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

int blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	if (!q->debugfs_dir)
		return -ENOENT;

	if (!e->queue_debugfs_attrs)
		return 0;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
	if (!q->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(q->sched_debugfs_dir, q,
				  e->queue_debugfs_attrs))
		goto err;

	return 0;

err:
	blk_mq_debugfs_unregister_sched(q);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

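/*
 * Register the debugfs attributes of one rq_qos policy (e.g. wbt) under the
 * queue's "rqos" directory, creating that directory on first use.
 */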
int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (!q->debugfs_dir)
		return -ENOENT;

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return 0;

	if (!q->rqos_debugfs_dir) {
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);
		if (!q->rqos_debugfs_dir)
			return -ENOMEM;
	}

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);
	if (!rqos->debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(rqos->debugfs_dir, rqos,
				  rqos->ops->debugfs_attrs))
		goto err;
	return 0;
err:
	blk_mq_debugfs_unregister_rqos(rqos);
	return -ENOMEM;
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
				       struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!hctx->debugfs_dir)
		return -ENOENT;

	if (!e->hctx_debugfs_attrs)
		return 0;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	if (!hctx->sched_debugfs_dir)
		return -ENOMEM;

	if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
				  e->hctx_debugfs_attrs))
		return -ENOMEM;

	return 0;
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}