// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

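/*
 * q->poll_stat[] interleaves read and write statistics per I/O size
 * bucket: entry 2*b holds reads and entry 2*b+1 holds writes for
 * requests of 1 << (9 + b) bytes, i.e. buckets start at 512 bytes and
 * double from there.
 */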
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

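/*
 * The "requeue_list" attribute walks q->requeue_list using the seq_file
 * iterator API; q->requeue_lock is held with interrupts disabled across
 * the whole traversal via the ->start/->stop callbacks.
 */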
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

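/*
 * Decode @flags into a '|'-separated list of names taken from
 * @flag_name. Bits without a name (index out of range or NULL entry)
 * are printed as their raw bit number instead.
 */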
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

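/*
 * The "state" attribute is also writable as a debugging aid: "run"
 * kicks the hardware queues, "start" restarts any stopped hardware
 * queues and "kick" kicks the requeue list. Any other input fails with
 * -EINVAL.
 */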
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

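/* Writing anything to "write_hints" resets all per-queue write hints to zero. */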
static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
};
#undef HCTX_FLAG_NAME

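/*
 * Print the tag allocation policy by name, then the remaining hctx
 * flags. The policy bits are masked out (via XOR) so that
 * blk_flags_show() does not decode them a second time.
 */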
static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

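/*
 * Print a one-line summary of @rq: operation, command flags, rq_flags,
 * blk-mq state and tags. A driver that implements ->show_rq() gets a
 * chance to append its own per-request details before the closing
 * brace.
 */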
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m, rq);

	return true;
}

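/*
 * The "busy" attribute iterates over the busy requests in the tag set
 * and prints those that have been assigned to this hardware queue.
 */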
static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

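/*
 * Dump a tag map: the counters first, then the sbitmap state of the
 * normal tag pool and, if present, of the reserved tag pool.
 */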
static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

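/*
 * The four readers below take q->sysfs_lock, presumably because
 * hctx->tags and hctx->sched_tags can be freed or reallocated while the
 * queue is registered, e.g. when nr_requests is updated via sysfs. The
 * lock is taken interruptibly so that a reader blocked on it can still
 * be killed.
 */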
static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

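/*
 * "dispatched" is a histogram of how many requests a single queue run
 * dispatched to the driver: the first row counts runs that dispatched
 * nothing, the following rows are power-of-two buckets labelled with
 * their lower bound, and the final "N+" row aggregates everything at
 * or above the largest bucket.
 */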
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

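/*
 * CTX_RQ_SEQ_OPS() expands to the seq_file boilerplate for one per-CPU
 * software queue list: ->start/->next/->stop iterate
 * ctx->rq_lists[type] under ctx->lock, and blk_mq_debugfs_rq_show() is
 * reused for the output.
 */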
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

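/*
 * Generic plumbing shared by all attributes: the struct
 * blk_mq_debugfs_attr is stored in the file inode's i_private, and the
 * object it operates on (queue, hctx or ctx) in the i_private of the
 * parent directory's inode (see debugfs_create_files() below).
 */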
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same as 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

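/*
 * Create the debugfs directory for @q and populate it, catching up on
 * any scheduler, hctx and rq_qos registrations that were attempted
 * before the directory existed.
 */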
void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return; we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

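/*
 * Each rq_qos policy that provides debugfs attributes gets its own
 * subdirectory under <queue>/rqos/; the "rqos" directory itself is
 * created lazily on first use.
 */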
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}