block/blk-mq-debugfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Facebook
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-tag.h"
#include "blk-rq-qos.h"

static void print_stat(struct seq_file *m, struct blk_rq_stat *stat)
{
	if (stat->nr_samples) {
		seq_printf(m, "samples=%d, mean=%llu, min=%llu, max=%llu",
			   stat->nr_samples, stat->mean, stat->min, stat->max);
	} else {
		seq_puts(m, "samples=0");
	}
}

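/*
 * Poll statistics are kept per request-size bucket: even entries of
 * q->poll_stat hold the read stats and odd entries the write stats. Bucket
 * sizes start at 512 bytes and double with each bucket.
 */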
static int queue_poll_stat_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int bucket;

	for (bucket = 0; bucket < (BLK_MQ_POLL_STATS_BKTS / 2); bucket++) {
		seq_printf(m, "read (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket]);
		seq_puts(m, "\n");

		seq_printf(m, "write (%d Bytes): ", 1 << (9 + bucket));
		print_stat(m, &q->poll_stat[2 * bucket + 1]);
		seq_puts(m, "\n");
	}
	return 0;
}

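/*
 * The requeue_list iterator holds q->requeue_lock with interrupts disabled
 * for the duration of the seq_file walk.
 */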
static void *queue_requeue_list_start(struct seq_file *m, loff_t *pos)
	__acquires(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_lock_irq(&q->requeue_lock);
	return seq_list_start(&q->requeue_list, *pos);
}

static void *queue_requeue_list_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct request_queue *q = m->private;

	return seq_list_next(v, &q->requeue_list, pos);
}

static void queue_requeue_list_stop(struct seq_file *m, void *v)
	__releases(&q->requeue_lock)
{
	struct request_queue *q = m->private;

	spin_unlock_irq(&q->requeue_lock);
}

static const struct seq_operations queue_requeue_list_seq_ops = {
	.start = queue_requeue_list_start,
	.next = queue_requeue_list_next,
	.stop = queue_requeue_list_stop,
	.show = blk_mq_debugfs_rq_show,
};

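/*
 * Print the names of the bits that are set in @flags, separated by '|'.
 * Bits that have no entry in @flag_name are printed as their bit number.
 */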
static int blk_flags_show(struct seq_file *m, const unsigned long flags,
			  const char *const *flag_name, int flag_name_count)
{
	bool sep = false;
	int i;

	for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) {
		if (!(flags & BIT(i)))
			continue;
		if (sep)
			seq_puts(m, "|");
		sep = true;
		if (i < flag_name_count && flag_name[i])
			seq_puts(m, flag_name[i]);
		else
			seq_printf(m, "%d", i);
	}
	return 0;
}

static int queue_pm_only_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	seq_printf(m, "%d\n", atomic_read(&q->pm_only));
	return 0;
}

#define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
static const char *const blk_queue_flag_name[] = {
	QUEUE_FLAG_NAME(STOPPED),
	QUEUE_FLAG_NAME(DYING),
	QUEUE_FLAG_NAME(NOMERGES),
	QUEUE_FLAG_NAME(SAME_COMP),
	QUEUE_FLAG_NAME(FAIL_IO),
	QUEUE_FLAG_NAME(NONROT),
	QUEUE_FLAG_NAME(IO_STAT),
	QUEUE_FLAG_NAME(DISCARD),
	QUEUE_FLAG_NAME(NOXMERGES),
	QUEUE_FLAG_NAME(ADD_RANDOM),
	QUEUE_FLAG_NAME(SECERASE),
	QUEUE_FLAG_NAME(SAME_FORCE),
	QUEUE_FLAG_NAME(DEAD),
	QUEUE_FLAG_NAME(INIT_DONE),
	QUEUE_FLAG_NAME(POLL),
	QUEUE_FLAG_NAME(WC),
	QUEUE_FLAG_NAME(FUA),
	QUEUE_FLAG_NAME(DAX),
	QUEUE_FLAG_NAME(STATS),
	QUEUE_FLAG_NAME(POLL_STATS),
	QUEUE_FLAG_NAME(REGISTERED),
	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
	QUEUE_FLAG_NAME(QUIESCED),
};
#undef QUEUE_FLAG_NAME

static int queue_state_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;

	blk_flags_show(m, q->queue_flags, blk_queue_flag_name,
		       ARRAY_SIZE(blk_queue_flag_name));
	seq_puts(m, "\n");
	return 0;
}

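/*
 * Writing "run", "start" or "kick" to the "state" attribute runs the hardware
 * queues, restarts stopped hardware queues or kicks the requeue list,
 * respectively.
 */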
static ssize_t queue_state_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	char opbuf[16] = { }, *op;

	/*
	 * The "state" attribute is removed after blk_cleanup_queue() has called
	 * blk_mq_free_queue(). Return if QUEUE_FLAG_DEAD has been set to avoid
	 * triggering a use-after-free.
	 */
	if (blk_queue_dead(q))
		return -ENOENT;

	if (count >= sizeof(opbuf)) {
		pr_err("%s: operation too long\n", __func__);
		goto inval;
	}

	if (copy_from_user(opbuf, buf, count))
		return -EFAULT;
	op = strstrip(opbuf);
	if (strcmp(op, "run") == 0) {
		blk_mq_run_hw_queues(q, true);
	} else if (strcmp(op, "start") == 0) {
		blk_mq_start_stopped_hw_queues(q, true);
	} else if (strcmp(op, "kick") == 0) {
		blk_mq_kick_requeue_list(q);
	} else {
		pr_err("%s: unsupported operation '%s'\n", __func__, op);
inval:
		pr_err("%s: use 'run', 'start' or 'kick'\n", __func__);
		return -EINVAL;
	}
	return count;
}

static int queue_write_hint_show(void *data, struct seq_file *m)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		seq_printf(m, "hint%d: %llu\n", i, q->write_hints[i]);

	return 0;
}

static ssize_t queue_write_hint_store(void *data, const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct request_queue *q = data;
	int i;

	for (i = 0; i < BLK_MAX_WRITE_HINTS; i++)
		q->write_hints[i] = 0;

	return count;
}

static const struct blk_mq_debugfs_attr blk_mq_debugfs_queue_attrs[] = {
	{ "poll_stat", 0400, queue_poll_stat_show },
	{ "requeue_list", 0400, .seq_ops = &queue_requeue_list_seq_ops },
	{ "pm_only", 0600, queue_pm_only_show, NULL },
	{ "state", 0600, queue_state_show, queue_state_write },
	{ "write_hints", 0600, queue_write_hint_show, queue_write_hint_store },
	{ "zone_wlock", 0400, queue_zone_wlock_show, NULL },
	{ },
};

#define HCTX_STATE_NAME(name) [BLK_MQ_S_##name] = #name
static const char *const hctx_state_name[] = {
	HCTX_STATE_NAME(STOPPED),
	HCTX_STATE_NAME(TAG_ACTIVE),
	HCTX_STATE_NAME(SCHED_RESTART),
	HCTX_STATE_NAME(INACTIVE),
};
#undef HCTX_STATE_NAME

static int hctx_state_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	blk_flags_show(m, hctx->state, hctx_state_name,
		       ARRAY_SIZE(hctx_state_name));
	seq_puts(m, "\n");
	return 0;
}

#define BLK_TAG_ALLOC_NAME(name) [BLK_TAG_ALLOC_##name] = #name
static const char *const alloc_policy_name[] = {
	BLK_TAG_ALLOC_NAME(FIFO),
	BLK_TAG_ALLOC_NAME(RR),
};
#undef BLK_TAG_ALLOC_NAME

#define HCTX_FLAG_NAME(name) [ilog2(BLK_MQ_F_##name)] = #name
static const char *const hctx_flag_name[] = {
	HCTX_FLAG_NAME(SHOULD_MERGE),
	HCTX_FLAG_NAME(TAG_SHARED),
	HCTX_FLAG_NAME(BLOCKING),
	HCTX_FLAG_NAME(NO_SCHED),
	HCTX_FLAG_NAME(STACKING),
};
#undef HCTX_FLAG_NAME

static int hctx_flags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	const int alloc_policy = BLK_MQ_FLAG_TO_ALLOC_POLICY(hctx->flags);

	seq_puts(m, "alloc_policy=");
	if (alloc_policy < ARRAY_SIZE(alloc_policy_name) &&
	    alloc_policy_name[alloc_policy])
		seq_puts(m, alloc_policy_name[alloc_policy]);
	else
		seq_printf(m, "%d", alloc_policy);
	seq_puts(m, " ");
	blk_flags_show(m,
		       hctx->flags ^ BLK_ALLOC_POLICY_TO_MQ_FLAG(alloc_policy),
		       hctx_flag_name, ARRAY_SIZE(hctx_flag_name));
	seq_puts(m, "\n");
	return 0;
}

#define CMD_FLAG_NAME(name) [__REQ_##name] = #name
static const char *const cmd_flag_name[] = {
	CMD_FLAG_NAME(FAILFAST_DEV),
	CMD_FLAG_NAME(FAILFAST_TRANSPORT),
	CMD_FLAG_NAME(FAILFAST_DRIVER),
	CMD_FLAG_NAME(SYNC),
	CMD_FLAG_NAME(META),
	CMD_FLAG_NAME(PRIO),
	CMD_FLAG_NAME(NOMERGE),
	CMD_FLAG_NAME(IDLE),
	CMD_FLAG_NAME(INTEGRITY),
	CMD_FLAG_NAME(FUA),
	CMD_FLAG_NAME(PREFLUSH),
	CMD_FLAG_NAME(RAHEAD),
	CMD_FLAG_NAME(BACKGROUND),
	CMD_FLAG_NAME(NOWAIT),
	CMD_FLAG_NAME(NOUNMAP),
	CMD_FLAG_NAME(HIPRI),
};
#undef CMD_FLAG_NAME

#define RQF_NAME(name) [ilog2((__force u32)RQF_##name)] = #name
static const char *const rqf_name[] = {
	RQF_NAME(SORTED),
	RQF_NAME(STARTED),
	RQF_NAME(SOFTBARRIER),
	RQF_NAME(FLUSH_SEQ),
	RQF_NAME(MIXED_MERGE),
	RQF_NAME(MQ_INFLIGHT),
	RQF_NAME(DONTPREP),
	RQF_NAME(PREEMPT),
	RQF_NAME(FAILED),
	RQF_NAME(QUIET),
	RQF_NAME(ELVPRIV),
	RQF_NAME(IO_STAT),
	RQF_NAME(ALLOCED),
	RQF_NAME(PM),
	RQF_NAME(HASHED),
	RQF_NAME(STATS),
	RQF_NAME(SPECIAL_PAYLOAD),
	RQF_NAME(ZONE_WRITE_LOCKED),
	RQF_NAME(MQ_POLL_SLEPT),
};
#undef RQF_NAME

static const char *const blk_mq_rq_state_name_array[] = {
	[MQ_RQ_IDLE] = "idle",
	[MQ_RQ_IN_FLIGHT] = "in_flight",
	[MQ_RQ_COMPLETE] = "complete",
};

static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
	if (WARN_ON_ONCE((unsigned int)rq_state >=
			 ARRAY_SIZE(blk_mq_rq_state_name_array)))
		return "(?)";
	return blk_mq_rq_state_name_array[rq_state];
}

int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq)
{
	const struct blk_mq_ops *const mq_ops = rq->q->mq_ops;
	const unsigned int op = req_op(rq);
	const char *op_str = blk_op_str(op);

	seq_printf(m, "%p {.op=", rq);
	if (strcmp(op_str, "UNKNOWN") == 0)
		seq_printf(m, "%u", op);
	else
		seq_printf(m, "%s", op_str);
	seq_puts(m, ", .cmd_flags=");
	blk_flags_show(m, rq->cmd_flags & ~REQ_OP_MASK, cmd_flag_name,
		       ARRAY_SIZE(cmd_flag_name));
	seq_puts(m, ", .rq_flags=");
	blk_flags_show(m, (__force unsigned int)rq->rq_flags, rqf_name,
		       ARRAY_SIZE(rqf_name));
	seq_printf(m, ", .state=%s", blk_mq_rq_state_name(blk_mq_rq_state(rq)));
	seq_printf(m, ", .tag=%d, .internal_tag=%d", rq->tag,
		   rq->internal_tag);
	if (mq_ops->show_rq)
		mq_ops->show_rq(m, rq);
	seq_puts(m, "}\n");
	return 0;
}
EXPORT_SYMBOL_GPL(__blk_mq_debugfs_rq_show);

int blk_mq_debugfs_rq_show(struct seq_file *m, void *v)
{
	return __blk_mq_debugfs_rq_show(m, list_entry_rq(v));
}
EXPORT_SYMBOL_GPL(blk_mq_debugfs_rq_show);

static void *hctx_dispatch_start(struct seq_file *m, loff_t *pos)
	__acquires(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_lock(&hctx->lock);
	return seq_list_start(&hctx->dispatch, *pos);
}

static void *hctx_dispatch_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	return seq_list_next(v, &hctx->dispatch, pos);
}

static void hctx_dispatch_stop(struct seq_file *m, void *v)
	__releases(&hctx->lock)
{
	struct blk_mq_hw_ctx *hctx = m->private;

	spin_unlock(&hctx->lock);
}

static const struct seq_operations hctx_dispatch_seq_ops = {
	.start = hctx_dispatch_start,
	.next = hctx_dispatch_next,
	.stop = hctx_dispatch_stop,
	.show = blk_mq_debugfs_rq_show,
};

struct show_busy_params {
	struct seq_file *m;
	struct blk_mq_hw_ctx *hctx;
};

/*
 * Note: the state of a request may change while this function is in progress,
 * e.g. due to a concurrent blk_mq_finish_request() call. Returns true to
 * keep iterating requests.
 */
static bool hctx_show_busy_rq(struct request *rq, void *data, bool reserved)
{
	const struct show_busy_params *params = data;

	if (rq->mq_hctx == params->hctx)
		__blk_mq_debugfs_rq_show(params->m,
					 list_entry_rq(&rq->queuelist));

	return true;
}

static int hctx_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct show_busy_params params = { .m = m, .hctx = hctx };

	blk_mq_tagset_busy_iter(hctx->queue->tag_set, hctx_show_busy_rq,
				&params);

	return 0;
}

static const char *const hctx_types[] = {
	[HCTX_TYPE_DEFAULT] = "default",
	[HCTX_TYPE_READ] = "read",
	[HCTX_TYPE_POLL] = "poll",
};

static int hctx_type_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	BUILD_BUG_ON(ARRAY_SIZE(hctx_types) != HCTX_MAX_TYPES);
	seq_printf(m, "%s\n", hctx_types[hctx->type]);
	return 0;
}

static int hctx_ctx_map_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	sbitmap_bitmap_show(&hctx->ctx_map, m);
	return 0;
}

static void blk_mq_debugfs_tags_show(struct seq_file *m,
				     struct blk_mq_tags *tags)
{
	seq_printf(m, "nr_tags=%u\n", tags->nr_tags);
	seq_printf(m, "nr_reserved_tags=%u\n", tags->nr_reserved_tags);
	seq_printf(m, "active_queues=%d\n",
		   atomic_read(&tags->active_queues));

	seq_puts(m, "\nbitmap_tags:\n");
	sbitmap_queue_show(&tags->bitmap_tags, m);

	if (tags->nr_reserved_tags) {
		seq_puts(m, "\nbreserved_tags:\n");
		sbitmap_queue_show(&tags->breserved_tags, m);
	}
}

static int hctx_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		blk_mq_debugfs_tags_show(m, hctx->tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->tags)
		sbitmap_bitmap_show(&hctx->tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		blk_mq_debugfs_tags_show(m, hctx->sched_tags);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_sched_tags_bitmap_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	struct request_queue *q = hctx->queue;
	int res;

	res = mutex_lock_interruptible(&q->sysfs_lock);
	if (res)
		goto out;
	if (hctx->sched_tags)
		sbitmap_bitmap_show(&hctx->sched_tags->bitmap_tags.sb, m);
	mutex_unlock(&q->sysfs_lock);

out:
	return res;
}

static int hctx_io_poll_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "considered=%lu\n", hctx->poll_considered);
	seq_printf(m, "invoked=%lu\n", hctx->poll_invoked);
	seq_printf(m, "success=%lu\n", hctx->poll_success);
	return 0;
}

static ssize_t hctx_io_poll_write(void *data, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
	return count;
}

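/*
 * Histogram of how many requests were dispatched per run of the hardware
 * queue: the first row counts runs that dispatched nothing, the following
 * rows cover power-of-two buckets, and the final row (marked '+') collects
 * everything larger.
 */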
static int hctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	seq_printf(m, "%8u\t%lu\n", 0U, hctx->dispatched[0]);

	for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
		unsigned int d = 1U << (i - 1);

		seq_printf(m, "%8u\t%lu\n", d, hctx->dispatched[i]);
	}

	seq_printf(m, "%8u+\t%lu\n", 1U << (i - 1), hctx->dispatched[i]);
	return 0;
}

static ssize_t hctx_dispatched_write(void *data, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;
	int i;

	for (i = 0; i < BLK_MQ_MAX_DISPATCH_ORDER; i++)
		hctx->dispatched[i] = 0;
	return count;
}

static int hctx_queued_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->queued);
	return 0;
}

static ssize_t hctx_queued_write(void *data, const char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->queued = 0;
	return count;
}

static int hctx_run_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%lu\n", hctx->run);
	return 0;
}

static ssize_t hctx_run_write(void *data, const char __user *buf, size_t count,
			      loff_t *ppos)
{
	struct blk_mq_hw_ctx *hctx = data;

	hctx->run = 0;
	return count;
}

static int hctx_active_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%d\n", atomic_read(&hctx->nr_active));
	return 0;
}

static int hctx_dispatch_busy_show(void *data, struct seq_file *m)
{
	struct blk_mq_hw_ctx *hctx = data;

	seq_printf(m, "%u\n", hctx->dispatch_busy);
	return 0;
}

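/*
 * Generate the seq_file iterators for the per-CPU software queue request
 * lists (default, read and poll). ctx->lock is held across the whole walk.
 */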
#define CTX_RQ_SEQ_OPS(name, type)					\
static void *ctx_##name##_rq_list_start(struct seq_file *m, loff_t *pos) \
	__acquires(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_lock(&ctx->lock);						\
	return seq_list_start(&ctx->rq_lists[type], *pos);		\
}									\
									\
static void *ctx_##name##_rq_list_next(struct seq_file *m, void *v,	\
				       loff_t *pos)			\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	return seq_list_next(v, &ctx->rq_lists[type], pos);		\
}									\
									\
static void ctx_##name##_rq_list_stop(struct seq_file *m, void *v)	\
	__releases(&ctx->lock)						\
{									\
	struct blk_mq_ctx *ctx = m->private;				\
									\
	spin_unlock(&ctx->lock);					\
}									\
									\
static const struct seq_operations ctx_##name##_rq_list_seq_ops = {	\
	.start = ctx_##name##_rq_list_start,				\
	.next = ctx_##name##_rq_list_next,				\
	.stop = ctx_##name##_rq_list_stop,				\
	.show = blk_mq_debugfs_rq_show,					\
}

CTX_RQ_SEQ_OPS(default, HCTX_TYPE_DEFAULT);
CTX_RQ_SEQ_OPS(read, HCTX_TYPE_READ);
CTX_RQ_SEQ_OPS(poll, HCTX_TYPE_POLL);

static int ctx_dispatched_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_dispatched[1], ctx->rq_dispatched[0]);
	return 0;
}

static ssize_t ctx_dispatched_write(void *data, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_dispatched[0] = ctx->rq_dispatched[1] = 0;
	return count;
}

static int ctx_merged_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu\n", ctx->rq_merged);
	return 0;
}

static ssize_t ctx_merged_write(void *data, const char __user *buf,
				size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_merged = 0;
	return count;
}

static int ctx_completed_show(void *data, struct seq_file *m)
{
	struct blk_mq_ctx *ctx = data;

	seq_printf(m, "%lu %lu\n", ctx->rq_completed[1], ctx->rq_completed[0]);
	return 0;
}

static ssize_t ctx_completed_write(void *data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct blk_mq_ctx *ctx = data;

	ctx->rq_completed[0] = ctx->rq_completed[1] = 0;
	return count;
}

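/*
 * debugfs_create_files() stores the exported object (queue, hctx, ctx or
 * rq_qos) in the parent directory's inode and the attribute descriptor in
 * each file's inode, so both can be recovered from the file being accessed.
 */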
static int blk_mq_debugfs_show(struct seq_file *m, void *v)
{
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(m->file->f_path.dentry->d_parent)->i_private;

	return attr->show(data, m);
}

static ssize_t blk_mq_debugfs_write(struct file *file, const char __user *buf,
				    size_t count, loff_t *ppos)
{
	struct seq_file *m = file->private_data;
	const struct blk_mq_debugfs_attr *attr = m->private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;

	/*
	 * Attributes that only implement .seq_ops are read-only and 'attr' is
	 * the same with 'data' in this case.
	 */
	if (attr == data || !attr->write)
		return -EPERM;

	return attr->write(data, buf, count, ppos);
}

static int blk_mq_debugfs_open(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;
	void *data = d_inode(file->f_path.dentry->d_parent)->i_private;
	struct seq_file *m;
	int ret;

	if (attr->seq_ops) {
		ret = seq_open(file, attr->seq_ops);
		if (!ret) {
			m = file->private_data;
			m->private = data;
		}
		return ret;
	}

	if (WARN_ON_ONCE(!attr->show))
		return -EPERM;

	return single_open(file, blk_mq_debugfs_show, inode->i_private);
}

static int blk_mq_debugfs_release(struct inode *inode, struct file *file)
{
	const struct blk_mq_debugfs_attr *attr = inode->i_private;

	if (attr->show)
		return single_release(inode, file);

	return seq_release(inode, file);
}

static const struct file_operations blk_mq_debugfs_fops = {
	.open = blk_mq_debugfs_open,
	.read = seq_read,
	.write = blk_mq_debugfs_write,
	.llseek = seq_lseek,
	.release = blk_mq_debugfs_release,
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_hctx_attrs[] = {
	{"state", 0400, hctx_state_show},
	{"flags", 0400, hctx_flags_show},
	{"dispatch", 0400, .seq_ops = &hctx_dispatch_seq_ops},
	{"busy", 0400, hctx_busy_show},
	{"ctx_map", 0400, hctx_ctx_map_show},
	{"tags", 0400, hctx_tags_show},
	{"tags_bitmap", 0400, hctx_tags_bitmap_show},
	{"sched_tags", 0400, hctx_sched_tags_show},
	{"sched_tags_bitmap", 0400, hctx_sched_tags_bitmap_show},
	{"io_poll", 0600, hctx_io_poll_show, hctx_io_poll_write},
	{"dispatched", 0600, hctx_dispatched_show, hctx_dispatched_write},
	{"queued", 0600, hctx_queued_show, hctx_queued_write},
	{"run", 0600, hctx_run_show, hctx_run_write},
	{"active", 0400, hctx_active_show},
	{"dispatch_busy", 0400, hctx_dispatch_busy_show},
	{"type", 0400, hctx_type_show},
	{},
};

static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
	{"default_rq_list", 0400, .seq_ops = &ctx_default_rq_list_seq_ops},
	{"read_rq_list", 0400, .seq_ops = &ctx_read_rq_list_seq_ops},
	{"poll_rq_list", 0400, .seq_ops = &ctx_poll_rq_list_seq_ops},
	{"dispatched", 0600, ctx_dispatched_show, ctx_dispatched_write},
	{"merged", 0600, ctx_merged_show, ctx_merged_write},
	{"completed", 0600, ctx_completed_show, ctx_completed_write},
	{},
};

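/*
 * Create one debugfs file per attribute under @parent. The exported object
 * (@data) is attached to the parent directory's inode so that the shared
 * file operations above can look it up from any of its attribute files.
 */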
static void debugfs_create_files(struct dentry *parent, void *data,
				 const struct blk_mq_debugfs_attr *attr)
{
	if (IS_ERR_OR_NULL(parent))
		return;

	d_inode(parent)->i_private = data;

	for (; attr->name; attr++)
		debugfs_create_file(attr->name, attr->mode, parent,
				    (void *)attr, &blk_mq_debugfs_fops);
}

void blk_mq_debugfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
					    blk_debugfs_root);

	debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);

	/*
	 * blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
	 * didn't exist yet (because we don't know what to name the directory
	 * until the queue is registered to a gendisk).
	 */
	if (q->elevator && !q->sched_debugfs_dir)
		blk_mq_debugfs_register_sched(q);

	/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
	queue_for_each_hw_ctx(q, hctx, i) {
		if (!hctx->debugfs_dir)
			blk_mq_debugfs_register_hctx(q, hctx);
		if (q->elevator && !hctx->sched_debugfs_dir)
			blk_mq_debugfs_register_sched_hctx(q, hctx);
	}

	if (q->rq_qos) {
		struct rq_qos *rqos = q->rq_qos;

		while (rqos) {
			blk_mq_debugfs_register_rqos(rqos);
			rqos = rqos->next;
		}
	}
}

void blk_mq_debugfs_unregister(struct request_queue *q)
{
	debugfs_remove_recursive(q->debugfs_dir);
	q->sched_debugfs_dir = NULL;
	q->debugfs_dir = NULL;
}

static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
					struct blk_mq_ctx *ctx)
{
	struct dentry *ctx_dir;
	char name[20];

	snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
	ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);

	debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
}

void blk_mq_debugfs_register_hctx(struct request_queue *q,
				  struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	char name[20];
	int i;

	snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
	hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);

	debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);

	hctx_for_each_ctx(hctx, ctx, i)
		blk_mq_debugfs_register_ctx(hctx, ctx);
}

void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
	hctx->debugfs_dir = NULL;
}

void blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_register_hctx(q, hctx);
}

void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_debugfs_unregister_hctx(hctx);
}

void blk_mq_debugfs_register_sched(struct request_queue *q)
{
	struct elevator_type *e = q->elevator->type;

	/*
	 * If the parent directory has not been created yet, return, we will be
	 * called again later on and the directory/files will be created then.
	 */
	if (!q->debugfs_dir)
		return;

	if (!e->queue_debugfs_attrs)
		return;

	q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);

	debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched(struct request_queue *q)
{
	debugfs_remove_recursive(q->sched_debugfs_dir);
	q->sched_debugfs_dir = NULL;
}

void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
{
	debugfs_remove_recursive(rqos->debugfs_dir);
	rqos->debugfs_dir = NULL;
}

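/*
 * Register a rq_qos policy under <queue>/rqos/<policy name>. Nothing is done
 * if the policy has no debugfs attributes or has already been registered.
 */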
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
{
	struct request_queue *q = rqos->q;
	const char *dir_name = rq_qos_id_to_name(rqos->id);

	if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
		return;

	if (!q->rqos_debugfs_dir)
		q->rqos_debugfs_dir = debugfs_create_dir("rqos",
							 q->debugfs_dir);

	rqos->debugfs_dir = debugfs_create_dir(dir_name,
					       rqos->q->rqos_debugfs_dir);

	debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
}

void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
{
	debugfs_remove_recursive(q->rqos_debugfs_dir);
	q->rqos_debugfs_dir = NULL;
}

void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
					struct blk_mq_hw_ctx *hctx)
{
	struct elevator_type *e = q->elevator->type;

	if (!e->hctx_debugfs_attrs)
		return;

	hctx->sched_debugfs_dir = debugfs_create_dir("sched",
						     hctx->debugfs_dir);
	debugfs_create_files(hctx->sched_debugfs_dir, hctx,
			     e->hctx_debugfs_attrs);
}

void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
{
	debugfs_remove_recursive(hctx->sched_debugfs_dir);
	hctx->sched_debugfs_dir = NULL;
}