1 // SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/blktrace_api.h>
#include <linux/debugfs.h>

#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-debugfs.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"
#include "blk-cgroup.h"
#include "blk-throttle.h"
struct queue_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct request_queue *, char *);
	ssize_t (*store)(struct request_queue *, const char *, size_t);
};
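/*
 * Each entry pairs a sysfs attribute with show/store callbacks that take
 * the request_queue directly; queue_attr_show()/queue_attr_store() below
 * adapt them to the generic sysfs_ops interface and serialize all
 * callbacks behind q->sysfs_lock.
 */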
static ssize_t
queue_var_show(unsigned long var, char *page)
{
	return sprintf(page, "%lu\n", var);
}
static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
	int err;
	unsigned long v;

	err = kstrtoul(page, 10, &v);
	if (err || v > UINT_MAX)
		return -EINVAL;

	*var = v;

	return count;
}
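/*
 * Most numeric store handlers in this file funnel through
 * queue_var_store(), so values above UINT_MAX are rejected up front:
 * callers typically assign the result to 32-bit queue limit fields.
 */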
static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->nr_requests, page);
}
static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long nr;
	int ret, err;

	if (!queue_is_mq(q))
		return -EINVAL;

	ret = queue_var_store(&nr, page, count);
	if (ret < 0)
		return ret;

	if (nr < BLKDEV_MIN_RQ)
		nr = BLKDEV_MIN_RQ;

	err = blk_mq_update_nr_requests(q, nr);
	if (err)
		return err;

	return ret;
}
static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
	unsigned long ra_kb;

	if (!q->disk)
		return -EINVAL;
	ra_kb = q->disk->bdi->ra_pages << (PAGE_SHIFT - 10);
	return queue_var_show(ra_kb, page);
}
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret;

	if (!q->disk)
		return -EINVAL;
	ret = queue_var_store(&ra_kb, page, count);
	if (ret < 0)
		return ret;
	q->disk->bdi->ra_pages = ra_kb >> (PAGE_SHIFT - 10);
	return ret;
}
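/*
 * bdi->ra_pages is kept in PAGE_SIZE units while the sysfs file is in
 * kilobytes, hence the shift by (PAGE_SHIFT - 10). With 4 KiB pages,
 * e.g. "echo 128 > /sys/block/<dev>/queue/read_ahead_kb" stores 32 pages.
 */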
static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
	int max_sectors_kb = queue_max_sectors(q) >> 1;

	return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segments(q), page);
}

static ssize_t queue_max_discard_segments_show(struct request_queue *q,
		char *page)
{
	return queue_var_show(queue_max_discard_segments(q), page);
}

static ssize_t queue_max_integrity_segments_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.max_integrity_segments, page);
}

static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_max_segment_size(q), page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_chunk_sectors_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.chunk_sectors, page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_io_opt(q), page);
}

static ssize_t queue_discard_granularity_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.discard_granularity, page);
}
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_hw_discard_sectors << 9);
}

static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		       (unsigned long long)q->limits.max_discard_sectors << 9);
}
static ssize_t queue_discard_max_store(struct request_queue *q,
				       const char *page, size_t count)
{
	unsigned long max_discard;
	ssize_t ret = queue_var_store(&max_discard, page, count);

	if (ret < 0)
		return ret;

	if (max_discard & (q->limits.discard_granularity - 1))
		return -EINVAL;

	/* convert the byte value from sysfs to 512-byte sectors */
	max_discard >>= 9;
	if (max_discard > UINT_MAX)
		return -EINVAL;

	if (max_discard > q->limits.max_hw_discard_sectors)
		max_discard = q->limits.max_hw_discard_sectors;

	q->limits.max_discard_sectors = max_discard;
	return ret;
}
static ssize_t queue_discard_zeroes_data_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}

static ssize_t queue_write_same_max_show(struct request_queue *q, char *page)
{
	return queue_var_show(0, page);
}
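/*
 * discard_zeroes_data and write_same_max_bytes are retained only for
 * ABI compatibility; the underlying functionality was removed, so both
 * attributes now always report 0.
 */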
static ssize_t queue_write_zeroes_max_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%llu\n",
		(unsigned long long)q->limits.max_write_zeroes_sectors << 9);
}
static ssize_t queue_zone_write_granularity_show(struct request_queue *q,
						 char *page)
{
	return queue_var_show(queue_zone_write_granularity(q), page);
}
static ssize_t queue_zone_append_max_show(struct request_queue *q, char *page)
{
	unsigned long long max_sectors = q->limits.max_zone_append_sectors;

	return sprintf(page, "%llu\n", max_sectors << SECTOR_SHIFT);
}
static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long var;
	unsigned int max_sectors_kb,
		max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
			page_kb = 1 << (PAGE_SHIFT - 10);
	ssize_t ret = queue_var_store(&var, page, count);

	if (ret < 0)
		return ret;

	max_sectors_kb = (unsigned int)var;
	max_hw_sectors_kb = min_not_zero(max_hw_sectors_kb,
					 q->limits.max_dev_sectors >> 1);
	if (max_sectors_kb == 0) {
		q->limits.max_user_sectors = 0;
		max_sectors_kb = min(max_hw_sectors_kb,
				     BLK_DEF_MAX_SECTORS >> 1);
	} else {
		if (max_sectors_kb > max_hw_sectors_kb ||
		    max_sectors_kb < page_kb)
			return -EINVAL;
		q->limits.max_user_sectors = max_sectors_kb << 1;
	}

	spin_lock_irq(&q->queue_lock);
	q->limits.max_sectors = max_sectors_kb << 1;
	if (q->disk)
		q->disk->bdi->io_pages = max_sectors_kb >> (PAGE_SHIFT - 10);
	spin_unlock_irq(&q->queue_lock);

	return ret;
}
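/*
 * The << 1 / >> 1 shifts above convert between kilobytes (the sysfs
 * unit) and 512-byte sectors (the queue limit unit): 1 KiB = 2 sectors.
 * bdi->io_pages is updated alongside so readahead sizing stays within
 * the new request size cap.
 */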
static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
	int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

	return queue_var_show(max_hw_sectors_kb, page);
}
static ssize_t queue_virt_boundary_mask_show(struct request_queue *q, char *page)
{
	return queue_var_show(q->limits.virt_boundary_mask, page);
}

static ssize_t queue_dma_alignment_show(struct request_queue *q, char *page)
{
	return queue_var_show(queue_dma_alignment(q), page);
}
#define QUEUE_SYSFS_BIT_FNS(name, flag, neg)				\
static ssize_t								\
queue_##name##_show(struct request_queue *q, char *page)		\
{									\
	int bit;							\
	bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags);		\
	return queue_var_show(neg ? !bit : bit, page);			\
}									\
static ssize_t								\
queue_##name##_store(struct request_queue *q, const char *page, size_t count) \
{									\
	unsigned long val;						\
	ssize_t ret;							\
	ret = queue_var_store(&val, page, count);			\
	if (ret < 0)							\
		return ret;						\
	if (neg)							\
		val = !val;						\
									\
	if (val)							\
		blk_queue_flag_set(QUEUE_FLAG_##flag, q);		\
	else								\
		blk_queue_flag_clear(QUEUE_FLAG_##flag, q);		\
	return ret;							\
}
QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1);
QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0);
QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0);
QUEUE_SYSFS_BIT_FNS(stable_writes, STABLE_WRITES, 0);
#undef QUEUE_SYSFS_BIT_FNS
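/*
 * Each invocation above emits a show/store pair; e.g. the nonrot case
 * expands to queue_nonrot_show()/queue_nonrot_store() operating on
 * QUEUE_FLAG_NONROT, with neg=1 inverting the bit so the "rotational"
 * file reads 0 for non-rotational devices.
 */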
static ssize_t queue_zoned_show(struct request_queue *q, char *page)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
		return sprintf(page, "host-aware\n");
	case BLK_ZONED_HM:
		return sprintf(page, "host-managed\n");
	default:
		return sprintf(page, "none\n");
	}
}
static ssize_t queue_nr_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(disk_nr_zones(q->disk), page);
}
static ssize_t queue_max_open_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_open_zones(q->disk->part0), page);
}

static ssize_t queue_max_active_zones_show(struct request_queue *q, char *page)
{
	return queue_var_show(bdev_max_active_zones(q->disk->part0), page);
}
static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
	return queue_var_show((blk_queue_nomerges(q) << 1) |
			       blk_queue_noxmerges(q), page);
}
static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
				    size_t count)
{
	unsigned long nm;
	ssize_t ret = queue_var_store(&nm, page, count);

	if (ret < 0)
		return ret;

	blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
	blk_queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
	if (nm == 2)
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
	else if (nm)
		blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);

	return ret;
}
static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
	bool set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);
	bool force = test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags);

	return queue_var_show(set << force, page);
}
static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
	ssize_t ret = -EINVAL;
#ifdef CONFIG_SMP
	unsigned long val;

	ret = queue_var_store(&val, page, count);
	if (ret < 0)
		return ret;

	if (val == 2) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_set(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 1) {
		blk_queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	} else if (val == 0) {
		blk_queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
		blk_queue_flag_clear(QUEUE_FLAG_SAME_FORCE, q);
	}
#endif
	return ret;
}
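/*
 * rq_affinity accepts 0 (completions may run on any CPU), 1 (complete
 * on a CPU "close" to the submitter, SAME_COMP) or 2 (force completion
 * on the exact submitting CPU, SAME_COMP | SAME_FORCE); the show side
 * reports set << force, i.e. 0, 1 or 2.
 */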
static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%d\n", -1);
}
static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
				size_t count)
{
	return count;
}
static ssize_t queue_poll_show(struct request_queue *q, char *page)
{
	return queue_var_show(test_bit(QUEUE_FLAG_POLL, &q->queue_flags), page);
}
static ssize_t queue_poll_store(struct request_queue *q, const char *page,
				size_t count)
{
	if (!test_bit(QUEUE_FLAG_POLL, &q->queue_flags))
		return -EINVAL;
	pr_info_ratelimited("writes to the poll attribute are ignored.\n");
	pr_info_ratelimited("please use driver specific parameters instead.\n");
	return count;
}
static ssize_t queue_io_timeout_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", jiffies_to_msecs(q->rq_timeout));
}
static ssize_t queue_io_timeout_store(struct request_queue *q, const char *page,
				  size_t count)
{
	unsigned int val;
	int err;

	err = kstrtou32(page, 10, &val);
	if (err || val == 0)
		return -EINVAL;

	blk_queue_rq_timeout(q, msecs_to_jiffies(val));

	return count;
}
static ssize_t queue_wc_show(struct request_queue *q, char *page)
{
	if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
		return sprintf(page, "write back\n");

	return sprintf(page, "write through\n");
}
static ssize_t queue_wc_store(struct request_queue *q, const char *page,
			      size_t count)
{
	if (!strncmp(page, "write back", 10)) {
		if (!test_bit(QUEUE_FLAG_HW_WC, &q->queue_flags))
			return -EINVAL;
		blk_queue_flag_set(QUEUE_FLAG_WC, q);
	} else if (!strncmp(page, "write through", 13) ||
		 !strncmp(page, "none", 4)) {
		blk_queue_flag_clear(QUEUE_FLAG_WC, q);
	} else {
		return -EINVAL;
	}

	return count;
}
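/*
 * Accepted values: "write back", "write through", or "none" ("write
 * through" and "none" both clear QUEUE_FLAG_WC). "write back" may only
 * be enabled when the device actually has a write-back cache
 * (QUEUE_FLAG_HW_WC); claiming a volatile cache the hardware lacks
 * would only make the block layer issue needless flushes.
 */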
static ssize_t queue_fua_show(struct request_queue *q, char *page)
{
	return sprintf(page, "%u\n", test_bit(QUEUE_FLAG_FUA, &q->queue_flags));
}
static ssize_t queue_dax_show(struct request_queue *q, char *page)
{
	return queue_var_show(blk_queue_dax(q), page);
}
#define QUEUE_RO_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0444 },	\
	.show	= _prefix##_show,			\
};

#define QUEUE_RW_ENTRY(_prefix, _name)			\
static struct queue_sysfs_entry _prefix##_entry = {	\
	.attr	= { .name = _name, .mode = 0644 },	\
	.show	= _prefix##_show,			\
	.store	= _prefix##_store,			\
};
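/*
 * QUEUE_RO_ENTRY wires up a read-only (0444) attribute with only a show
 * method; QUEUE_RW_ENTRY additionally hooks a store method and makes the
 * file root-writable (0644). Both expect <prefix>_show/<prefix>_store
 * functions as defined above.
 */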
QUEUE_RW_ENTRY(queue_requests, "nr_requests");
QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
QUEUE_RO_ENTRY(queue_max_integrity_segments, "max_integrity_segments");
QUEUE_RO_ENTRY(queue_max_segment_size, "max_segment_size");
QUEUE_RW_ENTRY(elv_iosched, "scheduler");

QUEUE_RO_ENTRY(queue_logical_block_size, "logical_block_size");
QUEUE_RO_ENTRY(queue_physical_block_size, "physical_block_size");
QUEUE_RO_ENTRY(queue_chunk_sectors, "chunk_sectors");
QUEUE_RO_ENTRY(queue_io_min, "minimum_io_size");
QUEUE_RO_ENTRY(queue_io_opt, "optimal_io_size");

QUEUE_RO_ENTRY(queue_max_discard_segments, "max_discard_segments");
QUEUE_RO_ENTRY(queue_discard_granularity, "discard_granularity");
QUEUE_RO_ENTRY(queue_discard_max_hw, "discard_max_hw_bytes");
QUEUE_RW_ENTRY(queue_discard_max, "discard_max_bytes");
QUEUE_RO_ENTRY(queue_discard_zeroes_data, "discard_zeroes_data");

QUEUE_RO_ENTRY(queue_write_same_max, "write_same_max_bytes");
QUEUE_RO_ENTRY(queue_write_zeroes_max, "write_zeroes_max_bytes");
QUEUE_RO_ENTRY(queue_zone_append_max, "zone_append_max_bytes");
QUEUE_RO_ENTRY(queue_zone_write_granularity, "zone_write_granularity");

QUEUE_RO_ENTRY(queue_zoned, "zoned");
QUEUE_RO_ENTRY(queue_nr_zones, "nr_zones");
QUEUE_RO_ENTRY(queue_max_open_zones, "max_open_zones");
QUEUE_RO_ENTRY(queue_max_active_zones, "max_active_zones");

QUEUE_RW_ENTRY(queue_nomerges, "nomerges");
QUEUE_RW_ENTRY(queue_rq_affinity, "rq_affinity");
QUEUE_RW_ENTRY(queue_poll, "io_poll");
QUEUE_RW_ENTRY(queue_poll_delay, "io_poll_delay");
QUEUE_RW_ENTRY(queue_wc, "write_cache");
QUEUE_RO_ENTRY(queue_fua, "fua");
QUEUE_RO_ENTRY(queue_dax, "dax");
QUEUE_RW_ENTRY(queue_io_timeout, "io_timeout");
QUEUE_RO_ENTRY(queue_virt_boundary_mask, "virt_boundary_mask");
QUEUE_RO_ENTRY(queue_dma_alignment, "dma_alignment");
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
QUEUE_RW_ENTRY(blk_throtl_sample_time, "throttle_sample_time");
#endif
/* legacy alias for logical_block_size: */
static struct queue_sysfs_entry queue_hw_sector_size_entry = {
	.attr = {.name = "hw_sector_size", .mode = 0444 },
	.show = queue_logical_block_size_show,
};
QUEUE_RW_ENTRY(queue_nonrot, "rotational");
QUEUE_RW_ENTRY(queue_iostats, "iostats");
QUEUE_RW_ENTRY(queue_random, "add_random");
QUEUE_RW_ENTRY(queue_stable_writes, "stable_writes");
#ifdef CONFIG_BLK_WBT
static ssize_t queue_var_store64(s64 *var, const char *page)
{
	int err;
	s64 v;

	err = kstrtos64(page, 10, &v);
	if (err < 0)
		return err;

	*var = v;
	return 0;
}
static ssize_t queue_wb_lat_show(struct request_queue *q, char *page)
{
	if (!wbt_rq_qos(q))
		return -EINVAL;

	if (wbt_disabled(q))
		return sprintf(page, "0\n");

	return sprintf(page, "%llu\n", div_u64(wbt_get_min_lat(q), 1000));
}
static ssize_t queue_wb_lat_store(struct request_queue *q, const char *page,
				  size_t count)
{
	struct rq_qos *rqos;
	ssize_t ret;
	s64 val;

	ret = queue_var_store64(&val, page);
	if (ret < 0)
		return ret;
	if (val < -1)
		return -EINVAL;

	rqos = wbt_rq_qos(q);
	if (!rqos) {
		ret = wbt_init(q->disk);
		if (ret)
			return ret;
	}

	if (val == -1)
		val = wbt_default_latency_nsec(q);
	else if (val >= 0)
		val *= 1000ULL;

	if (wbt_get_min_lat(q) == val)
		return count;

	/*
	 * Ensure that the queue is idled, in case the latency update
	 * ends up either enabling or disabling wbt completely. We can't
	 * have IO inflight if that happens.
	 */
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

	wbt_set_min_lat(q, val);

	blk_mq_unquiesce_queue(q);
	blk_mq_unfreeze_queue(q);

	return count;
}
QUEUE_RW_ENTRY(queue_wb_lat, "wbt_lat_usec");
#endif
static struct attribute *queue_attrs[] = {
	&queue_ra_entry.attr,
	&queue_max_hw_sectors_entry.attr,
	&queue_max_sectors_entry.attr,
	&queue_max_segments_entry.attr,
	&queue_max_discard_segments_entry.attr,
	&queue_max_integrity_segments_entry.attr,
	&queue_max_segment_size_entry.attr,
	&queue_hw_sector_size_entry.attr,
	&queue_logical_block_size_entry.attr,
	&queue_physical_block_size_entry.attr,
	&queue_chunk_sectors_entry.attr,
	&queue_io_min_entry.attr,
	&queue_io_opt_entry.attr,
	&queue_discard_granularity_entry.attr,
	&queue_discard_max_entry.attr,
	&queue_discard_max_hw_entry.attr,
	&queue_discard_zeroes_data_entry.attr,
	&queue_write_same_max_entry.attr,
	&queue_write_zeroes_max_entry.attr,
	&queue_zone_append_max_entry.attr,
	&queue_zone_write_granularity_entry.attr,
	&queue_nonrot_entry.attr,
	&queue_zoned_entry.attr,
	&queue_nr_zones_entry.attr,
	&queue_max_open_zones_entry.attr,
	&queue_max_active_zones_entry.attr,
	&queue_nomerges_entry.attr,
	&queue_iostats_entry.attr,
	&queue_stable_writes_entry.attr,
	&queue_random_entry.attr,
	&queue_poll_entry.attr,
	&queue_wc_entry.attr,
	&queue_fua_entry.attr,
	&queue_dax_entry.attr,
	&queue_poll_delay_entry.attr,
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	&blk_throtl_sample_time_entry.attr,
#endif
	&queue_virt_boundary_mask_entry.attr,
	&queue_dma_alignment_entry.attr,
	NULL,
};
static struct attribute *blk_mq_queue_attrs[] = {
	&queue_requests_entry.attr,
	&elv_iosched_entry.attr,
	&queue_rq_affinity_entry.attr,
	&queue_io_timeout_entry.attr,
#ifdef CONFIG_BLK_WBT
	&queue_wb_lat_entry.attr,
#endif
	NULL,
};
static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	/* hide the zone limits on non-zoned devices */
	if ((attr == &queue_max_open_zones_entry.attr ||
	     attr == &queue_max_active_zones_entry.attr) &&
	    !blk_queue_is_zoned(q))
		return 0;

	return attr->mode;
}
static umode_t blk_mq_queue_attr_visible(struct kobject *kobj,
					 struct attribute *attr, int n)
{
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;

	if (!queue_is_mq(q))
		return 0;

	/* io_timeout is only meaningful if the driver has a timeout handler */
	if (attr == &queue_io_timeout_entry.attr && !q->mq_ops->timeout)
		return 0;

	return attr->mode;
}
static struct attribute_group queue_attr_group = {
	.attrs = queue_attrs,
	.is_visible = queue_attr_visible,
};

static struct attribute_group blk_mq_queue_attr_group = {
	.attrs = blk_mq_queue_attrs,
	.is_visible = blk_mq_queue_attr_visible,
};
#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->show)
		return -EIO;
	mutex_lock(&q->sysfs_lock);
	res = entry->show(q, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
		 const char *page, size_t length)
{
	struct queue_sysfs_entry *entry = to_queue(attr);
	struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
	struct request_queue *q = disk->queue;
	ssize_t res;

	if (!entry->store)
		return -EIO;

	mutex_lock(&q->sysfs_lock);
	res = entry->store(q, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}
static const struct sysfs_ops queue_sysfs_ops = {
	.show	= queue_attr_show,
	.store	= queue_attr_store,
};
static const struct attribute_group *blk_queue_attr_groups[] = {
	&queue_attr_group,
	&blk_mq_queue_attr_group,
	NULL,
};
static void blk_queue_release(struct kobject *kobj)
{
	/* nothing to do here, all data is associated with the parent gendisk */
}
static const struct kobj_type blk_queue_ktype = {
	.default_groups = blk_queue_attr_groups,
	.sysfs_ops	= &queue_sysfs_ops,
	.release	= blk_queue_release,
};
static void blk_debugfs_remove(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	mutex_lock(&q->debugfs_mutex);
	blk_trace_shutdown(q);
	debugfs_remove_recursive(q->debugfs_dir);
	q->debugfs_dir = NULL;
	q->sched_debugfs_dir = NULL;
	q->rqos_debugfs_dir = NULL;
	mutex_unlock(&q->debugfs_mutex);
}
/**
 * blk_register_queue - register a block layer queue with sysfs
 * @disk: Disk of which the request queue should be registered with sysfs.
 */
int blk_register_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;
	int ret;

	mutex_lock(&q->sysfs_dir_lock);
	kobject_init(&disk->queue_kobj, &blk_queue_ktype);
	ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
	if (ret < 0)
		goto out_put_queue_kobj;

	if (queue_is_mq(q)) {
		ret = blk_mq_sysfs_register(disk);
		if (ret)
			goto out_put_queue_kobj;
	}
	mutex_lock(&q->sysfs_lock);

	mutex_lock(&q->debugfs_mutex);
	q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
	if (queue_is_mq(q))
		blk_mq_debugfs_register(q);
	mutex_unlock(&q->debugfs_mutex);

	ret = disk_register_independent_access_ranges(disk);
	if (ret)
		goto out_debugfs_remove;

	if (q->elevator) {
		ret = elv_register_queue(q, false);
		if (ret)
			goto out_unregister_ia_ranges;
	}

	ret = blk_crypto_sysfs_register(disk);
	if (ret)
		goto out_elv_unregister;

	blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
	wbt_enable_default(disk);
	blk_throtl_register(disk);

	/* Now everything is ready and send out KOBJ_ADD uevent */
	kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
	if (q->elevator)
		kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
	mutex_unlock(&q->sysfs_lock);
	mutex_unlock(&q->sysfs_dir_lock);

	/*
	 * SCSI probing may synchronously create and destroy a lot of
	 * request_queues for non-existent devices. Shutting down a fully
	 * functional queue takes measurable wallclock time as RCU grace
	 * periods are involved. To avoid excessive latency in these
	 * cases, a request_queue starts out in a degraded mode which is
	 * faster to shut down and is made fully functional here as
	 * request_queues for non-existent devices never get registered.
	 */
	if (!blk_queue_init_done(q)) {
		blk_queue_flag_set(QUEUE_FLAG_INIT_DONE, q);
		percpu_ref_switch_to_percpu(&q->q_usage_counter);
	}

	return ret;

out_elv_unregister:
	elv_unregister_queue(q);
out_unregister_ia_ranges:
	disk_unregister_independent_access_ranges(disk);
out_debugfs_remove:
	blk_debugfs_remove(disk);
	mutex_unlock(&q->sysfs_lock);
out_put_queue_kobj:
	kobject_put(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);
	return ret;
}
/**
 * blk_unregister_queue - counterpart of blk_register_queue()
 * @disk: Disk of which the request queue should be unregistered from sysfs.
 *
 * Note: the caller is responsible for guaranteeing that this function is called
 * after blk_register_queue() has finished.
 */
void blk_unregister_queue(struct gendisk *disk)
{
	struct request_queue *q = disk->queue;

	if (WARN_ON(!q))
		return;

	/* Return early if disk->queue was never registered. */
	if (!blk_queue_registered(q))
		return;

	/*
	 * Since sysfs_remove_dir() prevents adding new directory entries
	 * before removal of existing entries starts, protect against
	 * concurrent elv_iosched_store() calls.
	 */
	mutex_lock(&q->sysfs_lock);
	blk_queue_flag_clear(QUEUE_FLAG_REGISTERED, q);
	mutex_unlock(&q->sysfs_lock);

	mutex_lock(&q->sysfs_dir_lock);
	/*
	 * Remove the sysfs attributes before unregistering the queue data
	 * structures that can be modified through sysfs.
	 */
	if (queue_is_mq(q))
		blk_mq_sysfs_unregister(disk);
	blk_crypto_sysfs_unregister(disk);

	mutex_lock(&q->sysfs_lock);
	elv_unregister_queue(q);
	disk_unregister_independent_access_ranges(disk);
	mutex_unlock(&q->sysfs_lock);

	/* Now that we've deleted all child objects, we can delete the queue. */
	kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
	kobject_del(&disk->queue_kobj);
	mutex_unlock(&q->sysfs_dir_lock);

	blk_debugfs_remove(disk);
}