// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
        q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
        memset(lim, 0, sizeof(*lim));
        lim->logical_block_size = SECTOR_SIZE;
        lim->physical_block_size = SECTOR_SIZE;
        lim->io_min = SECTOR_SIZE;
        lim->discard_granularity = SECTOR_SIZE;
        lim->dma_alignment = SECTOR_SIZE - 1;
        lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

        /* Inherit limits from component devices */
        lim->max_segments = USHRT_MAX;
        lim->max_discard_segments = USHRT_MAX;
        lim->max_hw_sectors = UINT_MAX;
        lim->max_segment_size = UINT_MAX;
        lim->max_sectors = UINT_MAX;
        lim->max_dev_sectors = UINT_MAX;
        lim->max_write_zeroes_sectors = UINT_MAX;
        lim->max_zone_append_sectors = UINT_MAX;
        lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
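
/*
 * Example (illustrative sketch, not taken from an in-tree driver): a stacking
 * driver such as MD or DM typically resets its limits with
 * blk_set_stacking_limits() and then folds in every component device:
 *
 *      struct queue_limits lim;
 *
 *      blk_set_stacking_limits(&lim);
 *      list_for_each_entry(dev, &devices, list)        // hypothetical list
 *              queue_limits_stack_bdev(&lim, dev->bdev, 0, "md0");
 *
 * The permissive UINT_MAX/USHRT_MAX defaults above ensure that the first
 * stacked device, not the initial values, determines the effective limits.
 */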
static void blk_apply_bdi_limits(struct backing_dev_info *bdi,
                struct queue_limits *lim)
{
        /*
         * For read-ahead of large files to be effective, we need to read ahead
         * at least twice the optimal I/O size.
         */
        bdi->ra_pages = max(lim->io_opt * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
        bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
static int blk_validate_zoned_limits(struct queue_limits *lim)
{
        if (!lim->zoned) {
                if (WARN_ON_ONCE(lim->max_open_zones) ||
                    WARN_ON_ONCE(lim->max_active_zones) ||
                    WARN_ON_ONCE(lim->zone_write_granularity) ||
                    WARN_ON_ONCE(lim->max_zone_append_sectors))
                        return -EINVAL;
                return 0;
        }

        if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
                return -EINVAL;

        if (lim->zone_write_granularity < lim->logical_block_size)
                lim->zone_write_granularity = lim->logical_block_size;

        if (lim->max_zone_append_sectors) {
                /*
                 * The Zone Append size is limited by the maximum I/O size
                 * and the zone size given that it can't span zones.
                 */
                lim->max_zone_append_sectors =
                        min3(lim->max_hw_sectors,
                             lim->max_zone_append_sectors,
                             lim->chunk_sectors);
        }

        return 0;
}
/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
        unsigned int max_hw_sectors;

        /*
         * Unless otherwise specified, default to 512 byte logical blocks and a
         * physical block size equal to the logical block size.
         */
        if (!lim->logical_block_size)
                lim->logical_block_size = SECTOR_SIZE;
        if (lim->physical_block_size < lim->logical_block_size)
                lim->physical_block_size = lim->logical_block_size;

        /*
         * The minimum I/O size defaults to the physical block size unless
         * explicitly overridden.
         */
        if (lim->io_min < lim->physical_block_size)
                lim->io_min = lim->physical_block_size;

        /*
         * max_hw_sectors has a somewhat weird default for historical reasons,
         * but drivers really should set their own instead of relying on this
         * value.
         *
         * The block layer relies on the fact that every driver can
         * handle at least a page worth of data per I/O, and needs the value
         * aligned to the logical block size.
         */
        if (!lim->max_hw_sectors)
                lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
        if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
                return -EINVAL;
        lim->max_hw_sectors = round_down(lim->max_hw_sectors,
                        lim->logical_block_size >> SECTOR_SHIFT);

        /*
         * The actual max_sectors value is a complex beast and also takes the
         * max_dev_sectors value (set by SCSI ULPs) and a user configurable
         * value into account.  The ->max_sectors value is always calculated
         * from these, so directly setting it won't have any effect.
         */
        max_hw_sectors = min_not_zero(lim->max_hw_sectors,
                        lim->max_dev_sectors);
        if (lim->max_user_sectors) {
                if (lim->max_user_sectors < PAGE_SIZE / SECTOR_SIZE)
                        return -EINVAL;
                lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
        } else {
                lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
        }
        lim->max_sectors = round_down(lim->max_sectors,
                        lim->logical_block_size >> SECTOR_SHIFT);

        /*
         * Random default for the maximum number of segments.  Drivers should
         * not rely on this and should set their own.
         */
        if (!lim->max_segments)
                lim->max_segments = BLK_MAX_SEGMENTS;

        lim->max_discard_sectors =
                min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

        if (!lim->max_discard_segments)
                lim->max_discard_segments = 1;

        if (lim->discard_granularity < lim->physical_block_size)
                lim->discard_granularity = lim->physical_block_size;

        /*
         * By default there is no limit on the segment boundary alignment,
         * but if there is one it can't be smaller than the page size as
         * that would break all the normal I/O patterns.
         */
        if (!lim->seg_boundary_mask)
                lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
        if (WARN_ON_ONCE(lim->seg_boundary_mask < PAGE_SIZE - 1))
                return -EINVAL;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE).  Because
         * of that they are not limited by our notion of "segment size".
         */
        if (lim->virt_boundary_mask) {
                if (WARN_ON_ONCE(lim->max_segment_size &&
                                 lim->max_segment_size != UINT_MAX))
                        return -EINVAL;
                lim->max_segment_size = UINT_MAX;
        } else {
                /*
                 * The maximum segment size has an odd historic 64k default that
                 * drivers probably should override.  Just like the I/O size we
                 * require drivers to at least handle a full page per segment.
                 */
                if (!lim->max_segment_size)
                        lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
                if (WARN_ON_ONCE(lim->max_segment_size < PAGE_SIZE))
                        return -EINVAL;
        }

        /*
         * We require drivers to at least do logical block aligned I/O, but
         * historically could not check for that due to the separate calls
         * to set the limits.  Once the transition is finished the check
         * below should be narrowed down to check the logical block size.
         */
        if (!lim->dma_alignment)
                lim->dma_alignment = SECTOR_SIZE - 1;
        if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
                return -EINVAL;

        if (lim->alignment_offset) {
                lim->alignment_offset &= (lim->physical_block_size - 1);
                lim->misaligned = 0;
        }

        return blk_validate_zoned_limits(lim);
}
/*
 * Set the default limits for a newly allocated queue.  @lim contains the
 * initial limits set by the driver, which could be no limit in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
        /*
         * Most defaults are set by capping the bounds in blk_validate_limits,
         * but max_user_discard_sectors is special and needs an explicit
         * initialization to the max value here.
         */
        lim->max_user_discard_sectors = UINT_MAX;
        return blk_validate_limits(lim);
}
/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
                struct queue_limits *lim)
        __releases(q->limits_lock)
{
        int error = blk_validate_limits(lim);

        if (!error) {
                q->limits = *lim;
                if (q->disk)
                        blk_apply_bdi_limits(q->disk->bdi, lim);
        }
        mutex_unlock(&q->limits_lock);
        return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
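
/*
 * Example (hedged sketch): the intended calling pattern pairs this with
 * queue_limits_start_update() from <linux/blkdev.h>, which takes
 * q->limits_lock and returns a snapshot of the current limits for the
 * caller to modify:
 *
 *      struct queue_limits lim = queue_limits_start_update(q);
 *      int err;
 *
 *      lim.io_opt = 64 * 1024;         // example change only
 *      err = queue_limits_commit_update(q, &lim);
 *
 * The lock is dropped here whether or not validation succeeds, so the caller
 * must not touch @lim again after the call.
 */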
/**
 * queue_limits_set - apply queue limits to queue
 * @q:		queue to update
 * @lim:	limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
        mutex_lock(&q->limits_lock);
        return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @bounce: bounce limit to enforce
 *
 * Description:
 *    Force bouncing for ISA DMA ranges or highmem.
 *
 *    DEPRECATED, don't use in new code.
 **/
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
{
        q->limits.bounce = bounce;
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
        struct queue_limits *limits = &q->limits;
        unsigned int max_sectors;

        if ((max_hw_sectors << 9) < PAGE_SIZE) {
                max_hw_sectors = 1 << (PAGE_SHIFT - 9);
                pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
        }

        max_hw_sectors = round_down(max_hw_sectors,
                                    limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_hw_sectors = max_hw_sectors;

        max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);

        if (limits->max_user_sectors)
                max_sectors = min(max_sectors, limits->max_user_sectors);
        else
                max_sectors = min(max_sectors, BLK_DEF_MAX_SECTORS_CAP);

        max_sectors = round_down(max_sectors,
                                 limits->logical_block_size >> SECTOR_SHIFT);
        limits->max_sectors = max_sectors;

        if (!q->disk)
                return;
        q->disk->bdi->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
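
/*
 * Example (illustrative only): a controller that can transfer at most 1 MiB
 * per request would advertise 1 MiB / 512 = 2048 sectors:
 *
 *      blk_queue_max_hw_sectors(q, 2048);
 *
 * max_sectors is then capped to min(2048, BLK_DEF_MAX_SECTORS_CAP) unless the
 * user raises it via /sys/block/<device>/queue/max_sectors_kb.
 */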
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the block layer
 *    must accept a page worth of data at any offset. So if the crossing of
 *    chunks is a hard limitation in the driver, it must still be prepared
 *    to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
        q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
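
/*
 * Example (illustrative only): a device that cannot service I/O crossing a
 * 256 KiB internal boundary would set chunk_sectors to 256 KiB / 512 = 512:
 *
 *      blk_queue_chunk_sectors(q, 512);
 *
 * The value does not have to be a power of two; the stacking code below
 * combines chunk sizes with gcd().
 */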
/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
                unsigned int max_discard_sectors)
{
        struct queue_limits *lim = &q->limits;

        lim->max_hw_discard_sectors = max_discard_sectors;
        lim->max_discard_sectors =
                min(max_discard_sectors, lim->max_user_discard_sectors);
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
 * blk_queue_max_secure_erase_sectors - set max sectors for a secure erase
 * @q:  the request queue for the device
 * @max_sectors: maximum number of sectors to secure_erase
 **/
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
                unsigned int max_sectors)
{
        q->limits.max_secure_erase_sectors = max_sectors;
}
EXPORT_SYMBOL(blk_queue_max_secure_erase_sectors);
/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes operation
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
                unsigned int max_write_zeroes_sectors)
{
        q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
/**
 * blk_queue_max_zone_append_sectors - set max sectors for a single zone append
 * @q:  the request queue for the device
 * @max_zone_append_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_zone_append_sectors(struct request_queue *q,
                unsigned int max_zone_append_sectors)
{
        unsigned int max_sectors;

        if (WARN_ON(!blk_queue_is_zoned(q)))
                return;

        max_sectors = min(q->limits.max_hw_sectors, max_zone_append_sectors);
        max_sectors = min(q->limits.chunk_sectors, max_sectors);

        /*
         * Signal eventual driver bugs resulting in the max_zone_append_sectors
         * limit being 0 due to a 0 argument, the chunk_sectors limit (zone size)
         * not set, or the max_hw_sectors limit not set.
         */
        WARN_ON(!max_sectors);

        q->limits.max_zone_append_sectors = max_sectors;
}
EXPORT_SYMBOL_GPL(blk_queue_max_zone_append_sectors);
/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
        if (!max_segments) {
                max_segments = 1;
                pr_info("%s: set to minimum %u\n", __func__, max_segments);
        }

        q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
                unsigned short max_segments)
{
        q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
        if (max_size < PAGE_SIZE) {
                max_size = PAGE_SIZE;
                pr_info("%s: set to minimum %u\n", __func__, max_size);
        }

        /* see blk_queue_virt_boundary() for the explanation */
        WARN_ON_ONCE(q->limits.virt_boundary_mask);

        q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned int size)
{
        struct queue_limits *limits = &q->limits;

        limits->logical_block_size = size;

        if (limits->discard_granularity < limits->logical_block_size)
                limits->discard_granularity = limits->logical_block_size;

        if (limits->physical_block_size < size)
                limits->physical_block_size = size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;

        limits->max_hw_sectors =
                round_down(limits->max_hw_sectors, size >> SECTOR_SHIFT);
        limits->max_sectors =
                round_down(limits->max_sectors, size >> SECTOR_SHIFT);
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
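
/*
 * Example (illustrative only): a 4Kn drive that can only address 4096-byte
 * blocks would report:
 *
 *      blk_queue_logical_block_size(q, 4096);
 *
 * which also rounds max_hw_sectors/max_sectors down to a multiple of eight
 * 512-byte sectors so requests stay block aligned.
 */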
/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
        q->limits.physical_block_size = size;

        if (q->limits.physical_block_size < q->limits.logical_block_size)
                q->limits.physical_block_size = q->limits.logical_block_size;

        if (q->limits.discard_granularity < q->limits.physical_block_size)
                q->limits.discard_granularity = q->limits.physical_block_size;

        if (q->limits.io_min < q->limits.physical_block_size)
                q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
/**
 * blk_queue_zone_write_granularity - set zone write granularity for the queue
 * @q:  the request queue for the zoned device
 * @size:  the zone write granularity size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible size allowing to write in
 *   sequential zones of a zoned block device.
 */
void blk_queue_zone_write_granularity(struct request_queue *q,
                                      unsigned int size)
{
        if (WARN_ON_ONCE(!blk_queue_is_zoned(q)))
                return;

        q->limits.zone_write_granularity = size;

        if (q->limits.zone_write_granularity < q->limits.logical_block_size)
                q->limits.zone_write_granularity = q->limits.logical_block_size;
}
EXPORT_SYMBOL_GPL(blk_queue_zone_write_granularity);
/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
        q->limits.alignment_offset =
                offset & (q->limits.physical_block_size - 1);
        q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
void disk_update_readahead(struct gendisk *disk)
{
        blk_apply_bdi_limits(disk->bdi, &disk->queue->limits);
}
EXPORT_SYMBOL_GPL(disk_update_readahead);
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
        limits->io_min = min;

        if (limits->io_min < limits->logical_block_size)
                limits->io_min = limits->logical_block_size;

        if (limits->io_min < limits->physical_block_size)
                limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);
/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
        blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
        limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
        blk_limits_io_opt(&q->limits, opt);
        if (!q->disk)
                return;
        q->disk->bdi->ra_pages =
                max(queue_io_opt(q) * 2 / PAGE_SIZE, VM_READAHEAD_PAGES);
}
EXPORT_SYMBOL(blk_queue_io_opt);
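
/*
 * Example (illustrative only): a RAID5 array with a 64 KiB chunk and four
 * data disks might advertise the chunk as the minimum and the full stripe
 * as the optimal I/O size:
 *
 *      blk_queue_io_min(q, 64 * 1024);
 *      blk_queue_io_opt(q, 4 * 64 * 1024);
 *
 * The read-ahead window is then derived from io_opt (twice the optimal
 * size), as in the ra_pages update above.
 */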
static int queue_limit_alignment_offset(const struct queue_limits *lim,
                sector_t sector)
{
        unsigned int granularity = max(lim->physical_block_size, lim->io_min);
        unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
                << SECTOR_SHIFT;

        return (granularity + lim->alignment_offset - alignment) % granularity;
}
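
/*
 * Worked example (illustrative numbers): with io_min = 65536 (128 sectors),
 * alignment_offset = 3584 and a partition starting at sector 63,
 * sector_div(63, 128) leaves a remainder of 63, i.e. 63 << 9 = 32256 bytes,
 * so the result is (65536 + 3584 - 32256) % 65536 = 36864 bytes: the distance
 * from the partition start to the next device-aligned boundary.
 */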
static unsigned int queue_limit_discard_alignment(
                const struct queue_limits *lim, sector_t sector)
{
        unsigned int alignment, granularity, offset;

        if (!lim->max_discard_sectors)
                return 0;

        /* Why are these in bytes, not sectors? */
        alignment = lim->discard_alignment >> SECTOR_SHIFT;
        granularity = lim->discard_granularity >> SECTOR_SHIFT;
        if (!granularity)
                return 0;

        /* Offset of the partition start in 'granularity' sectors */
        offset = sector_div(sector, granularity);

        /* And why do we do this modulus *again* in blkdev_issue_discard()? */
        offset = (granularity + alignment - offset) % granularity;

        /* Turn it back into bytes, gaah */
        return offset << SECTOR_SHIFT;
}
static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
        sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
        if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
                sectors = PAGE_SIZE >> SECTOR_SHIFT;
        return sectors;
}
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
                     sector_t start)
{
        unsigned int top, bottom, alignment, ret = 0;

        t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
        t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
        t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
        t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
                                          b->max_write_zeroes_sectors);
        t->max_zone_append_sectors = min(t->max_zone_append_sectors,
                                         b->max_zone_append_sectors);
        t->bounce = max(t->bounce, b->bounce);

        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
                                            b->seg_boundary_mask);
        t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
                                             b->virt_boundary_mask);

        t->max_segments = min_not_zero(t->max_segments, b->max_segments);
        t->max_discard_segments = min_not_zero(t->max_discard_segments,
                                               b->max_discard_segments);
        t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
                                                 b->max_integrity_segments);

        t->max_segment_size = min_not_zero(t->max_segment_size,
                                           b->max_segment_size);

        t->misaligned |= b->misaligned;

        alignment = queue_limit_alignment_offset(b, start);

        /* Bottom device has different alignment.  Check that it is
         * compatible with the current top alignment.
         */
        if (t->alignment_offset != alignment) {

                top = max(t->physical_block_size, t->io_min)
                        + t->alignment_offset;
                bottom = max(b->physical_block_size, b->io_min) + alignment;

                /* Verify that top and bottom intervals line up */
                if (max(top, bottom) % min(top, bottom)) {
                        t->misaligned = 1;
                        ret = -1;
                }
        }

        t->logical_block_size = max(t->logical_block_size,
                                    b->logical_block_size);

        t->physical_block_size = max(t->physical_block_size,
                                     b->physical_block_size);

        t->io_min = max(t->io_min, b->io_min);
        t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
        t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

        /* Set non-power-of-2 compatible chunk_sectors boundary */
        if (b->chunk_sectors)
                t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

        /* Physical block size a multiple of the logical block size? */
        if (t->physical_block_size & (t->logical_block_size - 1)) {
                t->physical_block_size = t->logical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Minimum I/O a multiple of the physical block size? */
        if (t->io_min & (t->physical_block_size - 1)) {
                t->io_min = t->physical_block_size;
                t->misaligned = 1;
                ret = -1;
        }

        /* Optimal I/O a multiple of the physical block size? */
        if (t->io_opt & (t->physical_block_size - 1)) {
                t->io_opt = 0;
                t->misaligned = 1;
                ret = -1;
        }

        /* chunk_sectors a multiple of the physical block size? */
        if ((t->chunk_sectors << 9) & (t->physical_block_size - 1)) {
                t->chunk_sectors = 0;
                t->misaligned = 1;
                ret = -1;
        }

        t->raid_partial_stripes_expensive =
                max(t->raid_partial_stripes_expensive,
                    b->raid_partial_stripes_expensive);

        /* Find lowest common alignment_offset */
        t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
                % max(t->physical_block_size, t->io_min);

        /* Verify that new alignment_offset is on a logical block boundary */
        if (t->alignment_offset & (t->logical_block_size - 1)) {
                t->misaligned = 1;
                ret = -1;
        }

        t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
        t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
        t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

        /* Discard alignment and granularity */
        if (b->discard_granularity) {
                alignment = queue_limit_discard_alignment(b, start);

                if (t->discard_granularity != 0 &&
                    t->discard_alignment != alignment) {
                        top = t->discard_granularity + t->discard_alignment;
                        bottom = b->discard_granularity + alignment;

                        /* Verify that top and bottom intervals line up */
                        if ((max(top, bottom) % min(top, bottom)) != 0)
                                t->discard_misaligned = 1;
                }

                t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
                                                      b->max_discard_sectors);
                t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
                                                         b->max_hw_discard_sectors);
                t->discard_granularity = max(t->discard_granularity,
                                             b->discard_granularity);
                t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
                        t->discard_granularity;
        }
        t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
                                                   b->max_secure_erase_sectors);
        t->zone_write_granularity = max(t->zone_write_granularity,
                                        b->zone_write_granularity);
        t->zoned = max(t->zoned, b->zoned);
        if (!t->zoned) {
                t->zone_write_granularity = 0;
                t->max_zone_append_sectors = 0;
        }
        return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
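
/*
 * Example (illustrative sketch): a caller that wants to react to
 * misalignment rather than just warn can use the return value directly:
 *
 *      if (blk_stack_limits(&t, &bdev_get_queue(bdev)->limits,
 *                           get_start_sect(bdev) + offset) < 0)
 *              pr_warn("stacked device is misaligned\n");
 *
 * This is essentially what queue_limits_stack_bdev() below does, with a
 * nicer warning message.
 */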
/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
                sector_t offset, const char *pfx)
{
        if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
                        get_start_sect(bdev) + offset))
                pr_notice("%s: Warning: Device %pg is misaligned\n",
                        pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
        if (mask > q->dma_pad_mask)
                q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
        if (mask < PAGE_SIZE - 1) {
                mask = PAGE_SIZE - 1;
                pr_info("%s: set to minimum %lx\n", __func__, mask);
        }

        q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
        q->limits.virt_boundary_mask = mask;

        /*
         * Devices that require a virtual boundary do not support scatter/gather
         * I/O natively, but instead require a descriptor list entry for each
         * page (which might not be identical to the Linux PAGE_SIZE).  Because
         * of that they are not limited by our notion of "segment size".
         */
        if (mask)
                q->limits.max_segment_size = UINT_MAX;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
        q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update the required memory and length alignment for direct dma
 *    transactions.  If the requested alignment is larger than the current
 *    alignment, then the current queue alignment is updated to the new
 *    value, otherwise it is left alone.  The design of this is to allow
 *    multiple objects (driver, device, transport etc) to set their
 *    respective alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
        BUG_ON(mask > PAGE_SIZE);

        if (mask > q->limits.dma_alignment)
                q->limits.dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
        q->queue_depth = depth;
        rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);
/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
        if (wc) {
                blk_queue_flag_set(QUEUE_FLAG_HW_WC, q);
                blk_queue_flag_set(QUEUE_FLAG_WC, q);
        } else {
                blk_queue_flag_clear(QUEUE_FLAG_HW_WC, q);
                blk_queue_flag_clear(QUEUE_FLAG_WC, q);
        }
        if (fua)
                blk_queue_flag_set(QUEUE_FLAG_FUA, q);
        else
                blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
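
/*
 * Example (illustrative only): a driver for hardware with a volatile write
 * cache that also honours FUA writes would call:
 *
 *      blk_queue_write_cache(q, true, true);
 *
 * while a write-through device would pass (false, false), letting the block
 * layer skip flush/FUA handling for it.
 */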
/**
 * blk_queue_required_elevator_features - set a queue's required elevator features
 * @q:		the request queue for the target device
 * @features:	Required elevator features OR'ed together
 *
 * Tell the block layer that for the device controlled through @q, the only
 * elevators that can be used are those that implement at least the set of
 * features specified by @features.
 */
void blk_queue_required_elevator_features(struct request_queue *q,
                                          unsigned int features)
{
        q->required_elevator_features = features;
}
EXPORT_SYMBOL_GPL(blk_queue_required_elevator_features);
/**
 * blk_queue_can_use_dma_map_merging - configure queue for merging segments.
 * @q:		the request queue for the device
 * @dev:	the device pointer for dma
 *
 * Tell the block layer to merge the segments of @q based on the DMA merge
 * boundary of @dev.
 */
bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
                                       struct device *dev)
{
        unsigned long boundary = dma_get_merge_boundary(dev);

        if (!boundary)
                return false;

        /* No need to update max_segment_size. see blk_queue_virt_boundary() */
        blk_queue_virt_boundary(q, boundary);

        return true;
}
EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging);
/**
 * disk_set_zoned - indicate a zoned device
 * @disk:	gendisk to configure
 */
void disk_set_zoned(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED));

        /*
         * Set the zone write granularity to the device logical block
         * size by default. The driver can change this value if needed.
         */
        q->limits.zoned = true;
        blk_queue_zone_write_granularity(q, queue_logical_block_size(q));
}
EXPORT_SYMBOL_GPL(disk_set_zoned);
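
/*
 * Example (hedged sketch): a zoned driver typically marks the disk as zoned
 * and then refines the zone limits before registering it, e.g.:
 *
 *      disk_set_zoned(disk);
 *      blk_queue_chunk_sectors(q, zone_sectors);       // zone size
 *      blk_queue_max_zone_append_sectors(q, zone_sectors);
 *
 * zone_sectors is a placeholder for the device's zone size in 512-byte
 * units; the write granularity default set here can be overridden
 * afterwards.
 */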
int bdev_alignment_offset(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (q->limits.misaligned)
                return -1;
        if (bdev_is_partition(bdev))
                return queue_limit_alignment_offset(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);
unsigned int bdev_discard_alignment(struct block_device *bdev)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (bdev_is_partition(bdev))
                return queue_limit_discard_alignment(&q->limits,
                                bdev->bd_start_sect);
        return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);