/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Portions Copyright (C) 1992 Drew Eckhardt
 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/types.h>
#include <linux/blk_types.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/minmax.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kdev_t.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/blkzoned.h>
#include <linux/sched.h>
#include <linux/sbitmap.h>
#include <linux/uuid.h>
#include <linux/xarray.h>
struct elevator_queue;
struct blk_flush_queue;
struct blk_queue_stats;
struct blk_stat_callback;
struct blk_crypto_profile;

extern const struct device_type disk_type;
extern const struct device_type part_type;
extern struct class block_class;
/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		6

#define DISK_MAX_PARTS			256
#define DISK_NAME_LEN			32

#define PARTITION_META_INFO_VOLNAMELTH	64
/*
 * Enough for the string representation of any kind of UUID plus NULL.
 * EFI UUID is 36 characters. MSDOS UUID is 11 characters.
 */
#define PARTITION_META_INFO_UUIDLTH	(UUID_STRING_LEN + 1)
struct partition_meta_info {
	char uuid[PARTITION_META_INFO_UUIDLTH];
	u8 volname[PARTITION_META_INFO_VOLNAMELTH];
};
/**
 * DOC: genhd capability flags
 *
 * ``GENHD_FL_REMOVABLE``: indicates that the block device gives access to
 * removable media. When set, the device remains present even when media is
 * not inserted. Shall not be set for devices which are removed entirely when
 * the media is removed.
 *
 * ``GENHD_FL_HIDDEN``: the block device is hidden; it doesn't produce events,
 * doesn't appear in sysfs, and can't be opened from userspace or using
 * blkdev_get*. Used for the underlying components of multipath devices.
 *
 * ``GENHD_FL_NO_PART``: partition support is disabled. The kernel will not
 * scan for partitions from add_disk, and users can't add partitions manually.
 */
enum {
	GENHD_FL_REMOVABLE			= 1 << 0,
	GENHD_FL_HIDDEN				= 1 << 1,
	GENHD_FL_NO_PART			= 1 << 2,
};
enum {
	DISK_EVENT_MEDIA_CHANGE			= 1 << 0, /* media changed */
	DISK_EVENT_EJECT_REQUEST		= 1 << 1, /* eject requested */
};

enum {
	/* Poll even if events_poll_msecs is unset */
	DISK_EVENT_FLAG_POLL			= 1 << 0,
	/* Forward events to udev */
	DISK_EVENT_FLAG_UEVENT			= 1 << 1,
	/* Block event polling when open for exclusive write */
	DISK_EVENT_FLAG_BLOCK_ON_EXCL_WRITE	= 1 << 2,
};
struct blk_integrity {
	const struct blk_integrity_profile	*profile;
	unsigned char				tuple_size;
	unsigned char				interval_exp;
	unsigned char				tag_size;
};
typedef unsigned int __bitwise blk_mode_t;

/* open for reading */
#define BLK_OPEN_READ		((__force blk_mode_t)(1 << 0))
/* open for writing */
#define BLK_OPEN_WRITE		((__force blk_mode_t)(1 << 1))
/* open exclusively (vs other exclusive openers) */
#define BLK_OPEN_EXCL		((__force blk_mode_t)(1 << 2))
/* opened with O_NDELAY */
#define BLK_OPEN_NDELAY		((__force blk_mode_t)(1 << 3))
/* open for "writes" only for ioctls (special hack for floppy.c) */
#define BLK_OPEN_WRITE_IOCTL	((__force blk_mode_t)(1 << 4))
struct gendisk {
	/*
	 * major/first_minor/minors should not be set by any new driver, the
	 * block core will take care of allocating them automatically.
	 */
	int major;
	int first_minor;
	int minors;

	char disk_name[DISK_NAME_LEN];	/* name of major driver */

	unsigned short events;		/* supported events */
	unsigned short event_flags;	/* flags related to event processing */

	struct xarray part_tbl;
	struct block_device *part0;

	const struct block_device_operations *fops;
	struct request_queue *queue;

	struct bio_set bio_split;

	unsigned long state;
#define GD_NEED_PART_SCAN		0
#define GD_READ_ONLY			1
#define GD_NATIVE_CAPACITY		3
#define GD_SUPPRESS_PART_SCAN		5
#define GD_OWNS_QUEUE			6

	struct mutex open_mutex;	/* open/close mutex */
	unsigned open_partitions;	/* number of open partitions */

	struct backing_dev_info	*bdi;
	struct kobject queue_kobj;	/* the queue/ directory */
	struct kobject *slave_dir;
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
	struct list_head slave_bdevs;
#endif
	struct timer_rand_state *random;
	atomic_t sync_io;		/* RAID */
	struct disk_events *ev;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit set) or
	 * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned int		max_open_zones;
	unsigned int		max_active_zones;
	unsigned long		*conv_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

#if IS_ENABLED(CONFIG_CDROM)
	struct cdrom_device_info *cdi;
#endif
	struct badblocks *bb;
	struct lockdep_map lockdep_map;
	blk_mode_t open_mode;

	/*
	 * Independent sector access ranges. This is always NULL for
	 * devices that do not have multiple independent access ranges.
	 */
	struct blk_independent_access_ranges *ia_ranges;
};
static inline bool disk_live(struct gendisk *disk)
{
	return !inode_unhashed(disk->part0->bd_inode);
}

/**
 * disk_openers - returns how many openers are there for a disk
 * @disk: disk to check
 *
 * This returns the number of openers for a disk. Note that this value is only
 * stable if disk->open_mutex is held.
 *
 * Note: Due to a quirk in the block layer open code, each open partition is
 * only counted once even if there are multiple openers.
 */
static inline unsigned int disk_openers(struct gendisk *disk)
{
	return atomic_read(&disk->part0->bd_openers);
}
/*
 * The gendisk is refcounted by the part0 block_device, and the bd_device
 * therein is also used for device model presentation in sysfs.
 */
#define dev_to_disk(device) \
	(dev_to_bdev(device)->bd_disk)
#define disk_to_dev(disk) \
	(&((disk)->part0->bd_device))

#if IS_REACHABLE(CONFIG_CDROM)
#define disk_to_cdi(disk)	((disk)->cdi)
#else
#define disk_to_cdi(disk)	NULL
#endif

static inline dev_t disk_devt(struct gendisk *disk)
{
	return MKDEV(disk->major, disk->first_minor);
}
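
/*
 * Illustrative sketch (not part of this API): a driver that wants to log
 * its disk's device number can combine disk_devt() with the MAJOR()/MINOR()
 * accessors from <linux/kdev_t.h>.  The pr_info() call and the "disk"
 * variable are assumed for the example:
 *
 *	dev_t devt = disk_devt(disk);
 *
 *	pr_info("disk %s is %u:%u\n", disk->disk_name,
 *		MAJOR(devt), MINOR(devt));
 */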
static inline int blk_validate_block_size(unsigned long bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

static inline bool blk_op_is_passthrough(blk_opf_t op)
{
	op &= REQ_OP_MASK;
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}
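
/*
 * Illustrative sketch: a driver taking a block size from a module parameter
 * might gate it on blk_validate_block_size() before use.  The my_bsize
 * variable is a stand-in, not part of this API:
 *
 *	int err = blk_validate_block_size(my_bsize);
 *
 *	if (err)
 *		return err;	// not in 512..PAGE_SIZE, or not a power of 2
 */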
/*
 * Zoned block device models (zoned limit).
 *
 * Note: This needs to be ordered from the least to the most severe
 * restrictions for the inheritance in blk_stack_limits() to work.
 */
enum blk_zoned_model {
	BLK_ZONED_NONE = 0,	/* Regular block device */
	BLK_ZONED_HA,		/* Host-aware zoned block device */
	BLK_ZONED_HM,		/* Host-managed zoned block device */
};

/*
 * BLK_BOUNCE_NONE:	never bounce (default)
 * BLK_BOUNCE_HIGH:	bounce all highmem pages
 */
enum blk_bounce {
	BLK_BOUNCE_NONE,
	BLK_BOUNCE_HIGH,
};
struct queue_limits {
	enum blk_bounce		bounce;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_user_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		logical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_secure_erase_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		max_zone_append_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;
	unsigned int		zone_write_granularity;

	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;

	/*
	 * Drivers that set dma_alignment to less than 511 must be prepared to
	 * handle individual bvec's that are not a multiple of a SECTOR_SIZE
	 * due to possible offsets.
	 */
	unsigned int		dma_alignment;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
			       void *data);

void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model);

#ifdef CONFIG_BLK_DEV_ZONED
#define BLK_ALL_ZONES  ((unsigned int)-1)
int blkdev_report_zones(struct block_device *bdev, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
unsigned int bdev_nr_zones(struct block_device *bdev);
extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_op op,
			    sector_t sectors, sector_t nr_sectors,
			    gfp_t gfp_mask);
int blk_revalidate_disk_zones(struct gendisk *disk,
			      void (*update_driver_data)(struct gendisk *disk));
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int bdev_nr_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
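
/*
 * Illustrative sketch of the report_zones_cb contract: the callback runs
 * once per reported zone and returns 0 to continue.  Counting the
 * sequential-write-required zones of a device could look like this; the
 * count_seq_cb name and the nr_seq counter are assumptions for the example:
 *
 *	static int count_seq_cb(struct blk_zone *zone, unsigned int idx,
 *				void *data)
 *	{
 *		unsigned int *nr_seq = data;
 *
 *		if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
 *			(*nr_seq)++;
 *		return 0;
 *	}
 *
 *	unsigned int nr_seq = 0;
 *	int ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES,
 *				      count_seq_cb, &nr_seq);
 */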
/*
 * Independent access ranges: struct blk_independent_access_range describes
 * a range of contiguous sectors that can be accessed using device command
 * execution resources that are independent from the resources used for
 * other access ranges. This is typically found with single-LUN multi-actuator
 * HDDs where each access range is served by a different set of heads.
 * The set of independent ranges supported by the device is defined using
 * struct blk_independent_access_ranges. The independent ranges must not overlap
 * and must include all sectors within the disk capacity (no sector holes
 * allowed).
 * For a device with multiple ranges, requests targeting sectors in different
 * ranges can be executed in parallel. A request can straddle an access range
 * boundary.
 */
struct blk_independent_access_range {
	struct kobject		kobj;
	sector_t		sector;
	sector_t		nr_sectors;
};

struct blk_independent_access_ranges {
	struct kobject				kobj;
	bool					sysfs_registered;
	unsigned int				nr_ia_ranges;
	struct blk_independent_access_range	ia_range[];
};
struct request_queue {
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct percpu_ref	q_usage_counter;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;
	struct mutex		rq_qos_mutex;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct xarray		hctx_table;
	unsigned int		nr_hw_queues;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM requests are processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * blkcg.
	 */
	int			id;

	spinlock_t		queue_lock;

	struct gendisk		*disk;

	struct kobject		*mq_kobj;

#ifdef CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity	integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	enum rpm_status		rpm_status;
#endif

	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_pad_mask;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
	struct kobject		*crypto_kobject;
#endif

	unsigned int		rq_timeout;

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	atomic_t		nr_active_requests_shared_tags;

	struct blk_mq_tags	*sched_shared_tags;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
	struct mutex		blkcg_mutex;
#endif

	struct queue_limits	limits;

	unsigned int		required_elevator_features;

#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace __rcu	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;
	struct list_head	flush_list;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;
	struct mutex		sysfs_dir_lock;

	/*
	 * for reusing dead hctx instance in case of updating
	 * nr_hw_queues
	 */
	struct list_head	unused_hctx_list;
	spinlock_t		unused_hctx_lock;

#ifdef CONFIG_BLK_DEV_THROTTLING
	struct throtl_data	*td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	/*
	 * Protect concurrent access to q_usage_counter by
	 * percpu_ref_kill() and percpu_ref_reinit().
	 */
	struct mutex		mq_freeze_lock;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;

	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
	struct dentry		*rqos_debugfs_dir;
	/*
	 * Serializes all debugfs metadata operations using the above dentries.
	 */
	struct mutex		debugfs_mutex;

	bool			mq_sysfs_init_done;
};
/* Keep blk_queue_flag_name[] in sync with the definitions below */
#define QUEUE_FLAG_STOPPED	0	/* queue is stopped */
#define QUEUE_FLAG_DYING	1	/* queue being torn down */
#define QUEUE_FLAG_NOMERGES	3	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	4	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	5	/* fake timeout */
#define QUEUE_FLAG_NONROT	6	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	7	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_NOXMERGES	9	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	10	/* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS	11	/* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE	12	/* force complete on same CPU */
#define QUEUE_FLAG_HW_WC	13	/* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE	14	/* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15	/* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL		16	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		17	/* Write back caching */
#define QUEUE_FLAG_FUA		18	/* device supports FUA writes */
#define QUEUE_FLAG_DAX		19	/* device supports DAX */
#define QUEUE_FLAG_STATS	20	/* track IO start and completion times */
#define QUEUE_FLAG_REGISTERED	22	/* queue has been registered to a disk */
#define QUEUE_FLAG_QUIESCED	24	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	25	/* device supports PCI p2p requests */
#define QUEUE_FLAG_ZONE_RESETALL 26	/* supports Zone Reset All */
#define QUEUE_FLAG_RQ_ALLOC_TIME 27	/* record rq->alloc_time_ns */
#define QUEUE_FLAG_HCTX_ACTIVE	28	/* at least one blk-mq hctx is active */
#define QUEUE_FLAG_NOWAIT	29	/* device supports NOWAIT */
#define QUEUE_FLAG_SQ_SCHED	30	/* single queue style io dispatch */
#define QUEUE_FLAG_SKIP_TAGSET_QUIESCE	31 /* quiesce_tagset skip the queue */

#define QUEUE_FLAG_MQ_DEFAULT	((1UL << QUEUE_FLAG_IO_STAT) |		\
				 (1UL << QUEUE_FLAG_SAME_COMP) |	\
				 (1UL << QUEUE_FLAG_NOWAIT))
void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_stable_writes(q) \
	test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_zone_resetall(q)	\
	test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_registered(q)	test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_sq_sched(q)	test_bit(QUEUE_FLAG_SQ_SCHED, &(q)->queue_flags)
#define blk_queue_skip_tagset_quiesce(q) \
	test_bit(QUEUE_FLAG_SKIP_TAGSET_QUIESCE, &(q)->queue_flags)
extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define dma_map_bvec(dev, bv, dir, attrs) \
	dma_map_page_attrs(dev, (bv)->bv_page, (bv)->bv_offset, (bv)->bv_len, \
	(dir), (attrs))

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}
#ifdef CONFIG_PM
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return q->rpm_status;
}
#else
static inline enum rpm_status queue_rpm_status(struct request_queue *q)
{
	return RPM_ACTIVE;
}
#endif
static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	if (IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return q->limits.zoned;
	return BLK_ZONED_NONE;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return blk_queue_is_zoned(disk->queue) ? disk->nr_zones : 0;
}

static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return 0;
	return sector >> ilog2(disk->queue->limits.chunk_sectors);
}

static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	if (!blk_queue_is_zoned(disk->queue))
		return false;
	if (!disk->conv_zones_bitmap)
		return true;
	return !test_bit(disk_zone_no(disk, sector), disk->conv_zones_bitmap);
}

static inline void disk_set_max_open_zones(struct gendisk *disk,
		unsigned int max_open_zones)
{
	disk->max_open_zones = max_open_zones;
}

static inline void disk_set_max_active_zones(struct gendisk *disk,
		unsigned int max_active_zones)
{
	disk->max_active_zones = max_active_zones;
}

static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_open_zones;
}

static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return bdev->bd_disk->max_active_zones;
}

#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int disk_nr_zones(struct gendisk *disk)
{
	return 0;
}
static inline bool disk_zone_is_seq(struct gendisk *disk, sector_t sector)
{
	return false;
}
static inline unsigned int disk_zone_no(struct gendisk *disk, sector_t sector)
{
	return 0;
}
static inline unsigned int bdev_max_open_zones(struct block_device *bdev)
{
	return 0;
}
static inline unsigned int bdev_max_active_zones(struct block_device *bdev)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */
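
/*
 * Worked example for the helpers above: with 256 MiB zones,
 * chunk_sectors is 524288 (2^19 512-byte sectors), so disk_zone_no()
 * reduces to sector >> 19.  Sector 1048576 (the 512 MiB boundary)
 * therefore lies in zone 2, and it is sequential-write-required unless
 * that zone's bit is set in conv_zones_bitmap.
 */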
static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
				 const struct attribute_group **groups);
static inline int __must_check add_disk(struct gendisk *disk)
{
	return device_add_disk(NULL, disk, NULL);
}
void del_gendisk(struct gendisk *gp);
void invalidate_disk(struct gendisk *disk);
void set_disk_ro(struct gendisk *disk, bool read_only);
void disk_uevent(struct gendisk *disk, enum kobject_action action);
static inline int get_disk_ro(struct gendisk *disk)
{
	return disk->part0->bd_read_only ||
		test_bit(GD_READ_ONLY, &disk->state);
}

static inline int bdev_read_only(struct block_device *bdev)
{
	return bdev->bd_read_only || get_disk_ro(bdev->bd_disk);
}

bool set_capacity_and_notify(struct gendisk *disk, sector_t size);
void disk_force_media_change(struct gendisk *disk);
void bdev_mark_dead(struct block_device *bdev, bool surprise);

void add_disk_randomness(struct gendisk *disk) __latent_entropy;
void rand_initialize_disk(struct gendisk *disk);
static inline sector_t get_start_sect(struct block_device *bdev)
{
	return bdev->bd_start_sect;
}

static inline sector_t bdev_nr_sectors(struct block_device *bdev)
{
	return bdev->bd_nr_sectors;
}

static inline loff_t bdev_nr_bytes(struct block_device *bdev)
{
	return (loff_t)bdev_nr_sectors(bdev) << SECTOR_SHIFT;
}

static inline sector_t get_capacity(struct gendisk *disk)
{
	return bdev_nr_sectors(disk->part0);
}

static inline u64 sb_bdev_nr_blocks(struct super_block *sb)
{
	return bdev_nr_sectors(sb->s_bdev) >>
		(sb->s_blocksize_bits - SECTOR_SHIFT);
}
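
/*
 * Worked example: for a filesystem with 4096-byte blocks,
 * s_blocksize_bits is 12, so sb_bdev_nr_blocks() shifts the sector count
 * right by 12 - SECTOR_SHIFT = 3.  An 8 GiB device (16777216 sectors)
 * yields 2097152 filesystem blocks.
 */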
int bdev_disk_changed(struct gendisk *disk, bool invalidate);

void put_disk(struct gendisk *disk);
struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass);

/**
 * blk_alloc_disk - allocate a gendisk structure
 * @node_id: numa node to allocate on
 *
 * Allocate and pre-initialize a gendisk structure for use with BIO based
 * drivers.
 */
#define blk_alloc_disk(node_id)						\
({									\
	static struct lock_class_key __key;				\
									\
	__blk_alloc_disk(node_id, &__key);				\
})

int __register_blkdev(unsigned int major, const char *name,
		void (*probe)(dev_t devt));
#define register_blkdev(major, name) \
	__register_blkdev(major, name, NULL)
void unregister_blkdev(unsigned int major, const char *name);

bool disk_check_media_change(struct gendisk *disk);
void set_capacity(struct gendisk *disk, sector_t size);
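
/*
 * Illustrative sketch of bringing up a minimal bio-based disk; error
 * handling is elided and the my_fops table, "myblk0" name and nr_sectors
 * value are assumptions for the example, not part of this API.  Leaving
 * major/first_minor unset lets the block core allocate the dev_t:
 *
 *	struct gendisk *disk = blk_alloc_disk(NUMA_NO_NODE);
 *
 *	disk->fops = &my_fops;
 *	snprintf(disk->disk_name, DISK_NAME_LEN, "myblk0");
 *	set_capacity(disk, nr_sectors);
 *	err = add_disk(disk);
 */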
#ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk);
#else
static inline int bd_link_disk_holder(struct block_device *bdev,
				      struct gendisk *disk)
{
	return 0;
}
static inline void bd_unlink_disk_holder(struct block_device *bdev,
					 struct gendisk *disk)
{
}
#endif /* CONFIG_BLOCK_HOLDER_DEPRECATED */
dev_t part_devt(struct gendisk *disk, u8 partno);
void inc_diskseq(struct gendisk *disk);
void blk_request_module(dev_t devt);

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
void submit_bio_noacct(struct bio *bio);
struct bio *bio_split_to_limits(struct bio *bio);

extern int blk_lld_busy(struct request_queue *q);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
/* Helper to convert REQ_OP_XXX to its string format XXX */
extern const char *blk_op_str(enum req_op op);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
const char *blk_status_to_str(blk_status_t status);
/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT		(1 << 0)
int bio_poll(struct bio *bio, struct io_comp_batch *iob, unsigned int flags);
int iocb_bio_iopoll(struct kiocb *kiocb, struct io_comp_batch *iob,
			unsigned int flags);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_queue;	/* this is never NULL */
}
/* Helper to convert BLK_ZONE_ZONE_XXX to its string format XXX */
const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);

static inline unsigned int bio_zone_no(struct bio *bio)
{
	return disk_zone_no(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}

static inline unsigned int bio_zone_is_seq(struct bio *bio)
{
	return disk_zone_is_seq(bio->bi_bdev->bd_disk, bio->bi_iter.bi_sector);
}
/*
 * Return how much of the chunk is left to be used for I/O at a given offset.
 */
static inline unsigned int blk_chunk_sectors_left(sector_t offset,
		unsigned int chunk_sectors)
{
	if (unlikely(!is_power_of_2(chunk_sectors)))
		return chunk_sectors - sector_div(offset, chunk_sectors);
	return chunk_sectors - (offset & (chunk_sectors - 1));
}
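
/*
 * Worked example: with chunk_sectors = 256 (a power of two) and
 * offset = 1000, the fast path computes 256 - (1000 & 255) = 24 sectors
 * left before the chunk boundary; the sector_div() path returns the same
 * result for non-power-of-two chunk sizes.
 */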
/*
 * Access functions for manipulating queue properties
 */
void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce limit);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
void blk_queue_max_secure_erase_sectors(struct request_queue *q,
		unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_max_zone_append_sectors(struct request_queue *q,
		unsigned int max_zone_append_sectors);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
void blk_queue_zone_write_granularity(struct request_queue *q,
				      unsigned int size);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
void disk_update_readahead(struct gendisk *disk);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t start);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

struct blk_independent_access_ranges *
disk_alloc_independent_access_ranges(struct gendisk *disk, int nr_ia_ranges);
void disk_set_independent_access_ranges(struct gendisk *disk,
				struct blk_independent_access_ranges *iars);
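
/*
 * Illustrative sketch: a driver typically applies its hardware limits to
 * the queue during probe.  The values below are made up for the example:
 *
 *	blk_queue_logical_block_size(q, 4096);
 *	blk_queue_physical_block_size(q, 4096);
 *	blk_queue_max_hw_sectors(q, 2048);	// 1 MiB per request
 *	blk_queue_max_segments(q, 128);
 *	blk_queue_io_opt(q, 1024 * 1024);	// preferred I/O size
 */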
/*
 * Elevator features for blk_queue_required_elevator_features:
 */

/* Supports zoned block devices sequential write constraint */
#define ELEVATOR_F_ZBD_SEQ_WRITE	(1U << 0)

extern void blk_queue_required_elevator_features(struct request_queue *q,
						 unsigned int features);
extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
					      struct device *dev);

bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *);

void blk_mark_disk_dead(struct gendisk *disk);
#ifdef CONFIG_BLOCK
/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as contention for the request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge. For details, please see schedule() where
 * blk_flush_plug() is called.
 */
struct blk_plug {
	struct request *mq_list; /* blk-mq requests */

	/* if ios_left is > 1, we can batch tag/rq allocations */
	struct request *cached_rq;
	unsigned short nr_ios;

	unsigned short rq_count;

	bool multiple_queues;

	struct list_head cb_list; /* md requires an unplug callback */
};

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_start_plug_nr_ios(struct blk_plug *, unsigned short);
extern void blk_finish_plug(struct blk_plug *);

void __blk_flush_plug(struct blk_plug *plug, bool from_schedule);
static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
	if (plug)
		__blk_flush_plug(plug, async);
}

int blkdev_issue_flush(struct block_device *bdev);
long nr_blockdev_pages(void);
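
/*
 * Illustrative sketch: batching several bios under one on-stack plug so
 * they can be merged and dispatched together; submit_one_bio() stands in
 * for the caller's own submission helper:
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr_bios; i++)
 *		submit_one_bio(bios[i]);
 *	blk_finish_plug(&plug);
 */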
#else /* CONFIG_BLOCK */
struct blk_plug {
};

static inline void blk_start_plug_nr_ios(struct blk_plug *plug,
					 unsigned short nr_ios)
{
}

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct blk_plug *plug, bool async)
{
}

static inline int blkdev_issue_flush(struct block_device *bdev)
{
	return 0;
}

static inline long nr_blockdev_pages(void)
{
	return 0;
}
#endif /* CONFIG_BLOCK */
extern void blk_io_schedule(void);

int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp);
#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
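
/*
 * Illustrative sketch: zeroing the first 1 MiB (2048 sectors) of a device,
 * allowing the device to deallocate the range but refusing the fallback to
 * manually written zeroes if no offload is available:
 *
 *	int err = blkdev_issue_zeroout(bdev, 0, 2048, GFP_KERNEL,
 *				       BLKDEV_ZERO_NOFALLBACK);
 */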
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}
static inline bool bdev_is_partition(struct block_device *bdev)
{
	return bdev->bd_partno;
}
enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define BLK_DEF_MAX_SECTORS	2560u
static inline unsigned long queue_segment_boundary(const struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(const struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(const struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_bytes(struct request_queue *q)
{
	return min_t(unsigned int, queue_max_sectors(q), INT_MAX >> 9) << 9;
}

static inline unsigned int queue_max_hw_sectors(const struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(const struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(const struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(const struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned int queue_max_zone_append_sectors(const struct request_queue *q)
{
	const struct queue_limits *l = &q->limits;

	return min(l->max_zone_append_sectors, l->max_sectors);
}

static inline unsigned int
bdev_max_zone_append_sectors(struct block_device *bdev)
{
	return queue_max_zone_append_sectors(bdev_get_queue(bdev));
}

static inline unsigned int bdev_max_segments(struct block_device *bdev)
{
	return queue_max_segments(bdev_get_queue(bdev));
}
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}
static inline unsigned int queue_physical_block_size(const struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(const struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(const struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline unsigned int
queue_zone_write_granularity(const struct request_queue *q)
{
	return q->limits.zone_write_granularity;
}

static inline unsigned int
bdev_zone_write_granularity(struct block_device *bdev)
{
	return queue_zone_write_granularity(bdev_get_queue(bdev));
}
int bdev_alignment_offset(struct block_device *bdev);
unsigned int bdev_discard_alignment(struct block_device *bdev);

static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_discard_sectors;
}

static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.discard_granularity;
}

static inline unsigned int
bdev_max_secure_erase_sectors(struct block_device *bdev)
{
	return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}
static inline bool bdev_nonrot(struct block_device *bdev)
{
	return blk_queue_nonrot(bdev_get_queue(bdev));
}

static inline bool bdev_synchronous(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_SYNCHRONOUS,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_stable_writes(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_STABLE_WRITES,
			&bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_write_cache(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_fua(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}

static inline bool bdev_nowait(struct block_device *bdev)
{
	return test_bit(QUEUE_FLAG_NOWAIT, &bdev_get_queue(bdev)->queue_flags);
}
static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	return blk_queue_zoned_model(bdev_get_queue(bdev));
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	return blk_queue_is_zoned(bdev_get_queue(bdev));
}

static inline unsigned int bdev_zone_no(struct block_device *bdev, sector_t sec)
{
	return disk_zone_no(bdev->bd_disk, sec);
}
/* Whether write serialization is required for @op on zoned devices. */
static inline bool op_needs_zoned_write_locking(enum req_op op)
{
	return op == REQ_OP_WRITE || op == REQ_OP_WRITE_ZEROES;
}

static inline bool bdev_op_is_zoned_write(struct block_device *bdev,
					  enum req_op op)
{
	return bdev_is_zoned(bdev) && op_needs_zoned_write_locking(op);
}

static inline sector_t bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (!blk_queue_is_zoned(q))
		return 0;
	return q->limits.chunk_sectors;
}

static inline sector_t bdev_offset_from_zone_start(struct block_device *bdev,
						   sector_t sector)
{
	return sector & (bdev_zone_sectors(bdev) - 1);
}

static inline bool bdev_is_zone_start(struct block_device *bdev,
				      sector_t sector)
{
	return bdev_offset_from_zone_start(bdev, sector) == 0;
}
static inline int queue_dma_alignment(const struct request_queue *q)
{
	return q ? q->limits.dma_alignment : 511;
}

static inline unsigned int bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}

static inline bool bdev_iter_is_aligned(struct block_device *bdev,
					struct iov_iter *iter)
{
	return iov_iter_is_aligned(iter, bdev_dma_alignment(bdev),
				   bdev_logical_block_size(bdev) - 1);
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;

	return !(addr & alignment) && !(len & alignment);
}
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	return order_base_2(size >> SECTOR_SHIFT) + SECTOR_SHIFT;
}

static inline unsigned int block_size(struct block_device *bdev)
{
	return 1 << bdev->bd_inode->i_blkbits;
}
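
/*
 * Worked example: blksize_bits(4096) = order_base_2(4096 >> 9) + 9
 * = order_base_2(8) + 9 = 3 + 9 = 12, so blksize_bits() and block_size()
 * round-trip through i_blkbits.
 */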
int kblockd_schedule_work(struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")
#ifdef CONFIG_BLK_INLINE_ENCRYPTION

bool blk_crypto_register(struct blk_crypto_profile *profile,
			 struct request_queue *q);

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

static inline bool blk_crypto_register(struct blk_crypto_profile *profile,
				       struct request_queue *q)
{
	return true;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
enum blk_unique_id {
	/* these match the Designator Types specified in SPC */
	BLK_UID_T10	= 1,
	BLK_UID_EUI64	= 2,
	BLK_UID_NAA	= 3,
};
struct block_device_operations {
	void (*submit_bio)(struct bio *bio);
	int (*poll_bio)(struct bio *bio, struct io_comp_batch *iob,
			unsigned int flags);
	int (*open)(struct gendisk *disk, blk_mode_t mode);
	void (*release)(struct gendisk *disk);
	int (*ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	int (*compat_ioctl)(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	int (*set_read_only)(struct block_device *bdev, bool ro);
	void (*free_disk)(struct gendisk *disk);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	int (*report_zones)(struct gendisk *, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
	char *(*devnode)(struct gendisk *disk, umode_t *mode);
	/* returns the length of the identifier or a negative errno: */
	int (*get_unique_id)(struct gendisk *disk, u8 id[16],
			enum blk_unique_id id_type);
	struct module *owner;
	const struct pr_ops *pr_ops;

	/*
	 * Special callback for probing GPT entry at a given sector.
	 * Needed by Android devices, used by GPT scanner and MMC blk
	 * driver.
	 */
	int (*alternative_gpt_sector)(struct gendisk *disk, sector_t *sector);
};
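
/*
 * Illustrative sketch of a minimal bio-based driver's operations table;
 * my_submit_bio is an assumed driver function, and all other callbacks
 * are optional and may be left NULL:
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.submit_bio	= my_submit_bio,
 *	};
 */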
#ifdef CONFIG_COMPAT
extern int blkdev_compat_ptr_ioctl(struct block_device *, blk_mode_t,
				   unsigned int, unsigned long);
#else
#define blkdev_compat_ptr_ioctl NULL
#endif
static inline void blk_wake_io_task(struct task_struct *waiter)
{
	/*
	 * If we're polling, the task itself is doing the completions. For
	 * that case, we don't need to signal a wakeup, it's enough to just
	 * mark us as RUNNING.
	 */
	if (waiter == current)
		__set_current_state(TASK_RUNNING);
	else
		wake_up_process(waiter);
}
unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op,
				 unsigned long start_time);
void bdev_end_io_acct(struct block_device *bdev, enum req_op op,
		      unsigned int sectors, unsigned long start_time);

unsigned long bio_start_io_acct(struct bio *bio);
void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time,
		struct block_device *orig_bdev);

/**
 * bio_end_io_acct - end I/O accounting for bio based drivers
 * @bio:	bio to end account for
 * @start_time:	start time returned by bio_start_io_acct()
 */
static inline void bio_end_io_acct(struct bio *bio, unsigned long start_time)
{
	return bio_end_io_acct_remapped(bio, start_time, bio->bi_bdev);
}
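
/*
 * Illustrative sketch: a bio-based driver brackets each bio with the
 * accounting helpers so the I/O shows up in /proc/diskstats; the
 * completion-path comment stands in for the driver's own logic:
 *
 *	unsigned long start = bio_start_io_acct(bio);
 *
 *	// ... later, once the hardware finishes the I/O:
 *	bio_end_io_acct(bio, start);
 *	bio_endio(bio);
 */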
int set_blocksize(struct block_device *bdev, int size);

int lookup_bdev(const char *pathname, dev_t *dev);

void blkdev_show(struct seq_file *seqf, off_t offset);

#define BDEVNAME_SIZE	32	/* Largest string for a blockdev identifier */
#define BDEVT_SIZE	10	/* Largest string for MAJ:MIN for blkdev */
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX	512
#else
#define BLKDEV_MAJOR_MAX	0
#endif
struct blk_holder_ops {
	void (*mark_dead)(struct block_device *bdev, bool surprise);

	/*
	 * Sync the file system mounted on the block device.
	 */
	void (*sync)(struct block_device *bdev);

	/*
	 * Freeze the file system mounted on the block device.
	 */
	int (*freeze)(struct block_device *bdev);

	/*
	 * Thaw the file system mounted on the block device.
	 */
	int (*thaw)(struct block_device *bdev);
};

/*
 * For filesystems using @fs_holder_ops, the @holder argument passed to
 * helpers used to open and claim block devices via bd_prepare_to_claim()
 * must point to a superblock.
 */
extern const struct blk_holder_ops fs_holder_ops;

/*
 * Return the correct open flags for blkdev_get_by_* for super block flags
 * as stored in sb->s_flags.
 */
#define sb_open_mode(flags) \
	(BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
struct bdev_handle {
	struct block_device *bdev;
	void *holder;
	blk_mode_t mode;
};

struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
		const struct blk_holder_ops *hops);
struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
		void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
		const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, void *holder);
void bdev_release(struct bdev_handle *handle);

/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
void blkdev_put_no_open(struct block_device *bdev);
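
/*
 * Illustrative sketch: opening a block device by path with the
 * handle-based API and releasing it again; the path and the my_ctx holder
 * pointer are assumptions for the example:
 *
 *	struct bdev_handle *handle;
 *
 *	handle = bdev_open_by_path("/dev/vdb",
 *				   BLK_OPEN_READ | BLK_OPEN_WRITE,
 *				   my_ctx, NULL);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	// ... use handle->bdev ...
 *	bdev_release(handle);
 */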
struct block_device *I_BDEV(struct inode *inode);

#ifdef CONFIG_BLOCK
void invalidate_bdev(struct block_device *bdev);
int sync_blockdev(struct block_device *bdev);
int sync_blockdev_range(struct block_device *bdev, loff_t lstart, loff_t lend);
int sync_blockdev_nowait(struct block_device *bdev);
void sync_bdevs(bool wait);
void bdev_statx_dioalign(struct inode *inode, struct kstat *stat);
void printk_all_partitions(void);
int __init early_lookup_bdev(const char *pathname, dev_t *dev);
#else
static inline void invalidate_bdev(struct block_device *bdev)
{
}
static inline int sync_blockdev(struct block_device *bdev)
{
	return 0;
}
static inline int sync_blockdev_nowait(struct block_device *bdev)
{
	return 0;
}
static inline void sync_bdevs(bool wait)
{
}
static inline void bdev_statx_dioalign(struct inode *inode, struct kstat *stat)
{
}
static inline void printk_all_partitions(void)
{
}
static inline int early_lookup_bdev(const char *pathname, dev_t *dev)
{
	return -EINVAL;
}
#endif /* CONFIG_BLOCK */
int bdev_freeze(struct block_device *bdev);
int bdev_thaw(struct block_device *bdev);

struct io_comp_batch {
	struct request *req_list;
	bool need_ts;
	void (*complete)(struct io_comp_batch *);
};

#define DEFINE_IO_COMP_BATCH(name)	struct io_comp_batch name = { }

#endif /* _LINUX_BLKDEV_H */