/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	spinlock_t		mq_flush_lock;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	unsigned long		flush_data_in_flight;
	struct request		*flush_rq;
};

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}

static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
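
/*
 * Illustrative usage sketch (an assumption about the typical submission path,
 * not a definitive pattern): a successful bio_queue_enter() is paired with a
 * blk_queue_exit() once submission against the queue is done, e.g.:
 *
 *	if (bio_queue_enter(bio))
 *		return;		(enter failed; the bio has already been ended)
 *	...issue the bio against bdev_get_queue(bio->bi_bdev)...
 *	blk_queue_exit(q);
 */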

#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

bool bvec_try_merge_hw_page(struct request_queue *q, struct bio_vec *bv,
		struct page *page, unsigned len, unsigned offset,
		bool *same_page);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	/*
	 * Merging adjacent physical pages may not work correctly under KMSAN
	 * if their metadata pages aren't adjacent. Just disable merging.
	 */
	if (IS_ENABLED(CONFIG_KMSAN))
		return false;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
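
/*
 * Example (illustrative, not from the original source): with a segment
 * boundary mask of 0xffff (64K), two physically contiguous vectors still do
 * not merge if the combined range would straddle a 64K boundary, because
 * (addr1 | mask) and ((addr2 + vec2->bv_len - 1) | mask) then differ.
 */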

static inline bool __bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(const struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
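
/*
 * Example (illustrative, not from the original source): an NVMe-style driver
 * sets virt_boundary_mask to PAGE_SIZE - 1, so a gap is reported unless the
 * previous vector ends on a page boundary and the new vector starts at page
 * offset 0; such gaps prevent merging and force bios to be split.
 */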

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request will be normal read/write requests. The ranges
 *     need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
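
/*
 * Example (illustrative, not from the original source): a controller that
 * advertises queue_max_discard_segments() == 256 can receive one discard
 * request carrying up to 256 non-contiguous ranges, one per merged bio; with
 * the default of 1, discard bios only merge when their sector ranges are
 * contiguous, like ordinary reads and writes.
 */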

static inline unsigned int blk_rq_get_max_segments(struct request *rq)
{
	if (req_op(rq) == REQ_OP_DISCARD)
		return queue_max_discard_segments(rq->q);
	return queue_max_segments(rq->q);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

extern const struct attribute_group blk_integrity_attr_group;
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}

static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs);

#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

bool blk_insert_flush(struct request *rq);

int elevator_switch(struct request_queue *q, struct elevator_type *new_e);
void elevator_disable(struct request_queue *q);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool bio_may_exceed_limits(struct bio *bio,
					 const struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data. The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}

struct bio *__bio_split_to_limits(struct bio *bio,
				  const struct queue_limits *lim,
				  unsigned int *nr_segs);
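
/*
 * Illustrative flow sketch (an assumption about the typical submission path,
 * variable names lim and nr_segs are placeholders): bio_may_exceed_limits()
 * serves as a cheap filter so that __bio_split_to_limits() is only called
 * when a split may actually be needed, e.g.:
 *
 *	if (bio_may_exceed_limits(bio, lim))
 *		bio = __bio_split_to_limits(bio, lim, &nr_segs);
 *	if (!bio)
 *		return;		(the bio was consumed or errored on split)
 */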

int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

void blk_set_default_limits(struct queue_limits *lim);
int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}
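
/*
 * Illustrative note (an assumption, not from the original source): RQF_IO_STAT
 * is set when the request is started on a queue with I/O accounting enabled,
 * so completion paths can gate their accounting on this helper, e.g.:
 *
 *	if (blk_do_io_stat(req))
 *		...account the completed bytes against req->part...
 */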

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn >= max_pfn;
}

static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
int blkdev_report_zones_ioctl(struct block_device *bdev, unsigned int cmd,
		unsigned long arg);
int blkdev_zone_mgmt_ioctl(struct block_device *bdev, blk_mode_t mode,
		unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
		unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
		blk_mode_t mode, unsigned int cmd, unsigned long arg)
{
	return -ENOTTY;
}
#endif /* CONFIG_BLK_DEV_ZONED */

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
void bdev_add(struct block_device *bdev, dev_t dev);

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void drop_partition(struct block_device *part);

void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

/*
 * Clean up a page appropriately, where the page may be pinned, may have a
 * ref taken on it or neither.
 */
static inline void bio_release_page(struct bio *bio, struct page *page)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		unpin_user_page(page);
}

struct request_queue *blk_alloc_queue(int node_id);

int disk_scan_partitions(struct gendisk *disk, blk_mode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

blk_mode_t file_to_blk_mode(struct file *file);
int truncate_bdev_range(struct block_device *bdev, blk_mode_t mode,
		loff_t lstart, loff_t lend);
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
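
/*
 * Illustrative usage sketch (an assumption about the blk-mq timeout path, not
 * a definitive pattern): code that can race with normal completion takes a
 * reference before touching the request and drops it afterwards, e.g.:
 *
 *	if (!req_ref_inc_not_zero(rq))
 *		return;		(request already released by its last user)
 *	...inspect or expire rq...
 *	if (req_ref_put_and_test(rq))
 *		...last reference dropped, the request may now be freed...
 */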

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */