/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/blk-crypto.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <xen/xen.h>		/* for xen_domain()/xen_biovec_phys_mergeable() */
#include "blk-crypto-internal.h"

struct elevator_type;

/* Max future timer expiry for timeouts */
#define BLK_MAX_TIMEOUT		(5 * HZ)

extern struct dentry *blk_debugfs_root;

struct blk_flush_queue {
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	blk_status_t		rq_status;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;

	spinlock_t		mq_flush_lock;
};

extern struct kmem_cache *blk_requestq_cachep;
extern struct kmem_cache *blk_requestq_srcu_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

bool is_flush_rq(struct request *req);

struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
					      gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);

void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
int __bio_queue_enter(struct request_queue *q, struct bio *bio);
void submit_bio_noacct_nocheck(struct bio *bio);

static inline bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
	rcu_read_lock();
	if (!percpu_ref_tryget_live_rcu(&q->q_usage_counter))
		goto fail;

	/*
	 * The code that increments the pm_only counter must ensure that the
	 * counter is globally visible before the queue is unfrozen.
	 */
	if (blk_queue_pm_only(q) &&
	    (!pm || queue_rpm_status(q) == RPM_SUSPENDED))
		goto fail_put;

	rcu_read_unlock();
	return true;

fail_put:
	blk_queue_exit(q);
fail:
	rcu_read_unlock();
	return false;
}
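
/*
 * Illustrative caller pattern (a sketch, not a real call site): a successful
 * blk_try_enter_queue() must be paired with blk_queue_exit() once the caller
 * is done with the queue, e.g.:
 *
 *	if (blk_try_enter_queue(q, false)) {
 *		do_something(q);		// hypothetical helper
 *		blk_queue_exit(q);
 *	}
 *
 * The pm argument is true for requests issued on behalf of runtime PM; such
 * requests may enter a pm_only queue unless the device is fully suspended
 * (RPM_SUSPENDED), while ordinary I/O is turned away whenever pm_only is set.
 */
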
static inline int bio_queue_enter(struct bio *bio)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);

	if (blk_try_enter_queue(q, false))
		return 0;
	return __bio_queue_enter(q, bio);
}
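
/*
 * Note (editorial, not upstream documentation): blk_try_enter_queue() above
 * is the lockless fast path; __bio_queue_enter() in blk-core.c is the slow
 * path that may sleep until the queue is unfrozen and that fails the bio
 * itself when the disk is dead. A nonzero return therefore means the bio has
 * already been completed and must not be touched again, e.g.:
 *
 *	if (bio_queue_enter(bio))
 *		return;		// bio was ended with an error
 */
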
#define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
		gfp_t gfp_mask);
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned short nr_vecs);

static inline bool biovec_phys_mergeable(struct request_queue *q,
		struct bio_vec *vec1, struct bio_vec *vec2)
{
	unsigned long mask = queue_segment_boundary(q);
	phys_addr_t addr1 = page_to_phys(vec1->bv_page) + vec1->bv_offset;
	phys_addr_t addr2 = page_to_phys(vec2->bv_page) + vec2->bv_offset;

	if (addr1 + vec1->bv_len != addr2)
		return false;
	if (xen_domain() && !xen_biovec_phys_mergeable(vec1, vec2->bv_page))
		return false;
	if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask))
		return false;
	return true;
}
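
/*
 * Worked example of the segment-boundary check above (illustrative numbers):
 * with queue_segment_boundary() == 0xffff (64K), two physically contiguous
 * vecs at addr1 = 0xfe00 (len 0x200) and addr2 = 0x10000 (len 0x200) satisfy
 * addr1 + len == addr2, but (0xfe00 | 0xffff) == 0xffff while
 * ((0x10000 + 0x200 - 1) | 0xffff) == 0x1ffff, so the merged segment would
 * straddle a 64K boundary and the vecs are kept separate.
 */
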
static inline bool __bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	return (offset & lim->virt_boundary_mask) ||
		((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask);
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct queue_limits *lim,
		struct bio_vec *bprv, unsigned int offset)
{
	if (!lim->virt_boundary_mask)
		return false;
	return __bvec_gap_to_prev(lim, bprv, offset);
}
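
/*
 * Example (illustrative): for a device with a 4K virt_boundary_mask of 0xfff
 * (NVMe-style PRP lists), a previous vec ending at bv_offset + bv_len = 0x800
 * makes __bvec_gap_to_prev() report a gap, since an SG element that does not
 * end on the 4K boundary would leave a hole in the device's view of the
 * buffer. Queues that leave virt_boundary_mask at 0 skip the check entirely.
 */
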
static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (req_op(rq) == REQ_OP_ZONE_APPEND)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * There are two different ways to handle DISCARD merges:
 *  1) If max_discard_segments > 1, the driver treats every bio as a range and
 *     sends the bios to the controller together. The ranges don't need to be
 *     contiguous.
 *  2) Otherwise, the request will be a normal read/write request. The ranges
 *     need to be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
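
/*
 * Example (illustrative device limits): with queue_max_discard_segments() of
 * 256, discards for sectors 0..7 and 1024..1031 may be merged into a single
 * multi-range request carrying one bio per range; on a single-segment device
 * only contiguous discards such as 0..7 and 8..15 can merge.
 */
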
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     enum req_op op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

#ifdef CONFIG_BLK_DEV_INTEGRITY
void blk_flush_integrity(void);
bool __bio_integrity_endio(struct bio *);
void bio_integrity_free(struct bio *bio);
static inline bool bio_integrity_endio(struct bio *bio)
{
	if (bio_integrity(bio))
		return __bio_integrity_endio(bio);
	return true;
}

bool blk_integrity_merge_rq(struct request_queue *, struct request *,
		struct request *);
bool blk_integrity_merge_bio(struct request_queue *, struct request *,
		struct bio *);

static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(&req->q->limits,
				&bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

int blk_integrity_add(struct gendisk *disk);
void blk_integrity_del(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
		struct request *r1, struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
		struct request *r, struct bio *b)
{
	return true;
}
static inline bool integrity_req_gap_back_merge(struct request *req,
		struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
		struct bio *bio)
{
	return false;
}
static inline void blk_flush_integrity(void)
{
}
static inline bool bio_integrity_endio(struct bio *bio)
{
	return true;
}
static inline void bio_integrity_free(struct bio *bio)
{
}
static inline int blk_integrity_add(struct gendisk *disk)
{
	return 0;
}
static inline void blk_integrity_del(struct gendisk *disk)
{
}
#endif /* CONFIG_BLK_DEV_INTEGRITY */

unsigned long blk_rq_timeout(unsigned long timeout);
void blk_add_timer(struct request *req);
const char *blk_status_to_str(blk_status_t status);

bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs);
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
		struct bio *bio, unsigned int nr_segs);

/*
 * Plug flush limit
 */
#define BLK_MAX_REQUEST_COUNT	32
#define BLK_PLUG_FLUSH_SIZE	(128 * 1024)

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq) ((rq)->rq_flags & RQF_HASHED)

void blk_insert_flush(struct request *rq);

int elevator_switch_mq(struct request_queue *q,
			      struct elevator_type *new_e);
void elevator_exit(struct request_queue *q);
int elv_register_queue(struct request_queue *q, bool uevent);
void elv_unregister_queue(struct request_queue *q);

ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_stat_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_show(struct device *dev, struct device_attribute *attr,
		char *buf);
ssize_t part_fail_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);

static inline bool bio_may_exceed_limits(struct bio *bio,
					 struct queue_limits *lim)
{
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return true; /* non-trivial splitting decisions */
	default:
		break;
	}

	/*
	 * All drivers must accept single-segment bios that are <= PAGE_SIZE.
	 * This is a quick and dirty check that relies on the fact that
	 * bi_io_vec[0] is always valid if a bio has data. The check might
	 * lead to occasional false negatives when bios are cloned, but compared
	 * to the performance impact of cloned bios themselves the loop below
	 * doesn't matter anyway.
	 */
	return lim->chunk_sectors || bio->bi_vcnt != 1 ||
		bio->bi_io_vec->bv_len + bio->bi_io_vec->bv_offset > PAGE_SIZE;
}
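
/*
 * Example (illustrative): a bio with one 512-byte vec at bv_offset 0, on a
 * queue with chunk_sectors == 0, makes this return false and the caller can
 * skip the split path entirely; a multi-vec bio, or one whose single vec
 * crosses PAGE_SIZE, falls through to __bio_split_to_limits() below.
 */
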
struct bio *__bio_split_to_limits(struct bio *bio, struct queue_limits *lim,
		       unsigned int *nr_segs);
int ll_back_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs);
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
unsigned int blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio);

int blk_dev_init(void);

/*
 * Contribute to IO statistics IFF:
 *
 *	a) it's attached to a gendisk, and
 *	b) the queue had IO stats enabled when this request was started
 */
static inline bool blk_do_io_stat(struct request *rq)
{
	return (rq->rq_flags & RQF_IO_STAT) && !blk_rq_is_passthrough(rq);
}

void update_io_ticks(struct block_device *part, unsigned long now, bool end);

static inline void req_set_nomerge(struct request_queue *q, struct request *req)
{
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
}

/*
 * Internal io_context interface
 */
struct io_cq *ioc_find_get_icq(struct request_queue *q);
struct io_cq *ioc_lookup_icq(struct request_queue *q);
#ifdef CONFIG_BLK_ICQ
void ioc_clear_queue(struct request_queue *q);
#else
static inline void ioc_clear_queue(struct request_queue *q)
{
}
#endif /* CONFIG_BLK_ICQ */

#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
extern ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page);
extern ssize_t blk_throtl_sample_time_store(struct request_queue *q,
	const char *page, size_t count);
extern void blk_throtl_bio_endio(struct bio *bio);
extern void blk_throtl_stat_add(struct request *rq, u64 time);
#else
static inline void blk_throtl_bio_endio(struct bio *bio) { }
static inline void blk_throtl_stat_add(struct request *rq, u64 time) { }
#endif

struct bio *__blk_queue_bounce(struct bio *bio, struct request_queue *q);

static inline bool blk_queue_may_bounce(struct request_queue *q)
{
	return IS_ENABLED(CONFIG_BOUNCE) &&
		q->limits.bounce == BLK_BOUNCE_HIGH &&
		max_low_pfn < max_pfn;
}
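
/*
 * Note (editorial; assumes the highmem-exists reading of the check above):
 * the pfn comparison only holds when highmem pages exist, e.g. a 32-bit
 * machine with more RAM than fits in the kernel's lowmem window. On 64-bit,
 * max_low_pfn == max_pfn, so the check fails at runtime and
 * blk_queue_bounce() below returns the bio unchanged.
 */
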
static inline struct bio *blk_queue_bounce(struct bio *bio,
		struct request_queue *q)
{
	if (unlikely(blk_queue_may_bounce(q) && bio_has_data(bio)))
		return __blk_queue_bounce(bio, q);
	return bio;
}

#ifdef CONFIG_BLK_CGROUP_IOLATENCY
extern int blk_iolatency_init(struct request_queue *q);
#else
static inline int blk_iolatency_init(struct request_queue *q) { return 0; }
#endif

#ifdef CONFIG_BLK_DEV_ZONED
void disk_free_zone_bitmaps(struct gendisk *disk);
void disk_clear_zone_settings(struct gendisk *disk);
#else
static inline void disk_free_zone_bitmaps(struct gendisk *disk) {}
static inline void disk_clear_zone_settings(struct gendisk *disk) {}
#endif

int blk_alloc_ext_minor(void);
void blk_free_ext_minor(unsigned int minor);
#define ADDPART_FLAG_NONE	0
#define ADDPART_FLAG_RAID	1
#define ADDPART_FLAG_WHOLEDISK	2
int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
int bdev_del_partition(struct gendisk *disk, int partno);
int bdev_resize_partition(struct gendisk *disk, int partno, sector_t start,
		sector_t length);
void blk_drop_partitions(struct gendisk *disk);

struct gendisk *__alloc_disk_node(struct request_queue *q, int node_id,
		struct lock_class_key *lkclass);

int bio_add_hw_page(struct request_queue *q, struct bio *bio,
		struct page *page, unsigned int len, unsigned int offset,
		unsigned int max_sectors, bool *same_page);

static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
{
	if (srcu)
		return blk_requestq_srcu_cachep;
	return blk_requestq_cachep;
}
struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);

int disk_scan_partitions(struct gendisk *disk, fmode_t mode);

int disk_alloc_events(struct gendisk *disk);
void disk_add_events(struct gendisk *disk);
void disk_del_events(struct gendisk *disk);
void disk_release_events(struct gendisk *disk);
void disk_block_events(struct gendisk *disk);
void disk_unblock_events(struct gendisk *disk);
void disk_flush_events(struct gendisk *disk, unsigned int mask);
extern struct device_attribute dev_attr_events;
extern struct device_attribute dev_attr_events_async;
extern struct device_attribute dev_attr_events_poll_msecs;

extern struct attribute_group blk_trace_attr_group;

long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg);

extern const struct address_space_operations def_blk_aops;

int disk_register_independent_access_ranges(struct gendisk *disk);
void disk_unregister_independent_access_ranges(struct gendisk *disk);

#ifdef CONFIG_FAIL_MAKE_REQUEST
bool should_fail_request(struct block_device *part, unsigned int bytes);
#else /* CONFIG_FAIL_MAKE_REQUEST */
static inline bool should_fail_request(struct block_device *part,
					unsigned int bytes)
{
	return false;
}
#endif /* CONFIG_FAIL_MAKE_REQUEST */

/*
 * Optimized request reference counting. Ideally we'd make timeouts be more
 * clever, as that's the only reason we need references at all... But until
 * this happens, this is faster than using refcount_t. Also see:
 *
 * abc54d634334 ("io_uring: switch to atomic_t for io_kiocb reference count")
 */
#define req_ref_zero_or_close_to_overflow(req)	\
	((unsigned int) atomic_read(&(req->ref)) + 127u <= 127u)
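
/*
 * Worked example of the macro above: the unsigned arithmetic flags a zero
 * refcount (0 + 127u == 127u, <= 127u) and a count that went negative after
 * an over-put ((unsigned int)-1 + 127u == 126u, <= 127u), while a healthy
 * count such as 1 yields 128u and passes. req_ref_put_and_test() WARNs on
 * these states before dropping its reference.
 */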

static inline bool req_ref_inc_not_zero(struct request *req)
{
	return atomic_inc_not_zero(&req->ref);
}

static inline bool req_ref_put_and_test(struct request *req)
{
	WARN_ON_ONCE(req_ref_zero_or_close_to_overflow(req));
	return atomic_dec_and_test(&req->ref);
}

static inline void req_ref_set(struct request *req, int value)
{
	atomic_set(&req->ref, value);
}

static inline int req_ref_read(struct request *req)
{
	return atomic_read(&req->ref);
}

#endif /* BLK_INTERNAL_H */