/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>
#include <linux/sched/clock.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/* Must be consistent with blk_mq_poll_stats_bkt() */
#define BLK_MQ_POLL_STATS_BKTS	16

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS		5

typedef void (rq_end_io_fn)(struct request *, blk_status_t);

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account into disk and partition IO statistics */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* track IO completion time */
#define RQF_STATS		((__force req_flags_t)(1 << 17))
/* Look at ->special_vec for the actual data payload instead of the
   bio chain. */
#define RQF_SPECIAL_PAYLOAD	((__force req_flags_t)(1 << 18))
/* The per-zone write lock is held for this request */
#define RQF_ZONE_WRITE_LOCKED	((__force req_flags_t)(1 << 19))
/* already slept for hybrid poll */
#define RQF_MQ_POLL_SLEPT	((__force req_flags_t)(1 << 20))
/* ->timeout has been called, don't expire again */
#define RQF_TIMED_OUT		((__force req_flags_t)(1 << 21))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ | RQF_SPECIAL_PAYLOAD)

/*
 * Request state for blk-mq.
 */
enum mq_rq_state {
	MQ_RQ_IDLE		= 0,
	MQ_RQ_IN_FLIGHT		= 1,
	MQ_RQ_COMPLETE		= 2,
};

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;
	struct blk_mq_hw_ctx *mq_hctx;

	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;

	int internal_tag;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	int tag;
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct list_head queuelist;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		struct bio_vec special_vec;
		void *completion_data;
		int error_count; /* for legacy drivers, don't use */
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate them.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	/* Time that I/O was submitted to the kernel. */
	u64 start_time_ns;
	/* Time that I/O was submitted to the device. */
	u64 io_start_time_ns;

#ifdef CONFIG_BLK_WBT
	unsigned short wbt_flags;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
	unsigned short throtl_size;
#endif

	/*
	 * Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;

#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short write_hint;
	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	unsigned int extra_len;	/* length of alignment and padding */

	enum mq_rq_state state;
	refcount_t ref;

	unsigned int timeout;
	unsigned long deadline;

	union {
		struct __call_single_data csd;
		u64 fifo_time;
	};

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline bool blk_op_is_scsi(unsigned int op)
{
	return op == REQ_OP_SCSI_IN || op == REQ_OP_SCSI_OUT;
}

static inline bool blk_op_is_private(unsigned int op)
{
	return op == REQ_OP_DRV_IN || op == REQ_OP_DRV_OUT;
}

static inline bool blk_rq_is_scsi(struct request *rq)
{
	return blk_op_is_scsi(req_op(rq));
}

static inline bool blk_rq_is_private(struct request *rq)
{
	return blk_op_is_private(req_op(rq));
}

static inline bool blk_rq_is_passthrough(struct request *rq)
{
	return blk_rq_is_scsi(rq) || blk_rq_is_private(rq);
}

static inline bool bio_is_passthrough(struct bio *bio)
{
	unsigned op = bio_op(bio);

	return blk_op_is_scsi(op) || blk_op_is_private(op);
}

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

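/*
 * Illustrative sketch (not part of this header): a driver's prep path
 * might use the helpers above to refuse passthrough commands it cannot
 * interpret.  my_prep_rq() is a hypothetical name used only for
 * illustration:
 *
 *	static blk_status_t my_prep_rq(struct request *rq)
 *	{
 *		if (blk_rq_is_passthrough(rq))
 *			return BLK_STS_NOTSUPP;
 *		return BLK_STS_OK;
 *	}
 */
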
#include <linux/elevator.h>

struct blk_queue_ctx;

typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef bool (poll_q_fn) (struct request_queue *q, blk_qc_t);

struct bio_vec;
typedef int (dma_drain_needed_fn)(struct request *);

enum blk_eh_timer_return {
	BLK_EH_DONE,		/* driver has completed the command */
	BLK_EH_RESET_TIMER,	/* reset timer and try again */
};

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;
	unsigned short		max_discard_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};

#ifdef CONFIG_BLK_DEV_ZONED

extern unsigned int blkdev_nr_zones(struct block_device *bdev);
extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
{
	return 0;
}

static inline int blk_revalidate_disk_zones(struct gendisk *disk)
{
	return 0;
}

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					   fmode_t mode, unsigned int cmd,
					   unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;

	struct blk_queue_stats	*stats;
	struct rq_qos		*rq_qos;

	make_request_fn		*make_request_fn;
	poll_q_fn		*poll_fn;
	dma_drain_needed_fn	*dma_drain_needed;

	const struct blk_mq_ops	*mq_ops;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	struct backing_dev_info	*backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;
	/*
	 * Number of contexts that have called blk_set_pm_only(). If this
	 * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
	 * processed.
	 */
	atomic_t		pm_only;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	spinlock_t		queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	unsigned int		rq_timeout;
	int			poll_nsec;

	struct blk_stat_callback	*poll_cb;
	struct blk_rq_stat	poll_stat[BLK_MQ_POLL_STATS_BKTS];

	struct timer_list	timeout;
	struct work_struct	timeout_work;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

#ifdef CONFIG_BLK_DEV_ZONED
	/*
	 * Zoned block device information for request dispatch control.
	 * nr_zones is the total number of zones of the device. This is always
	 * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
	 * bits which indicates if a zone is conventional (bit clear) or
	 * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
	 * bits which indicates if a zone is write locked, that is, if a write
	 * request targeting the zone was dispatched. All three fields are
	 * initialized by the low level device driver (e.g. scsi/sd.c).
	 * Stacking drivers (device mappers) may or may not initialize
	 * these fields.
	 *
	 * Reads of this information must be protected with blk_queue_enter() /
	 * blk_queue_exit(). Modifying this information is only allowed while
	 * no requests are being processed. See also blk_mq_freeze_queue() and
	 * blk_mq_unfreeze_queue().
	 */
	unsigned int		nr_zones;
	unsigned long		*seq_zones_bitmap;
	unsigned long		*seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
	struct mutex		blk_trace_mutex;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		bio_split;

#ifdef CONFIG_BLK_DEBUG_FS
	struct dentry		*debugfs_dir;
	struct dentry		*sched_debugfs_dir;
#endif

	bool			mq_sysfs_init_done;

	size_t			cmd_size;
	void			*rq_alloc_data;

	struct work_struct	release_work;

#define BLK_MAX_WRITE_HINTS	5
	u64			write_hints[BLK_MAX_WRITE_HINTS];
};

#define QUEUE_FLAG_STOPPED	1	/* queue is stopped */
#define QUEUE_FLAG_DYING	2	/* queue being torn down */
#define QUEUE_FLAG_BIDI		4	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	5	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	6	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	7	/* fake timeout */
#define QUEUE_FLAG_NONROT	9	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	10	/* do disk/partitions IO accounting */
#define QUEUE_FLAG_DISCARD	11	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	12	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	13	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	14	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	15	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		16	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	17	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	18	/* don't attempt to merge SG segments */
#define QUEUE_FLAG_POLL		19	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		20	/* Write back caching */
#define QUEUE_FLAG_FUA		21	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	22	/* flush not queueable */
#define QUEUE_FLAG_DAX		23	/* device supports DAX */
#define QUEUE_FLAG_STATS	24	/* track IO start and completion times */
#define QUEUE_FLAG_POLL_STATS	25	/* collecting stats for hybrid polling */
#define QUEUE_FLAG_REGISTERED	26	/* queue has been registered to a disk */
#define QUEUE_FLAG_SCSI_PASSTHROUGH 27	/* queue supports SCSI commands */
#define QUEUE_FLAG_QUIESCED	28	/* queue has been quiesced */
#define QUEUE_FLAG_PCI_P2PDMA	29	/* device supports PCI p2p requests */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_SAME_COMP) |		\
				 (1 << QUEUE_FLAG_POLL))

void blk_queue_flag_set(unsigned int flag, struct request_queue *q);
void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_scsi_passthrough(q)	\
	test_bit(QUEUE_FLAG_SCSI_PASSTHROUGH, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q)	\
	test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q)	test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q)	atomic_read(&(q)->pm_only)
#define blk_queue_fua(q)	test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)

extern void blk_set_pm_only(struct request_queue *q);
extern void blk_clear_pm_only(struct request_queue *q);

static inline bool blk_account_rq(struct request *rq)
{
	return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq);
}

#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

static inline bool queue_is_mq(struct request_queue *q)
{
	return q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->nr_zones : 0;
}

static inline unsigned int blk_queue_zone_no(struct request_queue *q,
					     sector_t sector)
{
	if (!blk_queue_is_zoned(q))
		return 0;
	return sector >> ilog2(q->limits.chunk_sectors);
}

static inline bool blk_queue_zone_is_seq(struct request_queue *q,
					 sector_t sector)
{
	if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
		return false;
	return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
{
	return 0;
}
#endif /* CONFIG_BLK_DEV_ZONED */

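/*
 * Illustrative sketch (assumes CONFIG_BLK_DEV_ZONED): because zones are
 * chunk_sectors wide and chunk_sectors is a power of two, the zone number
 * of a sector is just the sector shifted down.  A driver that must
 * serialize writes per zone might do:
 *
 *	sector_t sect = blk_rq_pos(rq);
 *
 *	if (blk_queue_is_zoned(q) && blk_queue_zone_is_seq(q, sect))
 *		lock_my_zone(blk_queue_zone_no(q, sect));
 *
 * lock_my_zone() is a hypothetical helper used only for illustration.
 */
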
static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool rq_mergeable(struct request *rq)
{
	if (blk_rq_is_passthrough(rq))
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_page(a) == bio_page(b) &&
	    bio_offset(a) == bio_offset(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

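/*
 * Illustrative sketch (not part of this header): walking every segment of
 * a request, for example to total up the mapped data length:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *	unsigned int bytes = 0;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		bytes += bvec.bv_len;
 */
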
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern blk_qc_t direct_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern struct request *blk_get_request(struct request_queue *, unsigned int op,
				       blk_mq_req_flags_t flags);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern blk_status_t blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio **bio);
extern void blk_queue_split(struct request_queue *, struct bio **);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern void blk_execute_rq(struct request_queue *, struct gendisk *,
			   struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);

bool blk_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * The basic unit of block I/O is a sector. It is used in a number of contexts
 * in Linux (blk, bio, genhd). The size of one sector is 512 = 2**9
 * bytes. Variables of type sector_t represent an offset or size that is a
 * multiple of 512 bytes. Hence these two constants.
 */
#ifndef SECTOR_SHIFT
#define SECTOR_SHIFT 9
#endif
#ifndef SECTOR_SIZE
#define SECTOR_SIZE (1 << SECTOR_SHIFT)
#endif

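/*
 * Worked example of the sector unit: a 4 KiB page covers
 * 4096 >> SECTOR_SHIFT = 8 sectors, and sector 2048 sits
 * 2048 << SECTOR_SHIFT = 1 MiB into the device.  This is the block layer
 * unit, independent of the device's logical block size.
 */
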
/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> SECTOR_SHIFT;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}

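/*
 * Illustrative sketch (not part of this header): the byte range a request
 * touches on the device can be derived from the helpers above:
 *
 *	u64 start = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
 *	u64 len   = blk_rq_bytes(rq);
 *	u64 end   = start + len;	/- exclusive end of the range -/
 */
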
#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
	return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
}

static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
	return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
#endif /* CONFIG_BLK_DEV_ZONED */

/*
 * Some commands like WRITE SAME have a payload or data transfer size which
 * is different from the size of the request.  Any driver that supports such
 * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
 * calculate the data transfer size.
 */
static inline unsigned int blk_rq_payload_bytes(struct request *rq)
{
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return rq->special_vec.bv_len;
	return blk_rq_bytes(rq);
}

static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors,
			   UINT_MAX >> SECTOR_SHIFT);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1))));
}

981static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
982 sector_t offset)
f31dc1cd
MP
983{
984 struct request_queue *q = rq->q;
985
57292b58 986 if (blk_rq_is_passthrough(rq))
f31dc1cd
MP
987 return q->limits.max_hw_sectors;
988
7afafc8a
AH
989 if (!q->limits.chunk_sectors ||
990 req_op(rq) == REQ_OP_DISCARD ||
991 req_op(rq) == REQ_OP_SECURE_ERASE)
8fe0d473 992 return blk_queue_get_max_sectors(q, req_op(rq));
762380ad 993
17007f39 994 return min(blk_max_size_offset(q, offset),
8fe0d473 995 blk_queue_get_max_sectors(q, req_op(rq)));
f31dc1cd
MP
996}
997
75afb352
JN
998static inline unsigned int blk_rq_count_bios(struct request *rq)
999{
1000 unsigned int nr_bios = 0;
1001 struct bio *bio;
1002
1003 __rq_for_each_bio(bio, rq)
1004 nr_bios++;
1005
1006 return nr_bios;
1007}
1008
ef71de8b
CH
1009void blk_steal_bios(struct bio_list *list, struct request *rq);
1010
1da177e4 1011/*
2e60e022
TH
1012 * Request completion related functions.
1013 *
1014 * blk_update_request() completes given number of bytes and updates
1015 * the request without completing it.
1016 *
f06d9a2b
TH
1017 * blk_end_request() and friends. __blk_end_request() must be called
1018 * with the request queue spinlock acquired.
1da177e4
LT
1019 *
1020 * Several drivers define their own end_request and call
3bcddeac
KU
1021 * blk_end_request() for parts of the original function.
1022 * This prevents code duplication in drivers.
1da177e4 1023 */
2a842aca 1024extern bool blk_update_request(struct request *rq, blk_status_t error,
2e60e022 1025 unsigned int nr_bytes);
2a842aca
CH
1026extern void blk_end_request_all(struct request *rq, blk_status_t error);
1027extern bool __blk_end_request(struct request *rq, blk_status_t error,
b1f74493 1028 unsigned int nr_bytes);
2a842aca
CH
1029extern void __blk_end_request_all(struct request *rq, blk_status_t error);
1030extern bool __blk_end_request_cur(struct request *rq, blk_status_t error);
2e60e022 1031
242f9dcb
JA
1032extern void __blk_complete_request(struct request *);
1033extern void blk_abort_request(struct request *);
ff856bad 1034
/*
 * Access functions for manipulating queue properties
 */
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
		unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);

CH
1084/*
1085 * Number of physical segments as sent to the device.
1086 *
1087 * Normally this is the number of discontiguous data segments sent by the
1088 * submitter. But for data-less command like discard we might have no
1089 * actual data segments submitted, but the driver might have to add it's
1090 * own special payload. In that case we still return 1 here so that this
1091 * special payload will be mapped.
1092 */
f9d03f96
CH
1093static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
1094{
1095 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
1096 return 1;
1097 return rq->nr_phys_segments;
1098}
1099
1e739730
CH
1100/*
1101 * Number of discard segments (or ranges) the driver needs to fill in.
1102 * Each discard bio merged into a request is counted as one segment.
1103 */
1104static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
1105{
1106 return max_t(unsigned short, rq->nr_phys_segments, 1);
1107}
1108
extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head mq_list; /* blk-mq requests */
	struct list_head cb_list; /* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		 (!list_empty(&plug->mq_list) ||
		  !list_empty(&plug->cb_list));
}

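/*
 * Illustrative sketch (not part of this header): batching a burst of bios
 * under a plug so the block layer can merge adjacent ones before they
 * reach the device.  submit_bio() comes from <linux/bio.h>;
 * next_bio_to_submit() is a hypothetical helper used only for illustration:
 *
 *	struct blk_plug plug;
 *	struct bio *bio;
 *
 *	blk_start_plug(&plug);
 *	while ((bio = next_bio_to_submit()) != NULL)
 *		submit_bio(bio);
 *	blk_finish_plug(&plug);
 */
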
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */

extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);

#define BLKDEV_ZERO_NOUNMAP	(1 << 0)  /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK	(1 << 1)  /* don't write explicit zeroes */

extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags);

static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits -
					      SECTOR_SHIFT),
				    nr_blocks << (sb->s_blocksize_bits -
						  SECTOR_SHIFT),
				    gfp_mask, 0);
}

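/*
 * Worked example of the unit conversion in the two helpers above: for a
 * filesystem with s_blocksize_bits = 12 (4 KiB blocks), each block spans
 * 1 << (12 - SECTOR_SHIFT) = 8 sectors, so filesystem block 100 becomes
 * sector 800 on the underlying device.
 */
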
extern int blk_verify_command(unsigned char *cmd, fmode_t mode);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned short queue_max_discard_segments(struct request_queue *q)
{
	return q->limits.max_discard_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

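/*
 * Worked example of queue_limit_alignment_offset(): with
 * physical_block_size = io_min = 4096 (a granularity of 8 sectors),
 * alignment_offset = 0 and sector = 3, the division leaves a remainder of
 * 3 sectors (1536 bytes), so the helper returns
 * (4096 + 0 - 1536) % 4096 = 2560 bytes until the next aligned boundary.
 */
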
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int bdev_write_same(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_same_sectors;

	return 0;
}

static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return q->limits.max_write_zeroes_sectors;

	return 0;
}

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zone_sectors(q);
	return 0;
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

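/*
 * Worked example of blk_rq_aligned(): with the default dma_alignment of 511
 * and a dma_pad_mask of 0, a buffer at address 0x1000 with length 512
 * passes (neither value has any of the low 9 bits set), while a buffer at
 * 0x1004 does not and would have to be bounced or copied by the caller.
 */
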
/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

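/*
 * Worked example of blksize_bits(): for size = 4096 the loop runs four
 * times (4096 -> 2048 -> 1024 -> 512 -> 256) and returns 12, and indeed
 * 1 << 12 == 4096.  The "assumes size > 256" note matters: smaller inputs
 * still return 9.
 */
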
2befb9e3 1451static inline unsigned int block_size(struct block_device *bdev)
1da177e4
LT
1452{
1453 return bdev->bd_block_size;
1454}
1455
f3876930 1456static inline bool queue_flush_queueable(struct request_queue *q)
1457{
c888a8f9 1458 return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
f3876930 1459}
1460
1da177e4
LT
1461typedef struct {struct page *v;} Sector;
1462
1463unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1464
1465static inline void put_dev_sector(Sector p)
1466{
09cbfeaf 1467 put_page(p.v);
1da177e4
LT
1468}
1469
59c3d45e 1470int kblockd_schedule_work(struct work_struct *work);
ee63cfa7 1471int kblockd_schedule_work_on(int cpu, struct work_struct *work);
818cd1cb 1472int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1da177e4 1473
1da177e4
LT
1474#define MODULE_ALIAS_BLOCKDEV(major,minor) \
1475 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1476#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1477 MODULE_ALIAS("block-major-" __stringify(major) "-*")
1478
7ba1ba12
MP
1479#if defined(CONFIG_BLK_DEV_INTEGRITY)
1480
8288f496
MP
1481enum blk_integrity_flags {
1482 BLK_INTEGRITY_VERIFY = 1 << 0,
1483 BLK_INTEGRITY_GENERATE = 1 << 1,
3aec2f41 1484 BLK_INTEGRITY_DEVICE_CAPABLE = 1 << 2,
aae7df50 1485 BLK_INTEGRITY_IP_CHECKSUM = 1 << 3,
8288f496 1486};
7ba1ba12 1487
18593088 1488struct blk_integrity_iter {
7ba1ba12
MP
1489 void *prot_buf;
1490 void *data_buf;
3be91c4a 1491 sector_t seed;
7ba1ba12 1492 unsigned int data_size;
3be91c4a 1493 unsigned short interval;
7ba1ba12
MP
1494 const char *disk_name;
1495};
1496
4e4cbee9 1497typedef blk_status_t (integrity_processing_fn) (struct blk_integrity_iter *);
7ba1ba12 1498
0f8087ec
MP
1499struct blk_integrity_profile {
1500 integrity_processing_fn *generate_fn;
1501 integrity_processing_fn *verify_fn;
1502 const char *name;
1503};
7ba1ba12 1504
25520d55 1505extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
7ba1ba12 1506extern void blk_integrity_unregister(struct gendisk *);
ad7fce93 1507extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
13f05c8d
MP
1508extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1509 struct scatterlist *);
1510extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
4eaf99be
MP
1511extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1512 struct request *);
1513extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1514 struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        struct blk_integrity *bi = &disk->queue->integrity;

        if (!bi->profile)
                return NULL;

        return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
        return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
        return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
        q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
        return q->limits.max_integrity_segments;
}

/**
 * bio_integrity_intervals - Return number of integrity intervals for a bio
 * @bi:		blk_integrity profile for device
 * @sectors:	Size of the bio in 512-byte sectors
 *
 * Description: The block layer calculates everything in 512 byte
 * sectors but integrity metadata is done in terms of the data integrity
 * interval size of the storage device.  Convert the block layer sectors
 * to the appropriate number of integrity intervals.
 */
static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
                                                   unsigned int sectors)
{
        return sectors >> (bi->interval_exp - 9);
}

static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
                                               unsigned int sectors)
{
        return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
}
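
/*
 * Worked example: for a device with a 4096-byte protection interval
 * (interval_exp == 12) and 8-byte tuples, a 32 KiB bio spans 64
 * block-layer sectors; bio_integrity_intervals() returns 64 >> 3 == 8
 * and bio_integrity_bytes() returns 8 * 8 == 64 bytes of metadata.
 */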

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
        return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
                                            struct bio *b)
{
        return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
                                          struct bio *b,
                                          struct scatterlist *s)
{
        return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
        return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
        return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
        return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
                                          struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
                                                    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
        return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
                                          struct request *r1,
                                          struct request *r2)
{
        return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
                                           struct request *r,
                                           struct bio *b)
{
        return true;
}

static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
                                                   unsigned int sectors)
{
        return 0;
}

static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
                                               unsigned int sectors)
{
        return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
        int (*open) (struct block_device *, fmode_t);
        void (*release) (struct gendisk *, fmode_t);
        int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        unsigned int (*check_events) (struct gendisk *disk,
                                      unsigned int clearing);
        /* ->media_changed() is DEPRECATED, use ->check_events() instead */
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        /* this callback is with swap_lock and sometimes page table lock held */
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);
        int (*report_zones)(struct gendisk *, sector_t sector,
                            struct blk_zone *zones, unsigned int *nr_zones,
                            gfp_t gfp_mask);
        struct module *owner;
        const struct pr_ops *pr_ops;
};
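
/*
 * Minimal sketch of a driver's operations table ("mydrv_" names are
 * hypothetical).  Every method is optional and may be left NULL;
 * drivers normally set ->owner to THIS_MODULE and install the table in
 * gendisk->fops before add_disk():
 *
 *      static const struct block_device_operations mydrv_fops = {
 *              .owner          = THIS_MODULE,
 *              .open           = mydrv_open,
 *              .release        = mydrv_release,
 *              .getgeo         = mydrv_getgeo,
 *      };
 */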

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
                                 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
                           struct writeback_control *);

#ifdef CONFIG_BLK_DEV_ZONED
bool blk_req_needs_zone_write_lock(struct request *rq);
void __blk_req_zone_write_lock(struct request *rq);
void __blk_req_zone_write_unlock(struct request *rq);

static inline void blk_req_zone_write_lock(struct request *rq)
{
        if (blk_req_needs_zone_write_lock(rq))
                __blk_req_zone_write_lock(rq);
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
        if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED)
                __blk_req_zone_write_unlock(rq);
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return rq->q->seq_zones_wlock &&
                test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock);
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        if (!blk_req_needs_zone_write_lock(rq))
                return true;
        return !blk_req_zone_is_write_locked(rq);
}
#else
static inline bool blk_req_needs_zone_write_lock(struct request *rq)
{
        return false;
}

static inline void blk_req_zone_write_lock(struct request *rq)
{
}

static inline void blk_req_zone_write_unlock(struct request *rq)
{
}

static inline bool blk_req_zone_is_write_locked(struct request *rq)
{
        return false;
}

static inline bool blk_req_can_dispatch_to_zone(struct request *rq)
{
        return true;
}
#endif /* CONFIG_BLK_DEV_ZONED */
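
/*
 * Outline of how the zone write locking helpers fit together for
 * sequential-write-required zones (hedged sketch; the mq-deadline I/O
 * scheduler is the in-tree user of this pattern): before dispatching a
 * write, check that its target zone is not already write-locked, take
 * the per-zone lock for the request, and release it once the request
 * completes or is requeued:
 *
 *      if (!blk_req_can_dispatch_to_zone(rq)) {
 *              ... postpone rq ...
 *      } else {
 *              blk_req_zone_write_lock(rq);
 *              ... dispatch rq; release with
 *                  blk_req_zone_write_unlock(rq) on completion ...
 *      }
 */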

#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
        return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
        return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
                                     sector_t *error_sector)
{
        return 0;
}

#endif /* CONFIG_BLOCK */

#endif