#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/pfn.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

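/*
 * Illustrative sketch (not part of this header): callers index the
 * per-direction arrays with BLK_RW_SYNC/BLK_RW_ASYNC, e.g. a fullness
 * check on the sync side of a pool might read:
 *
 *	if (rl->count[BLK_RW_SYNC] >= q->nr_requests)
 *		... pool exhausted, sleep on rl->wait[BLK_RW_SYNC] ...
 */
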
/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS = 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,	/* scsi command */
	REQ_TYPE_DRV_PRIV,	/* driver defined types from here */
};

/*
 * request flags
 */
typedef __u32 __bitwise req_flags_t;

/* elevator knows about this request */
#define RQF_SORTED		((__force req_flags_t)(1 << 0))
/* drive already may have started this one */
#define RQF_STARTED		((__force req_flags_t)(1 << 1))
/* uses tagged queueing */
#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
/* may not be passed by ioscheduler */
#define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
/* request for flush sequence */
#define RQF_FLUSH_SEQ		((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE		((__force req_flags_t)(1 << 5))
/* track inflight for MQ */
#define RQF_MQ_INFLIGHT		((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP		((__force req_flags_t)(1 << 7))
/* set for "ide_preempt" requests and also for requests for which the SCSI
   "quiesce" state must be ignored. */
#define RQF_PREEMPT		((__force req_flags_t)(1 << 8))
/* contains copies of user pages */
#define RQF_COPY_USER		((__force req_flags_t)(1 << 9))
/* vaguely specified driver internal error.  Ignored by the block layer */
#define RQF_FAILED		((__force req_flags_t)(1 << 10))
/* don't warn about errors */
#define RQF_QUIET		((__force req_flags_t)(1 << 11))
/* elevator private data attached */
#define RQF_ELVPRIV		((__force req_flags_t)(1 << 12))
/* account I/O stat */
#define RQF_IO_STAT		((__force req_flags_t)(1 << 13))
/* request came from our alloc pool */
#define RQF_ALLOCED		((__force req_flags_t)(1 << 14))
/* runtime pm request */
#define RQF_PM			((__force req_flags_t)(1 << 15))
/* on IO scheduler merge hash */
#define RQF_HASHED		((__force req_flags_t)(1 << 16))
/* IO stats tracking on */
#define RQF_STATS		((__force req_flags_t)(1 << 17))

/* flags that prevent us from merging requests: */
#define RQF_NOMERGE_FLAGS \
	(RQF_STARTED | RQF_SOFTBARRIER | RQF_FLUSH_SEQ)

#define BLK_MAX_CDB	16

/*
 * Try to put the fields that are referenced together in the same cacheline.
 *
 * If you modify this structure, make sure to update blk_rq_init() and
 * especially blk_mq_rq_ctx_init() to take care of the added fields.
 */
struct request {
	struct list_head queuelist;
	union {
		struct call_single_data csd;
		u64 fifo_time;
	};

	struct request_queue *q;
	struct blk_mq_ctx *mq_ctx;

	int cpu;
	unsigned cmd_type;
	unsigned int cmd_flags;		/* op and common flags */
	req_flags_t rq_flags;
	unsigned long atomic_flags;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	/*
	 * The hash is used inside the scheduler, and killed once the
	 * request reaches the dispatch list. The ipi_list is only used
	 * to queue the request for softirq completion, which is long
	 * after the request has been unhashed (and even removed from
	 * the dispatch list).
	 */
	union {
		struct hlist_node hash;	/* merge hash */
		struct list_head ipi_list;
	};

	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it.  Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
	struct blk_issue_stat issue_stat;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;    /* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	void *special;		/* opaque pointer available for LLD use */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

#include <linux/elevator.h>

struct blk_queue_ctx;

typedef void (request_fn_proc) (struct request_queue *q);
typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
	int alloc_policy;		/* tag allocation policy */
	int next_tag;			/* next tag */
};
#define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
#define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

/*
 * Zoned block device models (zoned limit).
 */
enum blk_zoned_model {
	BLK_ZONED_NONE,	/* Regular block device */
	BLK_ZONED_HA,	/* Host-aware zoned block device */
	BLK_ZONED_HM,	/* Host-managed zoned block device */
};

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;
	unsigned long		virt_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_dev_sectors;
	unsigned int		chunk_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		max_hw_discard_sectors;
	unsigned int		max_write_same_sectors;
	unsigned int		max_write_zeroes_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
	unsigned char		raid_partial_stripes_expensive;
	enum blk_zoned_model	zoned;
};

#ifdef CONFIG_BLK_DEV_ZONED

struct blk_zone_report_hdr {
	unsigned int	nr_zones;
	u8		padding[60];
};

extern int blkdev_report_zones(struct block_device *bdev,
			       sector_t sector, struct blk_zone *zones,
			       unsigned int *nr_zones, gfp_t gfp_mask);
extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
			      sector_t nr_sectors, gfp_t gfp_mask);

extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
				     unsigned int cmd, unsigned long arg);
extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
				    unsigned int cmd, unsigned long arg);

#else /* CONFIG_BLK_DEV_ZONED */

static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
					    fmode_t mode, unsigned int cmd,
					    unsigned long arg)
{
	return -ENOTTY;
}

#endif /* CONFIG_BLK_DEV_ZONED */

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	struct rq_wb		*rq_wb;

	/*
	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl.  Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	struct blk_mq_ops	*mq_ops;

	unsigned int		*mq_map;

	/* sw queues */
	struct blk_mq_ctx __percpu	*queue_ctx;
	unsigned int		nr_queues;

	unsigned int		queue_depth;

	/* hw dispatch queues */
	struct blk_mq_hw_ctx	**queue_hw_ctx;
	unsigned int		nr_hw_queues;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue.  Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject kobj;

	/*
	 * mq queue kobject
	 */
	struct kobject mq_kobj;

#ifdef  CONFIG_BLK_DEV_INTEGRITY
	struct blk_integrity integrity;
#endif	/* CONFIG_BLK_DEV_INTEGRITY */

#ifdef CONFIG_PM
	struct device		*dev;
	int			rpm_status;
	unsigned int		nr_pending;
#endif

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	struct blk_rq_stat	rq_stats[2];

	/*
	 * Number of active block driver functions for which blk_drain_queue()
	 * must wait. Must be incremented around functions that unlock the
	 * queue_lock internally, e.g. scsi_request_fn().
	 */
	unsigned int		request_fn_active;

	unsigned int		rq_timeout;
	int			poll_nsec;
	struct timer_list	timeout;
	struct work_struct	timeout_work;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	struct blk_flush_queue	*fq;

	struct list_head	requeue_list;
	spinlock_t		requeue_lock;
	struct delayed_work	requeue_work;

	struct mutex		sysfs_lock;

	int			bypass_depth;
	atomic_t		mq_freeze_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data *td;
#endif
	struct rcu_head		rcu_head;
	wait_queue_head_t	mq_freeze_wq;
	struct percpu_ref	q_usage_counter;
	struct list_head	all_q_node;

	struct blk_mq_tag_set	*tag_set;
	struct list_head	tag_set_list;
	struct bio_set		*bio_split;

	bool			mq_sysfs_init_done;
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DYING	5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECERASE	17	/* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */
#define QUEUE_FLAG_DEAD		19	/* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE	20	/* queue is initialized */
#define QUEUE_FLAG_NO_SG_MERGE	21	/* don't attempt to merge SG segments*/
#define QUEUE_FLAG_POLL		22	/* IO polling enabled if set */
#define QUEUE_FLAG_WC		23	/* Write back caching */
#define QUEUE_FLAG_FUA		24	/* device supports FUA writes */
#define QUEUE_FLAG_FLUSH_NQ	25	/* flush not queueable */
#define QUEUE_FLAG_DAX		26	/* device supports DAX */
#define QUEUE_FLAG_STATS	27	/* track rq completion times */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

#define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_POLL))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secure_erase(q) \
	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->rq_flags & RQF_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)

/*
 * Driver can handle struct request, if it either has an old style
 * request_fn defined, or is blk-mq based.
 */
static inline bool queue_is_rq_based(struct request_queue *q)
{
	return q->request_fn || q->mq_ops;
}

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

static inline enum blk_zoned_model
blk_queue_zoned_model(struct request_queue *q)
{
	return q->limits.zoned;
}

static inline bool blk_queue_is_zoned(struct request_queue *q)
{
	switch (blk_queue_zoned_model(q)) {
	case BLK_ZONED_HA:
	case BLK_ZONED_HM:
		return true;
	default:
		return false;
	}
}

static inline unsigned int blk_queue_zone_size(struct request_queue *q)
{
	return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}

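/*
 * Illustrative sketch (not part of this header): resetting the first
 * zone of a zoned device, assuming bdev is the block device backing q
 * and each zone spans chunk_sectors sectors:
 *
 *	if (blk_queue_is_zoned(q))
 *		blkdev_reset_zones(bdev, 0, blk_queue_zone_size(q),
 *				   GFP_KERNEL);
 */
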
static inline bool rq_is_sync(struct request *rq)
{
	return op_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (req_op(rq) == REQ_OP_FLUSH)
		return false;

	if (req_op(rq) == REQ_OP_WRITE_ZEROES)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;
	if (rq->rq_flags & RQF_NOMERGE_FLAGS)
		return false;

	return true;
}

static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
{
	if (bio_data(a) == bio_data(b))
		return true;

	return false;
}

static inline unsigned int blk_queue_depth(struct request_queue *q)
{
	if (q->queue_depth)
		return q->queue_depth;

	return q->nr_requests;
}

/*
 * q->prep_rq_fn return values
 */
enum {
	BLKPREP_OK,		/* serve it */
	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
	BLKPREP_DEFER,		/* leave on queue */
	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
};

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_BOUNCE */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	struct bvec_iter iter;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.iter)

#define rq_iter_last(bvec, _iter)				\
		(_iter.bio->bi_next == NULL &&			\
		 bio_iter_last(bvec, _iter.iter))

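/*
 * Illustrative sketch (not part of this header): zero-filling every
 * data segment of a request, assuming the pages are kernel-mapped:
 *
 *	struct req_iterator iter;
 *	struct bio_vec bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		memset(page_address(bvec.bv_page) + bvec.bv_offset,
 *		       0, bvec.bv_len);
 */
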
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

#ifdef CONFIG_PRINTK
#define vfs_msg(sb, level, fmt, ...)				\
	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
#else
#define vfs_msg(sb, level, fmt, ...)				\
do {								\
	no_printk(fmt, ##__VA_ARGS__);				\
	__vfs_msg(sb, "", " ");					\
} while (0)
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern void blk_rq_set_block_pc(struct request *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		int offset, unsigned int len);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_queue_split(struct request_queue *, struct bio **,
			    struct bio_set *);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void __blk_run_queue_uncond(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern void blk_mq_quiesce_queue(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, const struct iov_iter *,
			       gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

bool blk_mq_poll(struct request_queue *q, blk_qc_t cookie);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;	/* this is never NULL */
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

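/*
 * Illustrative sketch (not part of this header): a driver building a
 * hardware command would use these accessors rather than the internal
 * __sector/__data_len fields (cmd here is a hypothetical structure):
 *
 *	cmd->lba   = blk_rq_pos(rq);		start sector
 *	cmd->nsect = blk_rq_sectors(rq);	remaining sectors
 */
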
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
						     int op)
{
	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);

	if (unlikely(op == REQ_OP_WRITE_SAME))
		return q->limits.max_write_same_sectors;

	if (unlikely(op == REQ_OP_WRITE_ZEROES))
		return q->limits.max_write_zeroes_sectors;

	return q->limits.max_sectors;
}

/*
 * Return maximum size of a request at given offset. Only valid for
 * file system requests.
 */
static inline unsigned int blk_max_size_offset(struct request_queue *q,
					       sector_t offset)
{
	if (!q->limits.chunk_sectors)
		return q->limits.max_sectors;

	return q->limits.chunk_sectors -
			(offset & (q->limits.chunk_sectors - 1));
}

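/*
 * Worked example (illustrative): with chunk_sectors == 128 and an I/O
 * starting at sector offset 100, blk_max_size_offset() returns
 * 128 - (100 & 127) == 28 sectors, capping the request so it does not
 * straddle the chunk boundary at sector 128.
 */
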
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;

	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
		return q->limits.max_hw_sectors;

	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return blk_queue_get_max_sectors(q, req_op(rq));

	return min(blk_max_size_offset(q, offset),
			blk_queue_get_max_sectors(q, req_op(rq)));
}

static inline unsigned int blk_rq_count_bios(struct request *rq)
{
	unsigned int nr_bios = 0;
	struct bio *bio;

	__rq_for_each_bio(bio, rq)
		nr_bios++;

	return nr_bios;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends.  __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

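/*
 * Illustrative sketch (not part of this header): a legacy (non-mq)
 * driver completing 'bytes' of a request from its completion path,
 * queue lock not held:
 *
 *	if (!blk_end_request(rq, error, bytes)) {
 *		all bytes done: rq is finished and must not be
 *		touched again by the driver
 *	}
 *
 * With the queue lock already held, __blk_end_request() would be used
 * instead.
 */
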
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_max_write_same_sectors(struct request_queue *q,
		unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);

/*
 * block layer runtime pm functions
 */
#ifdef CONFIG_PM
extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
extern int blk_pre_runtime_suspend(struct request_queue *q);
extern void blk_post_runtime_suspend(struct request_queue *q, int err);
extern void blk_pre_runtime_resume(struct request_queue *q);
extern void blk_post_runtime_resume(struct request_queue *q, int err);
extern void blk_set_runtime_active(struct request_queue *q);
#else
static inline void blk_pm_runtime_init(struct request_queue *q,
				       struct device *dev) {}
static inline int blk_pre_runtime_suspend(struct request_queue *q)
{
	return -ENOSYS;
}
static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
static inline void blk_pre_runtime_resume(struct request_queue *q) {}
static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
static inline void blk_set_runtime_active(struct request_queue *q) {}
#endif

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into a single larger request. As the requests are moved from a per-task
 * list to the device's request_queue in a batch, this results in improved
 * scalability as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_plug() will only
 * flush the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	struct list_head list;		/* requests */
	struct list_head mq_list;	/* blk-mq requests */
	struct list_head cb_list;	/* md requires an unplug callback */
};
#define BLK_MAX_REQUEST_COUNT 16
#define BLK_PLUG_FLUSH_SIZE (128 * 1024)

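/*
 * Illustrative sketch (not part of this header): batching a burst of
 * submissions so they reach the driver as one unit
 * (has_more_bios()/next_bio() are hypothetical helpers):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	while (has_more_bios())
 *		submit_bio(next_bio());
 *	blk_finish_plug(&plug);
 */
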
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug &&
		(!list_empty(&plug->list) ||
		 !list_empty(&plug->mq_list) ||
		 !list_empty(&plug->cb_list));
}

/*
 * tag stuff
 */
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int, int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
#define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		bool discard);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, bool discard);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, true);
}

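/*
 * Illustrative sketch (not part of this header): discarding 1 MiB
 * (2048 sectors of 512 bytes) starting at sector 2048:
 *
 *	int ret = blkdev_issue_discard(bdev, 2048, 2048, GFP_KERNEL, 0);
 *
 * Passing BLKDEV_DISCARD_SECURE in flags instead requests a secure
 * erase, which only succeeds on queues with blk_queue_secure_erase().
 */
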
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 2560,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned long queue_virt_boundary(struct request_queue *q)
{
	return q->limits.virt_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}

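/*
 * Worked example (illustrative): with physical_block_size == 4096,
 * io_min <= 4096 and alignment_offset == 0, a request aimed at sector 7
 * sits 3584 bytes into its 4096-byte granule, so
 * queue_limit_alignment_offset() returns (4096 + 0 - 3584) % 4096 == 512:
 * the number of bytes to the next aligned boundary.
 */
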
static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> 9;
	granularity = lim->discard_granularity >> 9;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << 9;
}

c6e66634
PB
1454static inline int bdev_discard_alignment(struct block_device *bdev)
1455{
1456 struct request_queue *q = bdev_get_queue(bdev);
1457
1458 if (bdev != bdev->bd_contains)
1459 return bdev->bd_part->discard_alignment;
1460
1461 return q->limits.discard_alignment;
1462}
1463
98262f27
MP
1464static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1465{
a934a00a 1466 if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
98262f27
MP
1467 return 1;
1468
1469 return 0;
1470}
1471
1472static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
1473{
1474 return queue_discard_zeroes_data(bdev_get_queue(bdev));
1475}
1476
4363ac7c
MP
1477static inline unsigned int bdev_write_same(struct block_device *bdev)
1478{
1479 struct request_queue *q = bdev_get_queue(bdev);
1480
1481 if (q)
1482 return q->limits.max_write_same_sectors;
1483
1484 return 0;
1485}
1486
a6f0788e
CK
1487static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
1488{
1489 struct request_queue *q = bdev_get_queue(bdev);
1490
1491 if (q)
1492 return q->limits.max_write_zeroes_sectors;
1493
1494 return 0;
1495}
1496

static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zoned_model(q);

	return BLK_ZONED_NONE;
}

static inline bool bdev_is_zoned(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_is_zoned(q);

	return false;
}

static inline unsigned int bdev_zone_size(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q)
		return blk_queue_zone_size(q);

	return 0;
}
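
/*
 * Usage sketch (illustrative): host-aware and host-managed drives report
 * BLK_ZONED_HA and BLK_ZONED_HM respectively, so zone-aware code
 * typically gates itself before asking for geometry:
 *
 *	if (bdev_is_zoned(bdev))
 *		zone_sectors = bdev_zone_size(bdev);
 *
 * where the zone size is in 512-byte sectors and 0 means "no queue or
 * not zoned".
 */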

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}
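
/*
 * Worked example: with the default dma_alignment of 511 and no pad mask,
 * the combined mask is 0x1ff, so addr = 0x1000 with len = 1024 passes
 * (both have the low 9 bits clear) while addr = 0x1001 or len = 1000
 * fails.
 */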

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}
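
/*
 * For the power-of-two sizes this is called with, the result is just
 * log2(size): blksize_bits(512) == 9 and blksize_bits(4096) == 12 (four
 * trips through the loop: 4096 -> 2048 -> 1024 -> 512 -> 256).
 */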

static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	put_page(p.v);
}
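
/*
 * Typical use (sketch): read_dev_sector() hands back a pointer into a
 * page cache page and pins that page via the Sector cookie, which the
 * caller must release:
 *
 *	Sector sect;
 *	unsigned char *p = read_dev_sector(bdev, n, &sect);
 *
 *	if (p) {
 *		... examine the 512 bytes at p ...
 *		put_dev_sector(sect);
 *	}
 */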

static inline bool __bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	return offset ||
		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
}

/*
 * Check if adding a bio_vec after bprv with offset would create a gap in
 * the SG list. Most drivers don't care about this, but some do.
 */
static inline bool bvec_gap_to_prev(struct request_queue *q,
				struct bio_vec *bprv, unsigned int offset)
{
	if (!queue_virt_boundary(q))
		return false;
	return __bvec_gap_to_prev(q, bprv, offset);
}
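
/*
 * Example with hypothetical numbers: for a virt_boundary mask of 0xfff
 * (a 4 KB boundary, as set by drivers like NVMe), a previous bvec that
 * ends at byte 2048 of its page gaps against anything that follows, and
 * so does a next bvec that starts at a nonzero offset.
 */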

static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
				struct bio *next)
{
	if (bio_has_data(prev) && queue_virt_boundary(q)) {
		struct bio_vec pb, nb;

		bio_get_last_bvec(prev, &pb);
		bio_get_first_bvec(next, &nb);

		return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
	}

	return false;
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, bio, req->bio);
}

int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress to
 * fix this up; until that is in place we need to disable preemption
 * around sched_clock() here and in set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

enum blk_integrity_flags {
	BLK_INTEGRITY_VERIFY		= 1 << 0,
	BLK_INTEGRITY_GENERATE		= 1 << 1,
	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
};

struct blk_integrity_iter {
	void			*prot_buf;
	void			*data_buf;
	sector_t		seed;
	unsigned int		data_size;
	unsigned short		interval;
	const char		*disk_name;
};

typedef int (integrity_processing_fn) (struct blk_integrity_iter *);

struct blk_integrity_profile {
	integrity_processing_fn	*generate_fn;
	integrity_processing_fn	*verify_fn;
	const char		*name;
};
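
/*
 * Rough sketch of how a profile is driven (based on the t10-pi
 * implementation): generate_fn runs on writes to fill ->prot_buf from
 * ->data_buf, verify_fn runs on reads to check it, and each callback
 * walks ->data_size bytes in ->interval sized chunks, returning 0 on
 * success or a negative errno (e.g. -EILSEQ on a bad guard tag).
 */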

extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
				   struct request *);
extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
				    struct bio *);

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	struct blk_integrity *bi = &disk->queue->integrity;

	if (!bi->profile)
		return NULL;

	return bi;
}

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return blk_get_integrity(bdev->bd_disk);
}

static inline bool blk_integrity_rq(struct request *rq)
{
	return rq->cmd_flags & REQ_INTEGRITY;
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	struct bio_integrity_payload *bip = bio_integrity(req->bio);
	struct bio_integrity_payload *bip_next = bio_integrity(next);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);

	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
				bip_next->bip_vec[0].bv_offset);
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return NULL;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline void blk_integrity_register(struct gendisk *d,
					  struct blk_integrity *b)
{
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline bool blk_integrity_merge_rq(struct request_queue *rq,
					  struct request *r1,
					  struct request *r2)
{
	return true;
}
static inline bool blk_integrity_merge_bio(struct request_queue *rq,
					   struct request *r,
					   struct bio *b)
{
	return true;
}

static inline bool integrity_req_gap_back_merge(struct request *req,
						struct bio *next)
{
	return false;
}
static inline bool integrity_req_gap_front_merge(struct request *req,
						 struct bio *bio)
{
	return false;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

/**
 * struct blk_dax_ctl - control and output parameters for ->direct_access
 * @sector: (input) offset relative to a block_device
 * @addr: (output) kernel virtual address for @sector populated by driver
 * @pfn: (output) page frame number for @addr populated by driver
 * @size: (input) number of bytes requested
 */
struct blk_dax_ctl {
	sector_t	sector;
	void		*addr;
	long		size;
	pfn_t		pfn;
};
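
/*
 * Usage sketch (names hypothetical): fill in the input fields, call
 * bdev_direct_access() (declared below), and treat the return value as
 * the number of bytes usable at ->addr (at most ->size) or a negative
 * errno:
 *
 *	struct blk_dax_ctl dax = { .sector = sect, .size = PAGE_SIZE };
 *	long avail = bdev_direct_access(bdev, &dax);
 *
 *	if (avail >= 0)
 *		memcpy(buf, dax.addr, avail);
 */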

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	void (*release) (struct gendisk *, fmode_t);
	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
			long);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is called with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
	const struct pr_ops *pr_ops;
};
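
/*
 * Minimal sketch of a driver's ops table (the mydrv_* names are
 * hypothetical); unset hooks are simply left NULL:
 *
 *	static const struct block_device_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= mydrv_open,
 *		.release	= mydrv_release,
 *		.ioctl		= mydrv_ioctl,
 *		.getgeo		= mydrv_getgeo,
 *	};
 */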

extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
			   struct writeback_control *);
extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
extern bool bdev_dax_capable(struct block_device *);
#else /* CONFIG_BLOCK */

struct block_device;

/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
				     sector_t *error_sector)
{
	return 0;
}

#endif /* CONFIG_BLOCK */

#endif