#ifndef _LINUX_BLKDEV_H
#define _LINUX_BLKDEV_H

#include <linux/sched.h>

#ifdef CONFIG_BLOCK

#include <linux/major.h>
#include <linux/genhd.h>
#include <linux/list.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/mempool.h>
#include <linux/bio.h>
#include <linux/stringify.h>
#include <linux/gfp.h>
#include <linux/bsg.h>
#include <linux/smp.h>

#include <asm/scatterlist.h>

struct module;
struct scsi_ioctl_command;

struct request_queue;
struct elevator_queue;
struct request_pm_state;
struct blk_trace;
struct request;
struct sg_io_hdr;
struct bsg_job;
struct blkcg_gq;

#define BLKDEV_MIN_RQ	4
#define BLKDEV_MAX_RQ	128	/* Default maximum */

/*
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
#define BLKCG_MAX_POLS	2

struct request;
typedef void (rq_end_io_fn)(struct request *, int);

#define BLK_RL_SYNCFULL		(1U << 0)
#define BLK_RL_ASYNCFULL	(1U << 1)

struct request_list {
	struct request_queue	*q;	/* the queue this rl belongs to */
#ifdef CONFIG_BLK_CGROUP
	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
#endif
	/*
	 * count[], starved[], and wait[] are indexed by
	 * BLK_RW_SYNC/BLK_RW_ASYNC
	 */
	int			count[2];
	int			starved[2];
	mempool_t		*rq_pool;
	wait_queue_head_t	wait[2];
	unsigned int		flags;
};

/*
 * request command types
 */
enum rq_cmd_type_bits {
	REQ_TYPE_FS		= 1,	/* fs request */
	REQ_TYPE_BLOCK_PC,		/* scsi command */
	REQ_TYPE_SENSE,			/* sense request */
	REQ_TYPE_PM_SUSPEND,		/* suspend request */
	REQ_TYPE_PM_RESUME,		/* resume request */
	REQ_TYPE_PM_SHUTDOWN,		/* shutdown request */
	REQ_TYPE_SPECIAL,		/* driver defined type */
	/*
	 * for ATA/ATAPI devices. this really doesn't belong here, ide should
	 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver
	 * private REQ_LB opcodes to differentiate what type of request this is
	 */
	REQ_TYPE_ATA_TASKFILE,
	REQ_TYPE_ATA_PC,
};

#define BLK_MAX_CDB	16

/*
 * try to put the fields that are referenced together in the same cacheline.
 * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
 * as well!
 */
struct request {
	struct list_head queuelist;
	struct call_single_data csd;

	struct request_queue *q;

	unsigned int cmd_flags;
	enum rq_cmd_type_bits cmd_type;
	unsigned long atomic_flags;

	int cpu;

	/* the following two fields are internal, NEVER access directly */
	unsigned int __data_len;	/* total data len */
	sector_t __sector;		/* sector cursor */

	struct bio *bio;
	struct bio *biotail;

	struct hlist_node hash;	/* merge hash */
	/*
	 * The rb_node is only used inside the io scheduler, requests
	 * are pruned when moved to the dispatch queue. So let the
	 * completion_data share space with the rb_node.
	 */
	union {
		struct rb_node rb_node;	/* sort/lookup */
		void *completion_data;
	};

	/*
	 * Three pointers are available for the IO schedulers, if they need
	 * more they have to dynamically allocate it. Flush requests are
	 * never put on the IO scheduler. So let the flush fields share
	 * space with the elevator data.
	 */
	union {
		struct {
			struct io_cq		*icq;
			void			*priv[2];
		} elv;

		struct {
			unsigned int		seq;
			struct list_head	list;
			rq_end_io_fn		*saved_end_io;
		} flush;
	};

	struct gendisk *rq_disk;
	struct hd_struct *part;
	unsigned long start_time;
#ifdef CONFIG_BLK_CGROUP
	struct request_list *rl;		/* rl this rq is alloced from */
	unsigned long long start_time_ns;
	unsigned long long io_start_time_ns;	/* when passed to hardware */
#endif
	/* Number of scatter-gather DMA addr+len pairs after
	 * physical address coalescing is performed.
	 */
	unsigned short nr_phys_segments;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	unsigned short nr_integrity_segments;
#endif

	unsigned short ioprio;

	int ref_count;

	void *special;		/* opaque pointer available for LLD use */
	char *buffer;		/* kaddr of the current segment if available */

	int tag;
	int errors;

	/*
	 * when request is used as a packet command carrier
	 */
	unsigned char __cmd[BLK_MAX_CDB];
	unsigned char *cmd;
	unsigned short cmd_len;

	unsigned int extra_len;	/* length of alignment and padding */
	unsigned int sense_len;
	unsigned int resid_len;	/* residual count */
	void *sense;

	unsigned long deadline;
	struct list_head timeout_list;
	unsigned int timeout;
	int retries;

	/*
	 * completion callback.
	 */
	rq_end_io_fn *end_io;
	void *end_io_data;

	/* for bidi */
	struct request *next_rq;
};

static inline unsigned short req_get_ioprio(struct request *req)
{
	return req->ioprio;
}

/*
 * State information carried for REQ_TYPE_PM_SUSPEND and REQ_TYPE_PM_RESUME
 * requests. Some step values could eventually be made generic.
 */
struct request_pm_state
{
	/* PM state machine step value, currently driver specific */
	int	pm_step;
	/* requested PM state value (S1, S2, S3, S4, ...) */
	u32	pm_state;
	void	*data;		/* for driver use */
};

#include <linux/elevator.h>

typedef void (request_fn_proc) (struct request_queue *q);
typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
typedef int (prep_rq_fn) (struct request_queue *, struct request *);
typedef void (unprep_rq_fn) (struct request_queue *, struct request *);

struct bio_vec;
struct bvec_merge_data {
	struct block_device *bi_bdev;
	sector_t bi_sector;
	unsigned bi_size;
	unsigned long bi_rw;
};
typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *,
			     struct bio_vec *);
typedef void (softirq_done_fn)(struct request *);
typedef int (dma_drain_needed_fn)(struct request *);
typedef int (lld_busy_fn) (struct request_queue *q);
typedef int (bsg_job_fn) (struct bsg_job *);

enum blk_eh_timer_return {
	BLK_EH_NOT_HANDLED,
	BLK_EH_HANDLED,
	BLK_EH_RESET_TIMER,
};

typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);

enum blk_queue_state {
	Queue_down,
	Queue_up,
};

struct blk_queue_tag {
	struct request **tag_index;	/* map of busy tags */
	unsigned long *tag_map;		/* bit map of free/busy tags */
	int busy;			/* current depth */
	int max_depth;			/* what we will send to device */
	int real_max_depth;		/* what the array can hold */
	atomic_t refcnt;		/* map can be shared */
};

#define BLK_SCSI_MAX_CMDS	(256)
#define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct queue_limits {
	unsigned long		bounce_pfn;
	unsigned long		seg_boundary_mask;

	unsigned int		max_hw_sectors;
	unsigned int		max_sectors;
	unsigned int		max_segment_size;
	unsigned int		physical_block_size;
	unsigned int		alignment_offset;
	unsigned int		io_min;
	unsigned int		io_opt;
	unsigned int		max_discard_sectors;
	unsigned int		discard_granularity;
	unsigned int		discard_alignment;

	unsigned short		logical_block_size;
	unsigned short		max_segments;
	unsigned short		max_integrity_segments;

	unsigned char		misaligned;
	unsigned char		discard_misaligned;
	unsigned char		cluster;
	unsigned char		discard_zeroes_data;
};

struct request_queue {
	/*
	 * Together with queue_head for cacheline sharing
	 */
	struct list_head	queue_head;
	struct request		*last_merge;
	struct elevator_queue	*elevator;
	int			nr_rqs[2];	/* # allocated [a]sync rqs */
	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */

	/*
	 * If blkcg is not used, @q->root_rl serves all requests. If blkcg
	 * is used, root blkg allocates from @q->root_rl and all other
	 * blkgs from their own blkg->rl. Which one to use should be
	 * determined using bio_request_list().
	 */
	struct request_list	root_rl;

	request_fn_proc		*request_fn;
	make_request_fn		*make_request_fn;
	prep_rq_fn		*prep_rq_fn;
	unprep_rq_fn		*unprep_rq_fn;
	merge_bvec_fn		*merge_bvec_fn;
	softirq_done_fn		*softirq_done_fn;
	rq_timed_out_fn		*rq_timed_out_fn;
	dma_drain_needed_fn	*dma_drain_needed;
	lld_busy_fn		*lld_busy_fn;

	/*
	 * Dispatch queue sorting
	 */
	sector_t		end_sector;
	struct request		*boundary_rq;

	/*
	 * Delayed queue handling
	 */
	struct delayed_work	delay_work;

	struct backing_dev_info	backing_dev_info;

	/*
	 * The queue owner gets to use this for whatever they like.
	 * ll_rw_blk doesn't touch it.
	 */
	void			*queuedata;

	/*
	 * various queue flags, see QUEUE_* below
	 */
	unsigned long		queue_flags;

	/*
	 * ida allocated id for this queue. Used to index queues from
	 * ioctx.
	 */
	int			id;

	/*
	 * queue needs bounce pages for pages above this limit
	 */
	gfp_t			bounce_gfp;

	/*
	 * protects queue structures from reentrancy. ->__queue_lock should
	 * _never_ be used directly, it is queue private. always use
	 * ->queue_lock.
	 */
	spinlock_t		__queue_lock;
	spinlock_t		*queue_lock;

	/*
	 * queue kobject
	 */
	struct kobject		kobj;

	/*
	 * queue settings
	 */
	unsigned long		nr_requests;	/* Max # of requests */
	unsigned int		nr_congestion_on;
	unsigned int		nr_congestion_off;
	unsigned int		nr_batching;

	unsigned int		dma_drain_size;
	void			*dma_drain_buffer;
	unsigned int		dma_pad_mask;
	unsigned int		dma_alignment;

	struct blk_queue_tag	*queue_tags;
	struct list_head	tag_busy_list;

	unsigned int		nr_sorted;
	unsigned int		in_flight[2];

	unsigned int		rq_timeout;
	struct timer_list	timeout;
	struct list_head	timeout_list;

	struct list_head	icq_list;
#ifdef CONFIG_BLK_CGROUP
	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
	struct blkcg_gq		*root_blkg;
	struct list_head	blkg_list;
#endif

	struct queue_limits	limits;

	/*
	 * sg stuff
	 */
	unsigned int		sg_timeout;
	unsigned int		sg_reserved_size;
	int			node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
	struct blk_trace	*blk_trace;
#endif
	/*
	 * for flush operations
	 */
	unsigned int		flush_flags;
	unsigned int		flush_not_queueable:1;
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;
	unsigned int		flush_running_idx:1;
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];
	struct list_head	flush_data_in_flight;
	struct request		flush_rq;

	struct mutex		sysfs_lock;

	int			bypass_depth;

#if defined(CONFIG_BLK_DEV_BSG)
	bsg_job_fn		*bsg_job_fn;
	int			bsg_job_size;
	struct bsg_class_device bsg_dev;
#endif

#ifdef CONFIG_BLK_CGROUP
	struct list_head	all_q_node;
#endif
#ifdef CONFIG_BLK_DEV_THROTTLING
	/* Throttle data */
	struct throtl_data	*td;
#endif
};

#define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
#define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
#define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
#define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
#define QUEUE_FLAG_DEAD		5	/* queue being torn down */
#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
#define QUEUE_FLAG_SAME_FORCE	18	/* force complete on same CPU */

#define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
				 (1 << QUEUE_FLAG_STACKABLE)	|	\
				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))

static inline void queue_lockdep_assert_held(struct request_queue *q)
{
	if (q->queue_lock)
		lockdep_assert_held(q->queue_lock);
}

static inline void queue_flag_set_unlocked(unsigned int flag,
					   struct request_queue *q)
{
	__set_bit(flag, &q->queue_flags);
}

static inline int queue_flag_test_and_clear(unsigned int flag,
					    struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (test_bit(flag, &q->queue_flags)) {
		__clear_bit(flag, &q->queue_flags);
		return 1;
	}

	return 0;
}

static inline int queue_flag_test_and_set(unsigned int flag,
					  struct request_queue *q)
{
	queue_lockdep_assert_held(q);

	if (!test_bit(flag, &q->queue_flags)) {
		__set_bit(flag, &q->queue_flags);
		return 0;
	}

	return 1;
}

static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__set_bit(flag, &q->queue_flags);
}

static inline void queue_flag_clear_unlocked(unsigned int flag,
					     struct request_queue *q)
{
	__clear_bit(flag, &q->queue_flags);
}

static inline int queue_in_flight(struct request_queue *q)
{
	return q->in_flight[0] + q->in_flight[1];
}

static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
	queue_lockdep_assert_held(q);
	__clear_bit(flag, &q->queue_flags);
}

#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
#define blk_queue_noxmerges(q)	\
	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
#define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
#define blk_queue_stackable(q)	\
	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_secdiscard(q)	(blk_queue_discard(q) && \
	test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))

#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			     REQ_FAILFAST_DRIVER))

#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS))

#define blk_pm_request(rq) \
	((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \
	 (rq)->cmd_type == REQ_TYPE_PM_RESUME)

#define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
#define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
/* rq->queuelist of dequeued request must be list_empty() */
#define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

#define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)

#define rq_data_dir(rq)		((rq)->cmd_flags & 1)

static inline unsigned int blk_queue_cluster(struct request_queue *q)
{
	return q->limits.cluster;
}

/*
 * We regard a request as sync, if either a read or a sync write
 */
static inline bool rw_is_sync(unsigned int rw_flags)
{
	return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC);
}

static inline bool rq_is_sync(struct request *rq)
{
	return rw_is_sync(rq->cmd_flags);
}

static inline bool blk_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	return rl->flags & flag;
}

static inline void blk_set_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags |= flag;
}

static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
{
	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;

	rl->flags &= ~flag;
}

static inline bool rq_mergeable(struct request *rq)
{
	if (rq->cmd_type != REQ_TYPE_FS)
		return false;

	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
		return false;

	return true;
}

/*
 * q->prep_rq_fn return values
 */
#define BLKPREP_OK		0	/* serve it */
#define BLKPREP_KILL		1	/* fatal error, kill */
#define BLKPREP_DEFER		2	/* leave on queue */

extern unsigned long blk_max_low_pfn, blk_max_pfn;

/*
 * standard bounce addresses:
 *
 * BLK_BOUNCE_HIGH	: bounce all highmem pages
 * BLK_BOUNCE_ANY	: don't bounce anything
 * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
 */

#if BITS_PER_LONG == 32
#define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
#else
#define BLK_BOUNCE_HIGH		-1ULL
#endif
#define BLK_BOUNCE_ANY		(-1ULL)
#define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))

/*
 * default timeout for SG_IO if none specified
 */
#define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
#define BLK_MIN_SG_TIMEOUT	(7 * HZ)

#ifdef CONFIG_BOUNCE
extern int init_emergency_isa_pool(void);
extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
#else
static inline int init_emergency_isa_pool(void)
{
	return 0;
}
static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
{
}
#endif /* CONFIG_MMU */

struct rq_map_data {
	struct page **pages;
	int page_order;
	int nr_entries;
	unsigned long offset;
	int null_mapped;
	int from_user;
};

struct req_iterator {
	int i;
	struct bio *bio;
};

/* This should not be used directly - use rq_for_each_segment */
#define for_each_bio(_bio)		\
	for (; _bio; _bio = _bio->bi_next)
#define __rq_for_each_bio(_bio, rq)	\
	if ((rq->bio))			\
		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)

#define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
		bio_for_each_segment(bvl, _iter.bio, _iter.i)

#define rq_iter_last(rq, _iter)					\
		(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)

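/*
 * Example (an illustrative sketch, not part of this header's API): walking
 * the data segments of a request from driver code.  my_copy_segment() is a
 * hypothetical helper and rq is the request being processed.
 *
 *	struct req_iterator iter;
 *	struct bio_vec *bvec;
 *
 *	rq_for_each_segment(bvec, rq, iter)
 *		my_copy_segment(page_address(bvec->bv_page) + bvec->bv_offset,
 *				bvec->bv_len);
 */
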
#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
# error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
#endif
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
extern void rq_flush_dcache_pages(struct request *rq);
#else
static inline void rq_flush_dcache_pages(struct request *rq)
{
}
#endif

extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern void generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
extern struct request *blk_make_request(struct request_queue *, struct bio *,
					gfp_t);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern void blk_add_request_payload(struct request *rq, struct page *page,
		unsigned int len);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
			     struct bio_set *bs, gfp_t gfp_mask,
			     int (*bio_ctr)(struct bio *, struct bio *, void *),
			     void *data);
extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
				     struct request *rq);
extern void blk_delay_queue(struct request_queue *, unsigned long);
extern void blk_recount_segments(struct request_queue *, struct bio *);
extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
			      unsigned int, void __user *);
extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			  unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
			 struct scsi_ioctl_command __user *);

extern void blk_queue_bio(struct request_queue *q, struct bio *bio);

/*
 * A queue has just exited congestion. Note this in the global counter of
 * congested queues, and wake up anyone who was waiting for requests to be
 * put back.
 */
static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
{
	clear_bdi_congested(&q->backing_dev_info, sync);
}

/*
 * A queue has just entered congestion. Flag that in the queue's VM-visible
 * state flags and increment the global counter of congested queues.
 */
static inline void blk_set_queue_congested(struct request_queue *q, int sync)
{
	set_bdi_congested(&q->backing_dev_info, sync);
}

extern void blk_start_queue(struct request_queue *q);
extern void blk_stop_queue(struct request_queue *q);
extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *q);
extern void blk_run_queue(struct request_queue *);
extern void blk_run_queue_async(struct request_queue *q);
extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
extern int blk_rq_unmap_user(struct bio *);
extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
			       struct rq_map_data *, struct sg_iovec *, int,
			       unsigned int, gfp_t);
extern int blk_execute_rq(struct request_queue *, struct gendisk *,
			  struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
				  struct request *, int, rq_end_io_fn *);

static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
{
	return bdev->bd_disk->queue;
}

/*
 * blk_rq_pos()			: the current sector
 * blk_rq_bytes()		: bytes left in the entire request
 * blk_rq_cur_bytes()		: bytes left in the current segment
 * blk_rq_err_bytes()		: bytes left till the next error boundary
 * blk_rq_sectors()		: sectors left in the entire request
 * blk_rq_cur_sectors()		: sectors left in the current segment
 */
static inline sector_t blk_rq_pos(const struct request *rq)
{
	return rq->__sector;
}

static inline unsigned int blk_rq_bytes(const struct request *rq)
{
	return rq->__data_len;
}

static inline int blk_rq_cur_bytes(const struct request *rq)
{
	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
}

extern unsigned int blk_rq_err_bytes(const struct request *rq);

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
	return blk_rq_bytes(rq) >> 9;
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
	return blk_rq_cur_bytes(rq) >> 9;
}

/*
 * Request issue related functions.
 */
extern struct request *blk_peek_request(struct request_queue *q);
extern void blk_start_request(struct request *rq);
extern struct request *blk_fetch_request(struct request_queue *q);

/*
 * Request completion related functions.
 *
 * blk_update_request() completes given number of bytes and updates
 * the request without completing it.
 *
 * blk_end_request() and friends. __blk_end_request() must be called
 * with the request queue spinlock acquired.
 *
 * Several drivers define their own end_request and call
 * blk_end_request() for parts of the original function.
 * This prevents code duplication in drivers.
 */
extern bool blk_update_request(struct request *rq, int error,
			       unsigned int nr_bytes);
extern bool blk_end_request(struct request *rq, int error,
			    unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
extern bool blk_end_request_cur(struct request *rq, int error);
extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
			      unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
extern bool __blk_end_request_err(struct request *rq, int error);

extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_unprep_request(struct request *);

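/*
 * Illustrative sketch (an assumption, not code from this file): a minimal
 * request_fn that drains the queue with blk_fetch_request() and completes
 * each request in full.  A request_fn is called with the queue lock held,
 * so the __blk_end_request_all() variant is used rather than
 * blk_end_request_all().  my_do_request() is a hypothetical driver helper.
 *
 *	static void my_request_fn(struct request_queue *q)
 *	{
 *		struct request *rq;
 *
 *		while ((rq = blk_fetch_request(q)) != NULL) {
 *			int error = my_do_request(rq);
 *			__blk_end_request_all(rq, error);
 *		}
 *	}
 */
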
/*
 * Access functions for manipulating queue properties
 */
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
					spinlock_t *lock, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
						      request_fn_proc *, spinlock_t *);
extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors);
extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
				       unsigned int alignment);
extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
extern void blk_set_default_limits(struct queue_limits *lim);
extern void blk_set_stacking_limits(struct queue_limits *lim);
extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
			    sector_t offset);
extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
			     sector_t offset);
extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
			      sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
extern int blk_queue_dma_drain(struct request_queue *q,
			       dma_drain_needed_fn *dma_drain_needed,
			       void *buf, unsigned int size);
extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *);
extern void blk_queue_dma_alignment(struct request_queue *, int);
extern void blk_queue_update_dma_alignment(struct request_queue *, int);
extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);

extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
extern int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
			  struct scatterlist *sglist);
extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);

bool __must_check blk_get_queue(struct request_queue *);
struct request_queue *blk_alloc_queue(gfp_t);
struct request_queue *blk_alloc_queue_node(gfp_t, int);
extern void blk_put_queue(struct request_queue *);

/*
 * blk_plug permits building a queue of related requests by holding the I/O
 * fragments for a short period. This allows merging of sequential requests
 * into single larger request. As the requests are moved from a per-task list to
 * the device's request_queue in a batch, this results in improved scalability
 * as the lock contention for request_queue lock is reduced.
 *
 * It is ok not to disable preemption when adding the request to the plug list
 * or when attempting a merge, because blk_schedule_flush_list() will only flush
 * the plug list when the task sleeps by itself. For details, please see
 * schedule() where blk_schedule_flush_plug() is called.
 */
struct blk_plug {
	unsigned long magic;		/* detect uninitialized use-cases */
	struct list_head list;		/* requests */
	struct list_head cb_list;	/* md requires an unplug callback */
	unsigned int should_sort;	/* list to be sorted before flushing? */
};
#define BLK_MAX_REQUEST_COUNT 16

struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
struct blk_plug_cb {
	struct list_head list;
	blk_plug_cb_fn callback;
	void *data;
};
extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
					     void *data, int size);
extern void blk_start_plug(struct blk_plug *);
extern void blk_finish_plug(struct blk_plug *);
extern void blk_flush_plug_list(struct blk_plug *, bool);

static inline void blk_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, false);
}

static inline void blk_schedule_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	if (plug)
		blk_flush_plug_list(plug, true);
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	struct blk_plug *plug = tsk->plug;

	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
}

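/*
 * Illustrative sketch (an assumption, not code from this file): plugging a
 * batch of bio submissions so they can be merged and moved to the device
 * queue together.  my_bio[] and nr are hypothetical.
 *
 *	struct blk_plug plug;
 *	int i;
 *
 *	blk_start_plug(&plug);
 *	for (i = 0; i < nr; i++)
 *		submit_bio(WRITE, my_bio[i]);
 *	blk_finish_plug(&plug);
 */
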
/*
 * tag stuff
 */
#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
extern int blk_queue_start_tag(struct request_queue *, struct request *);
extern struct request *blk_queue_find_tag(struct request_queue *, int);
extern void blk_queue_end_tag(struct request_queue *, struct request *);
extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *);
extern void blk_queue_free_tags(struct request_queue *);
extern int blk_queue_resize_tags(struct request_queue *, int);
extern void blk_queue_invalidate_tags(struct request_queue *);
extern struct blk_queue_tag *blk_init_tags(int);
extern void blk_free_tags(struct blk_queue_tag *);

static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
						     int tag)
{
	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
		return NULL;
	return bqt->tag_index[tag];
}

#define BLKDEV_DISCARD_SECURE	0x01	/* secure discard */

extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask);
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask, flags);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
		sector_t nr_blocks, gfp_t gfp_mask)
{
	return blkdev_issue_zeroout(sb->s_bdev,
				    block << (sb->s_blocksize_bits - 9),
				    nr_blocks << (sb->s_blocksize_bits - 9),
				    gfp_mask);
}
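
/*
 * Illustrative sketch (an assumption, not code from this file): discarding a
 * range of filesystem blocks and treating a device without discard support
 * as a no-op.  sb, block and nr_blocks are hypothetical values.
 *
 *	int err = sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);
 *	if (err == -EOPNOTSUPP)
 *		err = 0;	(device cannot discard; nothing to do)
 */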

extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);

enum blk_default_limits {
	BLK_MAX_SEGMENTS	= 128,
	BLK_SAFE_MAX_SECTORS	= 255,
	BLK_DEF_MAX_SECTORS	= 1024,
	BLK_MAX_SEGMENT_SIZE	= 65536,
	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
};

#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)

static inline unsigned long queue_bounce_pfn(struct request_queue *q)
{
	return q->limits.bounce_pfn;
}

static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
	return q->limits.seg_boundary_mask;
}

static inline unsigned int queue_max_sectors(struct request_queue *q)
{
	return q->limits.max_sectors;
}

static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
{
	return q->limits.max_hw_sectors;
}

static inline unsigned short queue_max_segments(struct request_queue *q)
{
	return q->limits.max_segments;
}

static inline unsigned int queue_max_segment_size(struct request_queue *q)
{
	return q->limits.max_segment_size;
}

static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
	int retval = 512;

	if (q && q->limits.logical_block_size)
		retval = q->limits.logical_block_size;

	return retval;
}

static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
{
	return queue_logical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_physical_block_size(struct request_queue *q)
{
	return q->limits.physical_block_size;
}

static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
{
	return queue_physical_block_size(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_min(struct request_queue *q)
{
	return q->limits.io_min;
}

static inline int bdev_io_min(struct block_device *bdev)
{
	return queue_io_min(bdev_get_queue(bdev));
}

static inline unsigned int queue_io_opt(struct request_queue *q)
{
	return q->limits.io_opt;
}

static inline int bdev_io_opt(struct block_device *bdev)
{
	return queue_io_opt(bdev_get_queue(bdev));
}

static inline int queue_alignment_offset(struct request_queue *q)
{
	if (q->limits.misaligned)
		return -1;

	return q->limits.alignment_offset;
}

static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = (sector << 9) & (granularity - 1);

	return (granularity + lim->alignment_offset - alignment)
		& (granularity - 1);
}
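
/*
 * Worked example (illustrative, assumed values): with physical_block_size
 * = 4096, io_min = 0 and alignment_offset = 3584, granularity is 4096.
 * For sector 0 the result is (4096 + 3584 - 0) & 4095 = 3584, i.e. the
 * first naturally aligned boundary starts 3584 bytes in; for sector 7
 * (byte offset 3584) the result is 0.
 */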

static inline int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.misaligned)
		return -1;

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->alignment_offset;

	return q->limits.alignment_offset;
}

static inline int queue_discard_alignment(struct request_queue *q)
{
	if (q->limits.discard_misaligned)
		return -1;

	return q->limits.discard_alignment;
}

static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);

	if (!lim->max_discard_sectors)
		return 0;

	return (lim->discard_granularity + lim->discard_alignment - alignment)
		& (lim->discard_granularity - 1);
}

static inline int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev != bdev->bd_contains)
		return bdev->bd_part->discard_alignment;

	return q->limits.discard_alignment;
}

static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
{
	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
		return 1;

	return 0;
}

static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
{
	return queue_discard_zeroes_data(bdev_get_queue(bdev));
}

static inline int queue_dma_alignment(struct request_queue *q)
{
	return q ? q->dma_alignment : 511;
}

static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
				 unsigned int len)
{
	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
	return !(addr & alignment) && !(len & alignment);
}

/* assumes size > 256 */
static inline unsigned int blksize_bits(unsigned int size)
{
	unsigned int bits = 8;
	do {
		bits++;
		size >>= 1;
	} while (size > 256);
	return bits;
}

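/*
 * For reference (illustrative): blksize_bits(512) == 9,
 * blksize_bits(1024) == 10 and blksize_bits(4096) == 12.
 */
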
static inline unsigned int block_size(struct block_device *bdev)
{
	return bdev->bd_block_size;
}

static inline bool queue_flush_queueable(struct request_queue *q)
{
	return !q->flush_not_queueable;
}

typedef struct {struct page *v;} Sector;

unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);

static inline void put_dev_sector(Sector p)
{
	page_cache_release(p.v);
}

struct work_struct;
int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);

#ifdef CONFIG_BLK_CGROUP
/*
 * This should not be using sched_clock(). A real patch is in progress
 * to fix this up, until that is in place we need to disable preemption
 * around sched_clock() in this function and set_io_start_time_ns().
 */
static inline void set_start_time_ns(struct request *req)
{
	preempt_disable();
	req->start_time_ns = sched_clock();
	preempt_enable();
}

static inline void set_io_start_time_ns(struct request *req)
{
	preempt_disable();
	req->io_start_time_ns = sched_clock();
	preempt_enable();
}

static inline uint64_t rq_start_time_ns(struct request *req)
{
	return req->start_time_ns;
}

static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return req->io_start_time_ns;
}
#else
static inline void set_start_time_ns(struct request *req) {}
static inline void set_io_start_time_ns(struct request *req) {}
static inline uint64_t rq_start_time_ns(struct request *req)
{
	return 0;
}
static inline uint64_t rq_io_start_time_ns(struct request *req)
{
	return 0;
}
#endif

#define MODULE_ALIAS_BLOCKDEV(major,minor) \
	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
#define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
	MODULE_ALIAS("block-major-" __stringify(major) "-*")

#if defined(CONFIG_BLK_DEV_INTEGRITY)

#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */

struct blk_integrity_exchg {
	void			*prot_buf;
	void			*data_buf;
	sector_t		sector;
	unsigned int		data_size;
	unsigned short		sector_size;
	const char		*disk_name;
};

typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);

struct blk_integrity {
	integrity_gen_fn	*generate_fn;
	integrity_vrfy_fn	*verify_fn;
	integrity_set_tag_fn	*set_tag_fn;
	integrity_get_tag_fn	*get_tag_fn;

	unsigned short		flags;
	unsigned short		tuple_size;
	unsigned short		sector_size;
	unsigned short		tag_size;

	const char		*name;

	struct kobject		kobj;
};

extern bool blk_integrity_is_initialized(struct gendisk *);
extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
extern void blk_integrity_unregister(struct gendisk *);
extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
				   struct scatterlist *);
extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
				  struct request *);
extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
				   struct bio *);

static inline
struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
{
	return bdev->bd_disk->integrity;
}

static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return disk->integrity;
}

static inline int blk_integrity_rq(struct request *rq)
{
	if (rq->bio == NULL)
		return 0;

	return bio_integrity(rq->bio);
}

static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
	q->limits.max_integrity_segments = segs;
}

static inline unsigned short
queue_max_integrity_segments(struct request_queue *q)
{
	return q->limits.max_integrity_segments;
}

#else /* CONFIG_BLK_DEV_INTEGRITY */

struct bio;
struct block_device;
struct gendisk;
struct blk_integrity;

static inline int blk_integrity_rq(struct request *rq)
{
	return 0;
}
static inline int blk_rq_count_integrity_sg(struct request_queue *q,
					    struct bio *b)
{
	return 0;
}
static inline int blk_rq_map_integrity_sg(struct request_queue *q,
					  struct bio *b,
					  struct scatterlist *s)
{
	return 0;
}
static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
{
	return 0;
}
static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
{
	return NULL;
}
static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
{
	return 0;
}
static inline int blk_integrity_register(struct gendisk *d,
					 struct blk_integrity *b)
{
	return 0;
}
static inline void blk_integrity_unregister(struct gendisk *d)
{
}
static inline void blk_queue_max_integrity_segments(struct request_queue *q,
						    unsigned int segs)
{
}
static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
{
	return 0;
}
static inline int blk_integrity_merge_rq(struct request_queue *rq,
					 struct request *r1,
					 struct request *r2)
{
	return 0;
}
static inline int blk_integrity_merge_bio(struct request_queue *rq,
					  struct request *r,
					  struct bio *b)
{
	return 0;
}
static inline bool blk_integrity_is_initialized(struct gendisk *g)
{
	return 0;
}

#endif /* CONFIG_BLK_DEV_INTEGRITY */

struct block_device_operations {
	int (*open) (struct block_device *, fmode_t);
	int (*release) (struct gendisk *, fmode_t);
	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
	int (*direct_access) (struct block_device *, sector_t,
						void **, unsigned long *);
	unsigned int (*check_events) (struct gendisk *disk,
				      unsigned int clearing);
	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
	int (*media_changed) (struct gendisk *);
	void (*unlock_native_capacity) (struct gendisk *);
	int (*revalidate_disk) (struct gendisk *);
	int (*getgeo)(struct block_device *, struct hd_geometry *);
	/* this callback is with swap_lock and sometimes page table lock held */
	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
	struct module *owner;
};

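/*
 * Illustrative sketch (an assumption, not code from this file): a minimal
 * driver filling in block_device_operations.  my_open, my_release and
 * my_getgeo are hypothetical driver functions.
 *
 *	static const struct block_device_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= my_open,
 *		.release	= my_release,
 *		.getgeo		= my_getgeo,
 *	};
 */
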
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
				 unsigned long);
#else /* CONFIG_BLOCK */
/*
 * stubs for when the block layer is configured out
 */
#define buffer_heads_over_limit 0

static inline long nr_blockdev_pages(void)
{
	return 0;
}

struct blk_plug {
};

static inline void blk_start_plug(struct blk_plug *plug)
{
}

static inline void blk_finish_plug(struct blk_plug *plug)
{
}

static inline void blk_flush_plug(struct task_struct *task)
{
}

static inline void blk_schedule_flush_plug(struct task_struct *task)
{
}

static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
	return false;
}

#endif /* CONFIG_BLOCK */

#endif