/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Block data types and constants. Directly include this file only to
 * break include dependency loop.
 */
#ifndef __LINUX_BLK_TYPES_H
#define __LINUX_BLK_TYPES_H

#include <linux/types.h>
#include <linux/bvec.h>
#include <linux/ktime.h>
struct bio_integrity_payload;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
/*
 * Block error status values. See block/blk-core:blk_errors for the details.
 * Alpha cannot write a byte atomically, so we need to use a 32-bit value.
 */
#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
typedef u32 __bitwise blk_status_t;
#else
typedef u8 __bitwise blk_status_t;
#endif
#define BLK_STS_OK 0
#define BLK_STS_NOTSUPP		((__force blk_status_t)1)
#define BLK_STS_TIMEOUT		((__force blk_status_t)2)
#define BLK_STS_NOSPC		((__force blk_status_t)3)
#define BLK_STS_TRANSPORT	((__force blk_status_t)4)
#define BLK_STS_TARGET		((__force blk_status_t)5)
#define BLK_STS_NEXUS		((__force blk_status_t)6)
#define BLK_STS_MEDIUM		((__force blk_status_t)7)
#define BLK_STS_PROTECTION	((__force blk_status_t)8)
#define BLK_STS_RESOURCE	((__force blk_status_t)9)
#define BLK_STS_IOERR		((__force blk_status_t)10)

/* hack for device mapper, don't use elsewhere: */
#define BLK_STS_DM_REQUEUE	((__force blk_status_t)11)

#define BLK_STS_AGAIN		((__force blk_status_t)12)
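
/*
 * Editor's illustration, not part of the upstream header: a minimal
 * sketch of how a completion path might map errnos onto the status
 * codes above. It mirrors the spirit of errno_to_blk_status() in
 * block/blk-core.c; the helper name my_errno_to_status() is
 * hypothetical, and <linux/errno.h> is assumed to be visible.
 */
#if 0	/* example only, not compiled */
static inline blk_status_t my_errno_to_status(int error)
{
	switch (error) {
	case 0:			return BLK_STS_OK;
	case -EOPNOTSUPP:	return BLK_STS_NOTSUPP;
	case -ETIMEDOUT:	return BLK_STS_TIMEOUT;
	case -ENOSPC:		return BLK_STS_NOSPC;
	case -EAGAIN:		return BLK_STS_AGAIN;
	default:		return BLK_STS_IOERR;	/* catch-all */
	}
}
#endif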
/*
 * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if
 * device related resources are unavailable, but the driver can guarantee
 * that the queue will be rerun in the future once resources become
 * available again. This is typically the case for device specific
 * resources that are consumed for IO. If the driver fails allocating these
 * resources, we know that inflight (or pending) IO will free these
 * resources upon completion.
 *
 * This is different from BLK_STS_RESOURCE in that it explicitly references
 * a device specific resource. For resources of wider scope, allocation
 * failure can happen without having pending IO. This means that we can't
 * rely on request completions freeing these resources, as IO may not be in
 * flight. Examples of that are kernel memory allocations, DMA mappings, or
 * any other system wide resources.
 */
#define BLK_STS_DEV_RESOURCE	((__force blk_status_t)13)
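
/*
 * Editor's illustration, not part of the upstream header: a sketch of a
 * blk-mq ->queue_rq() choosing between the two resource codes. The
 * my_dev type and my_dev_*() helpers are hypothetical, and
 * <linux/blk-mq.h> is assumed for the blk_mq types.
 */
#if 0	/* example only, not compiled */
static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	struct my_dev *dev = hctx->queue->queuedata;

	if (!my_dev_get_slot(dev))
		/* per-device slot: in-flight IO is guaranteed to free one */
		return BLK_STS_DEV_RESOURCE;
	if (!my_dev_map_dma(dev, bd->rq))
		/* system-wide resource (DMA mapping): no rerun guarantee */
		return BLK_STS_RESOURCE;
	my_dev_submit(dev, bd->rq);
	return BLK_STS_OK;
}
#endif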
/**
 * blk_path_error - returns true if error may be path related
 * @error: status the request was completed with
 *
 * Description:
 *     This classifies block error status into non-retryable errors and ones
 *     that may be successful if retried on a failover path.
 *
 * Return:
 *     %false - retrying failover path will not help
 *     %true  - may succeed if retried
 */
static inline bool blk_path_error(blk_status_t error)
{
	switch (error) {
	case BLK_STS_NOTSUPP:
	case BLK_STS_NOSPC:
	case BLK_STS_TARGET:
	case BLK_STS_NEXUS:
	case BLK_STS_MEDIUM:
	case BLK_STS_PROTECTION:
		return false;
	}

	/* Anything else could be a path failure, so should be retried */
	return true;
}
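
/*
 * Editor's illustration, not part of the upstream header: how a failover
 * aware completion handler might use blk_path_error(). The my_*()
 * helpers are hypothetical; nvme multipath and device mapper contain the
 * real callers.
 */
#if 0	/* example only, not compiled */
static void my_complete_rq(struct request *rq, blk_status_t status)
{
	if (status != BLK_STS_OK && blk_path_error(status) &&
	    my_have_another_path(rq)) {
		my_retry_on_another_path(rq);	/* may succeed elsewhere */
		return;
	}
	blk_mq_end_request(rq, status);		/* retrying won't help */
}
#endif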
/*
 * From most significant bit:
 * 1 bit: reserved for other usage, see below
 * 12 bits: original size of bio
 * 51 bits: issue time of bio
 */
#define BIO_ISSUE_RES_BITS	1
#define BIO_ISSUE_SIZE_BITS	12
#define BIO_ISSUE_RES_SHIFT	(64 - BIO_ISSUE_RES_BITS)
#define BIO_ISSUE_SIZE_SHIFT	(BIO_ISSUE_RES_SHIFT - BIO_ISSUE_SIZE_BITS)
#define BIO_ISSUE_TIME_MASK	((1ULL << BIO_ISSUE_SIZE_SHIFT) - 1)
#define BIO_ISSUE_SIZE_MASK	\
	(((1ULL << BIO_ISSUE_SIZE_BITS) - 1) << BIO_ISSUE_SIZE_SHIFT)
#define BIO_ISSUE_RES_MASK	(~((1ULL << BIO_ISSUE_RES_SHIFT) - 1))

/* Reserved bit for blk-throtl */
#define BIO_ISSUE_THROTL_SKIP_LATENCY (1ULL << 63)
struct bio_issue {
	u64 value;
};

static inline u64 __bio_issue_time(u64 time)
{
	return time & BIO_ISSUE_TIME_MASK;
}

static inline u64 bio_issue_time(struct bio_issue *issue)
{
	return __bio_issue_time(issue->value);
}

static inline sector_t bio_issue_size(struct bio_issue *issue)
{
	return ((issue->value & BIO_ISSUE_SIZE_MASK) >> BIO_ISSUE_SIZE_SHIFT);
}

static inline void bio_issue_init(struct bio_issue *issue,
				  sector_t size)
{
	size &= (1ULL << BIO_ISSUE_SIZE_BITS) - 1;
	issue->value = ((issue->value & BIO_ISSUE_RES_MASK) |
			(ktime_get_ns() & BIO_ISSUE_TIME_MASK) |
			((u64)size << BIO_ISSUE_SIZE_SHIFT));
}
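
/*
 * Editor's illustration of the 1/12/51-bit packing above. For a bio of
 * 255 sectors issued at time t ns, bio_issue_init() stores:
 *
 *	bit 63		reserved (BIO_ISSUE_THROTL_SKIP_LATENCY), preserved
 *	bits 62..51	255 (size, truncated to 12 bits)
 *	bits 50..0	t & BIO_ISSUE_TIME_MASK (time, truncated to 51 bits)
 *
 * bio_issue_size() then returns 255 and bio_issue_time() the truncated
 * timestamp; sizes of 4096+ sectors and the top 13 time bits are
 * deliberately sacrificed to fit everything in a single u64.
 */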
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	struct bio		*bi_next;	/* request queue link */
	struct gendisk		*bi_disk;
	unsigned int		bi_opf;		/* bottom bits REQ_OP, top bits
						 * req flags. Use accessors.
						 */
	unsigned short		bi_flags;	/* status, etc and bvec pool number */
	unsigned short		bi_ioprio;
	unsigned short		bi_write_hint;
	blk_status_t		bi_status;
	u8			bi_partno;

	atomic_t		__bi_remaining;

	struct bvec_iter	bi_iter;

	bio_end_io_t		*bi_end_io;

	void			*bi_private;
#ifdef CONFIG_BLK_CGROUP
	/*
	 * Represents the association of the css and request_queue for the bio.
	 * If a bio goes direct to device, it will not have a blkg as it will
	 * not have a request_queue associated with it. The reference is put
	 * on release of the bio.
	 */
	struct blkcg_gq		*bi_blkg;
	struct bio_issue	bi_issue;
#ifdef CONFIG_BLK_CGROUP_IOCOST
	u64			bi_iocost_cost;
#endif
#endif

	union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
		struct bio_integrity_payload *bi_integrity; /* data integrity */
#endif
	};
	unsigned short		bi_vcnt;	/* how many bio_vec's */

	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */

	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */

	atomic_t		__bi_cnt;	/* pin count */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	struct bio_set		*bi_pool;

	/*
	 * We can inline a number of vecs at the end of the bio, to avoid
	 * double allocations for a small number of bio_vecs. This member
	 * MUST obviously be kept at the very end of the bio.
	 */
	struct bio_vec		bi_inline_vecs[];
};

#define BIO_RESET_BYTES		offsetof(struct bio, bi_max_vecs)
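
/*
 * Editor's note: bio_reset() in block/bio.c clears a bio only up to
 * BIO_RESET_BYTES, i.e. everything before bi_max_vecs. The allocation
 * related tail (bi_max_vecs, __bi_cnt, bi_io_vec, bi_pool and the
 * inline vecs) survives a reset, which is what lets a bio be reused
 * without reallocating its bvec array.
 */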
/*
 * bio flags
 */
enum {
	BIO_NO_PAGE_REF,	/* don't put pages when releasing bvecs */
	BIO_CLONED,		/* doesn't own data */
	BIO_BOUNCED,		/* bio is a bounce bio */
	BIO_USER_MAPPED,	/* contains user pages */
	BIO_NULL_MAPPED,	/* contains invalid user pages */
	BIO_WORKINGSET,		/* contains userspace workingset pages */
	BIO_QUIET,		/* Make BIO Quiet */
	BIO_CHAIN,		/* chained bio, ->bi_remaining in effect */
	BIO_REFFED,		/* bio has elevated ->bi_cnt */
	BIO_THROTTLED,		/* This bio has already been subjected to
				 * throttling rules. Don't do it again. */
	BIO_TRACE_COMPLETION,	/* bio_endio() should trace the final completion
				 * of this bio. */
	BIO_QUEUE_ENTERED,	/* can use blk_queue_enter_live() */
	BIO_TRACKED,		/* set if bio goes through the rq_qos path */
	BIO_FLAG_LAST
};
/* See BVEC_POOL_OFFSET below before adding new flags */

/*
 * We support 6 different bvec pools, the last one is magic in that it
 * is backed by a mempool.
 */
#define BVEC_POOL_NR		6
#define BVEC_POOL_MAX		(BVEC_POOL_NR - 1)

/*
 * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
 * 1 to the actual index so that 0 indicates that there are no bvecs to be
 * freed.
 */
#define BVEC_POOL_BITS		(3)
#define BVEC_POOL_OFFSET	(16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio)	((bio)->bi_flags >> BVEC_POOL_OFFSET)
#if (1 << BVEC_POOL_BITS) < (BVEC_POOL_NR + 1)
# error "BVEC_POOL_BITS is too small"
#endif

/*
 * Flags starting here get preserved by bio_reset() - this includes
 * only BVEC_POOL_IDX()
 */
#define BIO_RESET_BITS	BVEC_POOL_OFFSET
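
/*
 * Editor's illustration: with BVEC_POOL_OFFSET = 13, the top three bits
 * of the 16-bit bi_flags hold (pool index + 1). A bio whose bvecs came
 * from pool 2 stores 3 << 13 there, so BVEC_POOL_IDX() yields 3, and a
 * value of 0 means there are no bvecs to free (e.g. inline vecs).
 */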
typedef __u32 __bitwise blk_mq_req_flags_t;
/*
 * Operations and flags common to the bio and request structures.
 * We use 8 bits for encoding the operation, and the remaining 24 for flags.
 *
 * The least significant bit of the operation number indicates the data
 * transfer direction:
 *
 *   - if the least significant bit is set transfers are TO the device
 *   - if the least significant bit is not set transfers are FROM the device
 *
 * If an operation does not transfer data the least significant bit has no
 * meaning.
 */
#define REQ_OP_BITS	8
#define REQ_OP_MASK	((1 << REQ_OP_BITS) - 1)
#define REQ_FLAG_BITS	24
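
/*
 * Editor's illustration of the encoding above. REQ_OP_WRITE is 1 (LSB
 * set: data flows TO the device) and the flag bits start at bit 8, so
 * for a sync write with forced unit access:
 *
 *	unsigned int opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA;
 *
 *	opf & REQ_OP_MASK	-> REQ_OP_WRITE
 *	op_is_write(opf)	-> true (bit 0 set)
 *	op_is_flush(opf)	-> true (REQ_FUA set)
 */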
enum req_opf {
	/* read sectors from the device */
	REQ_OP_READ		= 0,
	/* write sectors to the device */
	REQ_OP_WRITE		= 1,
	/* flush the volatile write cache */
	REQ_OP_FLUSH		= 2,
	/* discard sectors */
	REQ_OP_DISCARD		= 3,
	/* securely erase sectors */
	REQ_OP_SECURE_ERASE	= 5,
	/* reset a zone write pointer */
	REQ_OP_ZONE_RESET	= 6,
	/* write the same sector many times */
	REQ_OP_WRITE_SAME	= 7,
	/* reset all zones present on the device */
	REQ_OP_ZONE_RESET_ALL	= 8,
	/* write the zero filled sector many times */
	REQ_OP_WRITE_ZEROES	= 9,
	/* Open a zone */
	REQ_OP_ZONE_OPEN	= 10,
	/* Close a zone */
	REQ_OP_ZONE_CLOSE	= 11,
	/* Transition a zone to full */
	REQ_OP_ZONE_FINISH	= 12,

	/* SCSI passthrough using struct scsi_request */
	REQ_OP_SCSI_IN		= 32,
	REQ_OP_SCSI_OUT		= 33,
	/* Driver private requests */
	REQ_OP_DRV_IN		= 34,
	REQ_OP_DRV_OUT		= 35,

	REQ_OP_LAST,
};
enum req_flag_bits {
	__REQ_FAILFAST_DEV =	/* no driver retries of device errors */
		REQ_OP_BITS,
	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
	__REQ_SYNC,		/* request is sync (sync write or read) */
	__REQ_META,		/* metadata io request */
	__REQ_PRIO,		/* boost priority in cfq */
	__REQ_NOMERGE,		/* don't touch this for merging */
	__REQ_IDLE,		/* anticipate more IO after this one */
	__REQ_INTEGRITY,	/* I/O includes block integrity payload */
	__REQ_FUA,		/* forced unit access */
	__REQ_PREFLUSH,		/* request for cache flush */
	__REQ_RAHEAD,		/* read ahead, can fail anytime */
	__REQ_BACKGROUND,	/* background IO */
	__REQ_NOWAIT,		/* Don't wait if request will block */
	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
	/*
	 * When a shared kthread needs to issue a bio for a cgroup, doing
	 * so synchronously can lead to priority inversions as the kthread
	 * can be trapped waiting for that cgroup. CGROUP_PUNT flag makes
	 * submit_bio() punt the actual issuing to a dedicated per-blkcg
	 * work item to avoid such priority inversions.
	 */
	__REQ_CGROUP_PUNT,

	/* command specific flags for REQ_OP_WRITE_ZEROES: */
	__REQ_NOUNMAP,		/* do not free blocks when zeroing */

	__REQ_HIPRI,

	/* for driver use */
	__REQ_DRV,
	__REQ_SWAP,		/* swapping request. */
	__REQ_NR_BITS,		/* stops here */
};
#define REQ_FAILFAST_DEV	(1ULL << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1ULL << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1ULL << __REQ_FAILFAST_DRIVER)
#define REQ_SYNC		(1ULL << __REQ_SYNC)
#define REQ_META		(1ULL << __REQ_META)
#define REQ_PRIO		(1ULL << __REQ_PRIO)
#define REQ_NOMERGE		(1ULL << __REQ_NOMERGE)
#define REQ_IDLE		(1ULL << __REQ_IDLE)
#define REQ_INTEGRITY		(1ULL << __REQ_INTEGRITY)
#define REQ_FUA			(1ULL << __REQ_FUA)
#define REQ_PREFLUSH		(1ULL << __REQ_PREFLUSH)
#define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
#define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
#define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)

#define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
#define REQ_HIPRI		(1ULL << __REQ_HIPRI)

#define REQ_DRV			(1ULL << __REQ_DRV)
#define REQ_SWAP		(1ULL << __REQ_SWAP)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
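
/*
 * Editor's note: REQ_FAILFAST_MASK groups the three "no driver retries"
 * flags so callers can request fast failure across all error classes at
 * once. For example, the request-based multipath target sets
 * REQ_FAILFAST_TRANSPORT on cloned requests so that transport errors
 * surface quickly and the IO can be retried on another path.
 */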
#define REQ_NOMERGE_FLAGS \
	(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)

enum stat_group {
	STAT_READ,
	STAT_WRITE,
	STAT_DISCARD,

	NR_STAT_GROUPS
};
#define bio_op(bio) \
	((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
	((req)->cmd_flags & REQ_OP_MASK)

/* obsolete, don't use in new code */
static inline void bio_set_op_attrs(struct bio *bio, unsigned op,
				    unsigned op_flags)
{
	bio->bi_opf = op | op_flags;
}
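
/*
 * Editor's note: the non-obsolete replacement for bio_set_op_attrs() is
 * a plain assignment of the combined op and flags, e.g.:
 *
 *	bio->bi_opf = REQ_OP_READ | REQ_RAHEAD;
 */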
static inline bool op_is_write(unsigned int op)
{
	return (op & 1);
}

/*
 * Check if the bio or request is one that needs special treatment in the
 * flush state machine.
 */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}
/*
 * Reads are always treated as synchronous, as are requests with the FUA or
 * PREFLUSH flag. Other operations may be marked as synchronous using the
 * REQ_SYNC flag.
 */
static inline bool op_is_sync(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_READ ||
		(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
static inline bool op_is_discard(unsigned int op)
{
	return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
/*
 * Check if a bio or request operation is a zone management operation, with
 * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
 * due to its different handling in the block layer and device response in
 * case of command failure.
 */
static inline bool op_is_zone_mgmt(enum req_opf op)
{
	switch (op & REQ_OP_MASK) {
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return true;
	default:
		return false;
	}
}
static inline int op_stat_group(unsigned int op)
{
	if (op_is_discard(op))
		return STAT_DISCARD;
	return op_is_write(op);
}
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		-1U
#define BLK_QC_T_EAGAIN		-2U
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)
static inline bool blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
}

static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

static inline unsigned int blk_qc_t_to_tag(blk_qc_t cookie)
{
	return cookie & ((1u << BLK_QC_T_SHIFT) - 1);
}

static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
{
	return (cookie & BLK_QC_T_INTERNAL) != 0;
}
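
/*
 * Editor's illustration of the cookie layout implied by the helpers
 * above: bit 31 marks an internal (scheduler) tag, bits 30..16 hold the
 * hardware queue number, and bits 15..0 hold the tag. For queue 2, tag 7:
 *
 *	blk_qc_t cookie = (2 << BLK_QC_T_SHIFT) | 7;
 *
 *	blk_qc_t_to_queue_num(cookie)	-> 2
 *	blk_qc_t_to_tag(cookie)		-> 7
 *	blk_qc_t_is_internal(cookie)	-> false
 */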
#endif /* __LINUX_BLK_TYPES_H */