// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/scatterlist.h>
#include <linux/part_stat.h>
#include <linux/blk-cgroup.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-mq-sched.h"
#include "blk-rq-qos.h"
#include "blk-throttle.h"
static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
{
	*bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
}
static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
{
	struct bvec_iter iter = bio->bi_iter;
	int idx;

	bio_get_first_bvec(bio, bv);
	if (bv->bv_len == bio->bi_iter.bi_size)
		return;		/* this bio only has a single bvec */

	bio_advance_iter(bio, &iter, iter.bi_size);

	if (!iter.bi_bvec_done)
		idx = iter.bi_idx - 1;
	else	/* in the middle of bvec */
		idx = iter.bi_idx;

	*bv = bio->bi_io_vec[idx];

	/*
	 * iter.bi_bvec_done records the actual length of the last bvec
	 * if this bio ends in the middle of one io vector
	 */
	if (iter.bi_bvec_done)
		bv->bv_len = iter.bi_bvec_done;
}
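
/*
 * Illustrative example (not part of this file): if the last bvec holds 8192
 * bytes but the bio ends 4096 bytes into it, the final bio_advance_iter()
 * leaves iter.bi_bvec_done == 4096, so the bvec returned above is trimmed
 * to 4096 bytes.
 */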
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}
/*
 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
 * is defined as 'unsigned int'. At the same time it has to be aligned to the
 * logical block size, which is the minimum accepted unit by hardware.
 */
static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
{
	return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
}
static struct bio *bio_split_discard(struct bio *bio,
		const struct queue_limits *lim,
		unsigned *nsegs, struct bio_set *bs)
{
	unsigned int max_discard_sectors, granularity;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(lim->discard_granularity >> 9, 1U);

	max_discard_sectors =
		min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
	max_discard_sectors -= max_discard_sectors % granularity;
	if (unlikely(!max_discard_sectors))
		return NULL;

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	tmp = bio->bi_iter.bi_sector + split_sectors -
		((lim->discard_alignment >> 9) % granularity);
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
static struct bio *bio_split_write_zeroes(struct bio *bio,
		const struct queue_limits *lim,
		unsigned *nsegs, struct bio_set *bs)
{
	*nsegs = 0;
	if (!lim->max_write_zeroes_sectors)
		return NULL;
	if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
		return NULL;
	return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
}
/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct bio *bio,
				       const struct queue_limits *lim)
{
	unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
	unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
	unsigned max_sectors = lim->max_sectors, start, end;

	if (lim->chunk_sectors) {
		max_sectors = min(max_sectors,
			blk_chunk_sectors_left(bio->bi_iter.bi_sector,
					lim->chunk_sectors));
	}

	start = bio->bi_iter.bi_sector & (pbs - 1);
	end = (start + max_sectors) & ~(pbs - 1);
	if (end > start)
		return end - start;
	return max_sectors & ~(lbs - 1);
}
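
/*
 * Worked example (illustrative only): with max_sectors == 1280, an 8-sector
 * (4 KiB) physical block and a bio starting at sector 3, start == 3 and
 * end == (3 + 1280) & ~7 == 1280, so 1277 sectors are allowed and the
 * request ends on a physical block boundary.
 */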
/**
 * get_max_segment_size() - maximum number of bytes to add as a single segment
 * @lim: Request queue limits.
 * @start_page: See below.
 * @offset: Offset from @start_page where to add a segment.
 *
 * Returns the maximum number of bytes that can be added as a single segment.
 */
static inline unsigned get_max_segment_size(const struct queue_limits *lim,
		struct page *start_page, unsigned long offset)
{
	unsigned long mask = lim->seg_boundary_mask;

	offset = mask & (page_to_phys(start_page) + offset);

	/*
	 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
	 * after having calculated the minimum.
	 */
	return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
}
/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @lim:      [in] queue limits to split based on
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @bytes:    [in,out] Number of bytes in the bio being built. Incremented
 *            by the number of bytes from @bv that may be appended to that
 *            bio without exceeding @max_bytes
 * @max_segs: [in] upper bound for *@nsegs
 * @max_bytes: [in] upper bound for *@bytes
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct queue_limits *lim,
		const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
		unsigned max_segs, unsigned max_bytes)
{
	unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
	unsigned len = min(bv->bv_len, max_len);
	unsigned total_len = 0;
	unsigned seg_size = 0;

	while (len && *nsegs < max_segs) {
		seg_size = get_max_segment_size(lim, bv->bv_page,
						bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		(*nsegs)++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
			break;
	}

	*bytes += total_len;

	/* tell the caller to split the bvec if it is too big to fit */
	return len > 0 || bv->bv_len > max_len;
}
/**
 * bio_split_rw - split a bio in two bios
 * @bio:  [in] bio to be split
 * @lim:  [in] queue limits to split based on
 * @segs: [out] number of segments in the bio with the first half of the sectors
 * @bs:	  [in] bio set to allocate the clone from
 * @max_bytes: [in] maximum number of bytes per bio
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most @max_bytes worth of data
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, struct bio_set *bs, unsigned max_bytes)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, bytes = 0;

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
			goto split;

		if (nsegs < lim->max_segments &&
		    bytes + bv.bv_len <= max_bytes &&
		    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			bytes += bv.bv_len;
		} else {
			if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
					lim->max_segments, max_bytes))
				goto split;
		}

		bvprv = bv;
		bvprvp = &bvprv;
	}

	*segs = nsegs;
	return NULL;
split:
	/*
	 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
	 * with EAGAIN if splitting is required and return an error pointer.
	 */
	if (bio->bi_opf & REQ_NOWAIT) {
		bio->bi_status = BLK_STS_AGAIN;
		bio_endio(bio);
		return ERR_PTR(-EAGAIN);
	}

	*segs = nsegs;

	/*
	 * Individual bvecs might not be logical block aligned. Round down the
	 * split size so that each bio is properly block size aligned, even if
	 * we do not use the full hardware limits.
	 */
	bytes = ALIGN_DOWN(bytes, lim->logical_block_size);

	/*
	 * Bio splitting may cause subtle trouble such as a hang when doing
	 * sync iopoll in the direct IO routine. Given that the performance
	 * gain of iopoll for big IO can be trivial, disable iopoll when a
	 * split is needed.
	 */
	bio_clear_polled(bio);
	return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
}
EXPORT_SYMBOL_GPL(bio_split_rw);
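
/*
 * Sketch of a caller (illustrative only, assuming a queue_limits pointer
 * 'lim', a bio_set 'bs' and a byte limit 'max_bytes' are available; this is
 * not code from this file):
 *
 *	unsigned nsegs;
 *	struct bio *split = bio_split_rw(bio, lim, &nsegs, bs, max_bytes);
 *
 *	if (IS_ERR(split))
 *		return;			// REQ_NOWAIT bio was already ended
 *	if (split)			// first part; @bio holds the rest
 *		submit_bio_noacct(bio);
 */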
/**
 * __bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 * @lim:     queue limits to split based on
 * @nr_segs: returns the number of segments in the returned bio
 *
 * Check if @bio needs splitting based on the queue limits, and if so split off
 * a bio fitting the limits from the beginning of @bio and return it. @bio is
 * shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
struct bio *__bio_split_to_limits(struct bio *bio,
				  const struct queue_limits *lim,
				  unsigned int *nr_segs)
{
	struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
	struct bio *split;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = bio_split_discard(bio, lim, nr_segs, bs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
		break;
	default:
		split = bio_split_rw(bio, lim, nr_segs, bs,
				get_max_io_size(bio, lim) << SECTOR_SHIFT);
		if (IS_ERR(split))
			return NULL;
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		blkcg_bio_issue_init(split);
		bio_chain(split, bio);
		trace_block_split(split, bio->bi_iter.bi_sector);
		submit_bio_noacct(bio);
		return split;
	}
	return bio;
}
/**
 * bio_split_to_limits - split a bio to fit the queue limits
 * @bio:     bio to be split
 *
 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
 * if so split off a bio fitting the limits from the beginning of @bio and
 * return it. @bio is shortened to the remainder and re-submitted.
 *
 * The split bio is allocated from @q->bio_split, which is provided by the
 * block layer.
 */
struct bio *bio_split_to_limits(struct bio *bio)
{
	const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
	unsigned int nr_segs;

	if (bio_may_exceed_limits(bio, lim))
		return __bio_split_to_limits(bio, lim, &nr_segs);
	return bio;
}
EXPORT_SYMBOL(bio_split_to_limits);
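
/*
 * Stacking drivers typically call this at the top of their ->submit_bio
 * handler, along the lines of (illustrative sketch only):
 *
 *	bio = bio_split_to_limits(bio);
 *	if (!bio)
 *		return;
 */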
unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	unsigned int bytes = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		if (queue_max_discard_segments(rq->q) > 1) {
			struct bio *bio = rq->bio;

			for_each_bio(bio)
				nr_phys_segs++;
			return nr_phys_segs;
		}
		return 1;
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
				UINT_MAX, UINT_MAX);
	return nr_phys_segs;
}
static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(&q->limits,
					bvec->bv_page, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}
/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
		struct scatterlist *sglist, struct scatterlist **last_sg)
{
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);

	if (*last_sg)
		sg_mark_end(*last_sg);

	/*
	 * Something must have gone wrong if the calculated number of
	 * segments is bigger than the number of the request's physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(__blk_rq_map_sg);
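
/*
 * Most drivers go through the blk_rq_map_sg() wrapper rather than calling
 * this directly; that wrapper is assumed here to look roughly like the
 * following sketch (illustrative only):
 *
 *	struct scatterlist *last_sg = NULL;
 *
 *	return __blk_rq_map_sg(q, rq, sglist, &last_sg);
 */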
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
						  sector_t offset)
{
	struct request_queue *q = rq->q;
	unsigned int max_sectors;

	if (blk_rq_is_passthrough(rq))
		return q->limits.max_hw_sectors;

	max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
	if (!q->limits.chunk_sectors ||
	    req_op(rq) == REQ_OP_DISCARD ||
	    req_op(rq) == REQ_OP_SECURE_ERASE)
		return max_sectors;
	return min(max_sectors,
		   blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
}
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (!blk_cgroup_mergeable(req, bio))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/* discard request merge won't add new segment */
	if (req_op(req) == REQ_OP_DISCARD)
		return 1;

	if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}
int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_back_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}
static int ll_front_merge_fn(struct request *req, struct bio *bio,
		unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (!bio_crypt_ctx_front_mergeable(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > blk_rq_get_max_segments(req))
		return 0;

	if (!blk_cgroup_mergeable(req, next->bio))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	if (!bio_crypt_ctx_merge_rq(req, next))
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
static void blk_rq_set_mixed_merge(struct request *rq)
{
	blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}
static inline blk_opf_t bio_failfast(const struct bio *bio)
{
	if (bio->bi_opf & REQ_RAHEAD)
		return REQ_FAILFAST_MASK;

	return bio->bi_opf & REQ_FAILFAST_MASK;
}
/*
 * After we are marked as MIXED_MERGE, any new RA bio has to be updated
 * as failfast, and the request's failfast has to be updated in case of
 * a front merge.
 */
static inline void blk_update_mixed_merge(struct request *req,
		struct bio *bio, bool front_merge)
{
	if (req->rq_flags & RQF_MIXED_MERGE) {
		if (bio->bi_opf & REQ_RAHEAD)
			bio->bi_opf |= REQ_FAILFAST_MASK;

		if (front_merge) {
			req->cmd_flags &= ~REQ_FAILFAST_MASK;
			req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
		}
	}
}
static void blk_account_io_merge_request(struct request *req)
{
	if (blk_do_io_stat(req)) {
		part_stat_lock();
		part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
		part_stat_unlock();
	}
}
static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}
/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next))
		return NULL;

	/* Don't merge requests with different write hints. */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	blk_crypto_rq_put_keyslot(next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge_request(next);

	trace_block_rq_merge(next);

	/*
	 * ownership of the bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}
*attempt_back_merge(struct request_queue
*q
,
890 struct request
*next
= elv_latter_request(q
, rq
);
893 return attempt_merge(q
, rq
, next
);
static struct request *attempt_front_merge(struct request_queue *q,
		struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}
/*
 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
 * otherwise. The caller is responsible for freeing 'next' if the merge
 * happened.
 */
bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			   struct request *next)
{
	return attempt_merge(q, rq, next);
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* don't merge across cgroup boundaries */
	if (!blk_cgroup_mergeable(rq, bio))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* Only merge if the crypt contexts are compatible */
	if (!bio_crypt_rq_ctx_compatible(rq, bio))
		return false;

	/* Don't merge requests with different write hints. */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}
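
/*
 * Worked example (illustrative only): for a request covering sectors
 * 100..107 (blk_rq_pos() == 100, blk_rq_sectors() == 8), a bio starting at
 * sector 108 is a back merge candidate, while an 8-sector bio starting at
 * sector 92 is a front merge candidate because 100 - 8 == 92.
 */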
static void blk_account_io_merge_bio(struct request *req)
{
	if (!blk_do_io_stat(req))
		return;

	part_stat_lock();
	part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
	part_stat_unlock();
}
enum bio_merge_status {
	BIO_MERGE_OK,
	BIO_MERGE_NONE,
	BIO_MERGE_FAILED,
};
static enum bio_merge_status bio_attempt_back_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_back_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_backmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, false);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_free_ctx(bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}
static enum bio_merge_status bio_attempt_front_merge(struct request *req,
		struct bio *bio, unsigned int nr_segs)
{
	const blk_opf_t ff = bio_failfast(bio);

	if (!ll_front_merge_fn(req, bio, nr_segs))
		return BIO_MERGE_FAILED;

	trace_block_bio_frontmerge(bio);
	rq_qos_merge(req->q, req, bio);

	if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
		blk_rq_set_mixed_merge(req);

	blk_update_mixed_merge(req, bio, true);

	bio->bi_next = req->bio;
	req->bio = bio;

	req->__sector = bio->bi_iter.bi_sector;
	req->__data_len += bio->bi_iter.bi_size;

	bio_crypt_do_front_merge(req, bio);

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
}
static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
		struct request *req, struct bio *bio)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	rq_qos_merge(q, req, bio);

	req->biotail->bi_next = bio;
	req->biotail = bio;
	req->__data_len += bio->bi_iter.bi_size;
	req->nr_phys_segments = segments + 1;

	blk_account_io_merge_bio(req);
	return BIO_MERGE_OK;
no_merge:
	req_set_nomerge(q, req);
	return BIO_MERGE_FAILED;
}
static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
						   struct request *rq,
						   struct bio *bio,
						   unsigned int nr_segs,
						   bool sched_allow_merge)
{
	if (!blk_rq_merge_ok(rq, bio))
		return BIO_MERGE_NONE;

	switch (blk_try_merge(rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_back_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_FRONT_MERGE:
		if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
			return bio_attempt_front_merge(rq, bio, nr_segs);
		break;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio);
	default:
		return BIO_MERGE_NONE;
	}

	return BIO_MERGE_FAILED;
}
/**
 * blk_attempt_plug_merge - try to merge with %current's plugged list
 * @q: request_queue new bio is being queued at
 * @bio: new bio being queued
 * @nr_segs: number of segments in @bio
 *
 * Determine whether @bio being queued on @q can be merged with the previous
 * request on %current's plugged list. Returns %true if the merge was
 * successful, otherwise %false.
 *
 * Plugging coalesces IOs from the same issuer for the same purpose without
 * going through @q->queue_lock. As such it's more of an issuing mechanism
 * than scheduling, and the request, while it may have elvpriv data, is not
 * added to the elevator at this point. In addition, we don't have
 * reliable access to the elevator outside the queue lock. Only check basic
 * merging parameters without querying the elevator.
 *
 * Caller must ensure !blk_queue_nomerges(q) beforehand.
 */
bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs)
{
	struct blk_plug *plug;
	struct request *rq;

	plug = blk_mq_plug(bio);
	if (!plug || rq_list_empty(plug->mq_list))
		return false;

	rq_list_for_each(&plug->mq_list, rq) {
		if (rq->q == q) {
			if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
			    BIO_MERGE_OK)
				return true;
			break;
		}

		/*
		 * Only keep iterating plug list for merges if we have multiple
		 * queues
		 */
		if (!plug->multiple_queues)
			break;
	}
	return false;
}
/*
 * Iterate the list of requests and see if we can merge this bio with any
 * of them.
 */
bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
			struct bio *bio, unsigned int nr_segs)
{
	struct request *rq;
	int checked = 8;

	list_for_each_entry_reverse(rq, list, queuelist) {
		if (!checked--)
			break;

		switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
		case BIO_MERGE_NONE:
			continue;
		case BIO_MERGE_OK:
			return true;
		case BIO_MERGE_FAILED:
			return false;
		}
	}

	return false;
}
EXPORT_SYMBOL_GPL(blk_bio_list_merge);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
		unsigned int nr_segs, struct request **merged_request)
{
	struct request *rq;

	switch (elv_merge(q, &rq, bio)) {
	case ELEVATOR_BACK_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_back_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
		return true;
	case ELEVATOR_FRONT_MERGE:
		if (!blk_mq_sched_allow_merge(q, rq, bio))
			return false;
		if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
			return false;
		*merged_request = attempt_front_merge(q, rq);
		if (!*merged_request)
			elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
		return true;
	case ELEVATOR_DISCARD_MERGE:
		return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
	default:
		return false;
	}
}
EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);