block/blk-merge.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Functions related to segment and merge handling
4 */
5 #include <linux/kernel.h>
6 #include <linux/module.h>
7 #include <linux/bio.h>
8 #include <linux/blkdev.h>
9 #include <linux/blk-integrity.h>
10 #include <linux/scatterlist.h>
11 #include <linux/part_stat.h>
12 #include <linux/blk-cgroup.h>
13
14 #include <trace/events/block.h>
15
16 #include "blk.h"
17 #include "blk-mq-sched.h"
18 #include "blk-rq-qos.h"
19 #include "blk-throttle.h"
20
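/* Fetch the first bvec covered by @bio's iterator without advancing @bio. */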
21 static inline void bio_get_first_bvec(struct bio *bio, struct bio_vec *bv)
22 {
23 *bv = mp_bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
24 }
25
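/*
 * Fetch the last bvec covered by @bio's iterator. If the bio ends in the
 * middle of that bvec, bv_len is trimmed to the part the bio actually covers.
 */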
26 static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv)
27 {
28 struct bvec_iter iter = bio->bi_iter;
29 int idx;
30
31 bio_get_first_bvec(bio, bv);
32 if (bv->bv_len == bio->bi_iter.bi_size)
33 return; /* this bio only has a single bvec */
34
35 bio_advance_iter(bio, &iter, iter.bi_size);
36
37 if (!iter.bi_bvec_done)
38 idx = iter.bi_idx - 1;
39 else /* in the middle of bvec */
40 idx = iter.bi_idx;
41
42 *bv = bio->bi_io_vec[idx];
43
44 /*
45 * iter.bi_bvec_done records the actual length of the last bvec
46 * if this bio ends in the middle of an io vector
47 */
48 if (iter.bi_bvec_done)
49 bv->bv_len = iter.bi_bvec_done;
50 }
51
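/*
 * Return true if appending @next after @prev (the tail bio of @prev_rq when
 * set) would create a segment gap with respect to the queue's virt boundary
 * mask, in which case the two must not be merged.
 */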
52 static inline bool bio_will_gap(struct request_queue *q,
53 struct request *prev_rq, struct bio *prev, struct bio *next)
54 {
55 struct bio_vec pb, nb;
56
57 if (!bio_has_data(prev) || !queue_virt_boundary(q))
58 return false;
59
60 /*
61 * Don't merge if the 1st bio starts with a non-zero offset, otherwise it
62 * is quite difficult to respect the sg gap limit. We work hard to
63 * merge a huge number of small single bios, e.g. during mkfs.
64 */
65 if (prev_rq)
66 bio_get_first_bvec(prev_rq->bio, &pb);
67 else
68 bio_get_first_bvec(prev, &pb);
69 if (pb.bv_offset & queue_virt_boundary(q))
70 return true;
71
72 /*
73 * We don't need to worry about the situation that the merged segment
74 * ends in unaligned virt boundary:
75 *
76 * - if 'pb' ends aligned, the merged segment ends aligned
77 * - if 'pb' ends unaligned, the next bio must include
78 * one single bvec of 'nb', otherwise the 'nb' can't
79 * merge with 'pb'
80 */
81 bio_get_last_bvec(prev, &pb);
82 bio_get_first_bvec(next, &nb);
83 if (biovec_phys_mergeable(q, &pb, &nb))
84 return false;
85 return __bvec_gap_to_prev(&q->limits, &pb, nb.bv_offset);
86 }
87
88 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
89 {
90 return bio_will_gap(req->q, req, req->biotail, bio);
91 }
92
93 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
94 {
95 return bio_will_gap(req->q, NULL, bio, req->bio);
96 }
97
98 /*
99 * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
100 * is defined as 'unsigned int', and it also has to be aligned to the
101 * logical block size, which is the minimum unit accepted by the hardware.
102 */
103 static unsigned int bio_allowed_max_sectors(const struct queue_limits *lim)
104 {
105 return round_down(UINT_MAX, lim->logical_block_size) >> SECTOR_SHIFT;
106 }
107
108 static struct bio *bio_split_discard(struct bio *bio,
109 const struct queue_limits *lim,
110 unsigned *nsegs, struct bio_set *bs)
111 {
112 unsigned int max_discard_sectors, granularity;
113 sector_t tmp;
114 unsigned split_sectors;
115
116 *nsegs = 1;
117
118 granularity = max(lim->discard_granularity >> 9, 1U);
119
120 max_discard_sectors =
121 min(lim->max_discard_sectors, bio_allowed_max_sectors(lim));
122 max_discard_sectors -= max_discard_sectors % granularity;
123 if (unlikely(!max_discard_sectors))
124 return NULL;
125
126 if (bio_sectors(bio) <= max_discard_sectors)
127 return NULL;
128
129 split_sectors = max_discard_sectors;
130
131 /*
132 * If the next starting sector would be misaligned, stop the discard at
133 * the previous aligned sector.
134 */
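/*
 * Illustrative example (made-up numbers): with an 8-sector granularity,
 * a zero discard_alignment, a bio starting at sector 10 and split_sectors
 * of 16, tmp works out to (10 + 16) % 8 = 2, so the split is shortened to
 * 14 sectors and the remainder starts at the aligned sector 24.
 */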
135 tmp = bio->bi_iter.bi_sector + split_sectors -
136 ((lim->discard_alignment >> 9) % granularity);
137 tmp = sector_div(tmp, granularity);
138
139 if (split_sectors > tmp)
140 split_sectors -= tmp;
141
142 return bio_split(bio, split_sectors, GFP_NOIO, bs);
143 }
144
145 static struct bio *bio_split_write_zeroes(struct bio *bio,
146 const struct queue_limits *lim,
147 unsigned *nsegs, struct bio_set *bs)
148 {
149 *nsegs = 0;
150 if (!lim->max_write_zeroes_sectors)
151 return NULL;
152 if (bio_sectors(bio) <= lim->max_write_zeroes_sectors)
153 return NULL;
154 return bio_split(bio, lim->max_write_zeroes_sectors, GFP_NOIO, bs);
155 }
156
157 /*
158 * Return the maximum number of sectors from the start of a bio that may be
159 * submitted as a single request to a block device. If enough sectors remain,
160 * align the end to the physical block size. Otherwise align the end to the
161 * logical block size. This approach minimizes the number of non-aligned
162 * requests that are submitted to a block device if the start of a bio is not
163 * aligned to a physical block boundary.
164 */
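/*
 * Illustrative example (made-up numbers): with a 4096-byte physical block
 * size (pbs = 8 sectors) and max_sectors = 1280, a bio starting 3 sectors
 * into a physical block gets start = 3 and end = (3 + 1280) & ~7 = 1280,
 * so 1277 sectors are allowed and the I/O ends on a physical block boundary.
 */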
165 static inline unsigned get_max_io_size(struct bio *bio,
166 const struct queue_limits *lim)
167 {
168 unsigned pbs = lim->physical_block_size >> SECTOR_SHIFT;
169 unsigned lbs = lim->logical_block_size >> SECTOR_SHIFT;
170 unsigned max_sectors = lim->max_sectors, start, end;
171
172 if (lim->chunk_sectors) {
173 max_sectors = min(max_sectors,
174 blk_chunk_sectors_left(bio->bi_iter.bi_sector,
175 lim->chunk_sectors));
176 }
177
178 start = bio->bi_iter.bi_sector & (pbs - 1);
179 end = (start + max_sectors) & ~(pbs - 1);
180 if (end > start)
181 return end - start;
182 return max_sectors & ~(lbs - 1);
183 }
184
185 /**
186 * get_max_segment_size() - maximum number of bytes to add as a single segment
187 * @lim: Request queue limits.
188 * @start_page: Page in which the segment starts.
189 * @offset: Offset into @start_page at which the segment starts.
190 *
191 * Returns the maximum number of bytes that can be added as a single segment.
192 */
193 static inline unsigned get_max_segment_size(const struct queue_limits *lim,
194 struct page *start_page, unsigned long offset)
195 {
196 unsigned long mask = lim->seg_boundary_mask;
197
198 offset = mask & (page_to_phys(start_page) + offset);
199
200 /*
201 * Prevent an overflow if mask = ULONG_MAX and offset = 0 by adding 1
202 * after having calculated the minimum.
203 */
204 return min(mask - offset, (unsigned long)lim->max_segment_size - 1) + 1;
205 }
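/*
 * Illustrative example (made-up numbers): with a 64K segment boundary
 * (seg_boundary_mask = 0xffff) and max_segment_size = 65536, a segment
 * starting 0xf000 bytes into a boundary window may be at most
 * min(0xffff - 0xf000, 65535) + 1 = 4096 bytes long.
 */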
206
207 /**
208 * bvec_split_segs - verify whether or not a bvec should be split in the middle
209 * @lim: [in] queue limits to split based on
210 * @bv: [in] bvec to examine
211 * @nsegs: [in,out] Number of segments in the bio being built. Incremented
212 * by the number of segments from @bv that may be appended to that
213 * bio without exceeding @max_segs
214 * @bytes: [in,out] Number of bytes in the bio being built. Incremented
215 * by the number of bytes from @bv that may be appended to that
216 * bio without exceeding @max_bytes
217 * @max_segs: [in] upper bound for *@nsegs
218 * @max_bytes: [in] upper bound for *@bytes
219 *
220 * When splitting a bio, it can happen that a bvec is encountered that is too
221 * big to fit in a single segment and hence that it has to be split in the
222 * middle. This function verifies whether or not that should happen. The value
223 * %true is returned if and only if appending the entire @bv to a bio with
224 * *@nsegs segments and *@bytes bytes would make that bio unacceptable for
225 * the block driver.
226 */
227 static bool bvec_split_segs(const struct queue_limits *lim,
228 const struct bio_vec *bv, unsigned *nsegs, unsigned *bytes,
229 unsigned max_segs, unsigned max_bytes)
230 {
231 unsigned max_len = min(max_bytes, UINT_MAX) - *bytes;
232 unsigned len = min(bv->bv_len, max_len);
233 unsigned total_len = 0;
234 unsigned seg_size = 0;
235
236 while (len && *nsegs < max_segs) {
237 seg_size = get_max_segment_size(lim, bv->bv_page,
238 bv->bv_offset + total_len);
239 seg_size = min(seg_size, len);
240
241 (*nsegs)++;
242 total_len += seg_size;
243 len -= seg_size;
244
245 if ((bv->bv_offset + total_len) & lim->virt_boundary_mask)
246 break;
247 }
248
249 *bytes += total_len;
250
251 /* tell the caller to split the bvec if it is too big to fit */
252 return len > 0 || bv->bv_len > max_len;
253 }
254
255 /**
256 * bio_split_rw - split a bio in two bios
257 * @bio: [in] bio to be split
258 * @lim: [in] queue limits to split based on
259 * @segs: [out] number of segments in the bio with the first half of the sectors
260 * @bs: [in] bio set to allocate the clone from
261 * @max_bytes: [in] maximum number of bytes per bio
262 *
263 * Clone @bio, update the bi_iter of the clone to represent the first sectors
264 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
265 * following is guaranteed for the cloned bio:
266 * - That it has at most @max_bytes worth of data
267 * - That it has at most lim->max_segments segments.
268 *
269 * Except for discard requests the cloned bio will point at the bi_io_vec of
270 * the original bio. It is the responsibility of the caller to ensure that the
271 * original bio is not freed before the cloned bio. The caller is also
272 * responsible for ensuring that @bs is only destroyed after processing of the
273 * split bio has finished.
274 */
275 struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim,
276 unsigned *segs, struct bio_set *bs, unsigned max_bytes)
277 {
278 struct bio_vec bv, bvprv, *bvprvp = NULL;
279 struct bvec_iter iter;
280 unsigned nsegs = 0, bytes = 0;
281
282 bio_for_each_bvec(bv, bio, iter) {
283 /*
284 * If the queue doesn't support SG gaps and adding this
285 * offset would create a gap, disallow it.
286 */
287 if (bvprvp && bvec_gap_to_prev(lim, bvprvp, bv.bv_offset))
288 goto split;
289
290 if (nsegs < lim->max_segments &&
291 bytes + bv.bv_len <= max_bytes &&
292 bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
293 nsegs++;
294 bytes += bv.bv_len;
295 } else {
296 if (bvec_split_segs(lim, &bv, &nsegs, &bytes,
297 lim->max_segments, max_bytes))
298 goto split;
299 }
300
301 bvprv = bv;
302 bvprvp = &bvprv;
303 }
304
305 *segs = nsegs;
306 return NULL;
307 split:
308 /*
309 * We can't sanely support splitting for a REQ_NOWAIT bio. End it
310 * with EAGAIN if splitting is required and return an error pointer.
311 */
312 if (bio->bi_opf & REQ_NOWAIT) {
313 bio->bi_status = BLK_STS_AGAIN;
314 bio_endio(bio);
315 return ERR_PTR(-EAGAIN);
316 }
317
318 *segs = nsegs;
319
320 /*
321 * Individual bvecs might not be logical block aligned. Round down the
322 * split size so that each bio is properly block size aligned, even if
323 * we do not use the full hardware limits.
324 */
325 bytes = ALIGN_DOWN(bytes, lim->logical_block_size);
326
327 /*
328 * Bio splitting may cause subtle trouble such as hangs when doing sync
329 * iopoll in the direct I/O path. Given that the performance gain of
330 * iopoll for big I/O can be trivial, disable iopoll when a split is needed.
331 */
332 bio_clear_polled(bio);
333 return bio_split(bio, bytes >> SECTOR_SHIFT, GFP_NOIO, bs);
334 }
335 EXPORT_SYMBOL_GPL(bio_split_rw);
336
337 /**
338 * __bio_split_to_limits - split a bio to fit the queue limits
339 * @bio: bio to be split
340 * @lim: queue limits to split based on
341 * @nr_segs: returns the number of segments in the returned bio
342 *
343 * Check if @bio needs splitting based on the queue limits, and if so split off
344 * a bio fitting the limits from the beginning of @bio and return it. @bio is
345 * shortened to the remainder and re-submitted.
346 *
347 * The split bio is allocated from the per-disk bio_split bio set, which is
348 * provided by the block layer.
349 */
350 struct bio *__bio_split_to_limits(struct bio *bio,
351 const struct queue_limits *lim,
352 unsigned int *nr_segs)
353 {
354 struct bio_set *bs = &bio->bi_bdev->bd_disk->bio_split;
355 struct bio *split;
356
357 switch (bio_op(bio)) {
358 case REQ_OP_DISCARD:
359 case REQ_OP_SECURE_ERASE:
360 split = bio_split_discard(bio, lim, nr_segs, bs);
361 break;
362 case REQ_OP_WRITE_ZEROES:
363 split = bio_split_write_zeroes(bio, lim, nr_segs, bs);
364 break;
365 default:
366 split = bio_split_rw(bio, lim, nr_segs, bs,
367 get_max_io_size(bio, lim) << SECTOR_SHIFT);
368 if (IS_ERR(split))
369 return NULL;
370 break;
371 }
372
373 if (split) {
374 /* there is no chance to merge the split bio */
375 split->bi_opf |= REQ_NOMERGE;
376
377 blkcg_bio_issue_init(split);
378 bio_chain(split, bio);
379 trace_block_split(split, bio->bi_iter.bi_sector);
380 submit_bio_noacct(bio);
381 return split;
382 }
383 return bio;
384 }
385
386 /**
387 * bio_split_to_limits - split a bio to fit the queue limits
388 * @bio: bio to be split
389 *
390 * Check if @bio needs splitting based on the queue limits of @bio->bi_bdev, and
391 * if so split off a bio fitting the limits from the beginning of @bio and
392 * return it. @bio is shortened to the remainder and re-submitted.
393 *
394 * The split bio is allocated from the per-disk bio_split bio set, which is
395 * provided by the block layer.
396 */
397 struct bio *bio_split_to_limits(struct bio *bio)
398 {
399 const struct queue_limits *lim = &bdev_get_queue(bio->bi_bdev)->limits;
400 unsigned int nr_segs;
401
402 if (bio_may_exceed_limits(bio, lim))
403 return __bio_split_to_limits(bio, lim, &nr_segs);
404 return bio;
405 }
406 EXPORT_SYMBOL(bio_split_to_limits);
407
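/*
 * Recount the physical segments of @rq by walking all bvecs of all bios in
 * the request and applying the queue's segment size and boundary limits.
 */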
408 unsigned int blk_recalc_rq_segments(struct request *rq)
409 {
410 unsigned int nr_phys_segs = 0;
411 unsigned int bytes = 0;
412 struct req_iterator iter;
413 struct bio_vec bv;
414
415 if (!rq->bio)
416 return 0;
417
418 switch (bio_op(rq->bio)) {
419 case REQ_OP_DISCARD:
420 case REQ_OP_SECURE_ERASE:
421 if (queue_max_discard_segments(rq->q) > 1) {
422 struct bio *bio = rq->bio;
423
424 for_each_bio(bio)
425 nr_phys_segs++;
426 return nr_phys_segs;
427 }
428 return 1;
429 case REQ_OP_WRITE_ZEROES:
430 return 0;
431 default:
432 break;
433 }
434
435 rq_for_each_bvec(bv, rq, iter)
436 bvec_split_segs(&rq->q->limits, &bv, &nr_phys_segs, &bytes,
437 UINT_MAX, UINT_MAX);
438 return nr_phys_segs;
439 }
440
441 static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
442 struct scatterlist *sglist)
443 {
444 if (!*sg)
445 return sglist;
446
447 /*
448 * If the driver previously mapped a shorter list, we could see a
449 * termination bit prematurely unless it fully inits the sg table
450 * on each mapping. We KNOW that there must be more entries here
451 * or the driver would be buggy, so force clear the termination bit
452 * to avoid doing a full sg_init_table() in drivers for each command.
453 */
454 sg_unmark_end(*sg);
455 return sg_next(*sg);
456 }
457
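/*
 * Map a single (possibly multi-page) bvec to one or more scatterlist
 * entries, honouring the queue's maximum segment size and segment boundary.
 * Returns the number of scatterlist entries used.
 */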
458 static unsigned blk_bvec_map_sg(struct request_queue *q,
459 struct bio_vec *bvec, struct scatterlist *sglist,
460 struct scatterlist **sg)
461 {
462 unsigned nbytes = bvec->bv_len;
463 unsigned nsegs = 0, total = 0;
464
465 while (nbytes > 0) {
466 unsigned offset = bvec->bv_offset + total;
467 unsigned len = min(get_max_segment_size(&q->limits,
468 bvec->bv_page, offset), nbytes);
469 struct page *page = bvec->bv_page;
470
471 /*
472 * Unfortunately a fair number of drivers barf on scatterlists
473 * that have an offset larger than PAGE_SIZE, despite other
474 * subsystems dealing with that invariant just fine. For now
475 * stick to the legacy format where we never present those from
476 * the block layer, but the code below should be removed once
477 * these offenders (mostly MMC/SD drivers) are fixed.
478 */
479 page += (offset >> PAGE_SHIFT);
480 offset &= ~PAGE_MASK;
481
482 *sg = blk_next_sg(sg, sglist);
483 sg_set_page(*sg, page, len, offset);
484
485 total += len;
486 nbytes -= len;
487 nsegs++;
488 }
489
490 return nsegs;
491 }
492
493 static inline int __blk_bvec_map_sg(struct bio_vec bv,
494 struct scatterlist *sglist, struct scatterlist **sg)
495 {
496 *sg = blk_next_sg(sg, sglist);
497 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
498 return 1;
499 }
500
501 /* only try to merge bvecs into one sg if they are from two bios */
502 static inline bool
503 __blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
504 struct bio_vec *bvprv, struct scatterlist **sg)
505 {
506
507 int nbytes = bvec->bv_len;
508
509 if (!*sg)
510 return false;
511
512 if ((*sg)->length + nbytes > queue_max_segment_size(q))
513 return false;
514
515 if (!biovec_phys_mergeable(q, bvprv, bvec))
516 return false;
517
518 (*sg)->length += nbytes;
519
520 return true;
521 }
522
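/*
 * Map all bvecs of all bios in the chain starting at @bio to @sglist,
 * merging physically contiguous bvecs from adjacent bios where the segment
 * size limit allows. Returns the number of scatterlist entries used.
 */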
523 static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
524 struct scatterlist *sglist,
525 struct scatterlist **sg)
526 {
527 struct bio_vec bvec, bvprv = { NULL };
528 struct bvec_iter iter;
529 int nsegs = 0;
530 bool new_bio = false;
531
532 for_each_bio(bio) {
533 bio_for_each_bvec(bvec, bio, iter) {
534 /*
535 * Only try to merge bvecs from two different bios,
536 * since bvecs within a single bio have already been
537 * merged when adding pages to the bio.
538 */
539 if (new_bio &&
540 __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
541 goto next_bvec;
542
543 if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
544 nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
545 else
546 nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
547 next_bvec:
548 new_bio = false;
549 }
550 if (likely(bio->bi_iter.bi_size)) {
551 bvprv = bvec;
552 new_bio = true;
553 }
554 }
555
556 return nsegs;
557 }
558
559 /*
560 * map a request to scatterlist, return number of sg entries setup. Caller
561 * must make sure sg can hold rq->nr_phys_segments entries
562 */
563 int __blk_rq_map_sg(struct request_queue *q, struct request *rq,
564 struct scatterlist *sglist, struct scatterlist **last_sg)
565 {
566 int nsegs = 0;
567
568 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
569 nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, last_sg);
570 else if (rq->bio)
571 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, last_sg);
572
573 if (*last_sg)
574 sg_mark_end(*last_sg);
575
576 /*
577 * Something must have gone wrong if the computed number of
578 * segments is bigger than the number of the request's physical segments
579 */
580 WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));
581
582 return nsegs;
583 }
584 EXPORT_SYMBOL(__blk_rq_map_sg);
585
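/*
 * Upper bound on the size of @rq in sectors for merging purposes: the
 * per-operation limit, further clamped so that a request starting at
 * @offset does not cross a chunk_sectors boundary (except for passthrough,
 * discard and secure erase requests).
 */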
586 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
587 sector_t offset)
588 {
589 struct request_queue *q = rq->q;
590 unsigned int max_sectors;
591
592 if (blk_rq_is_passthrough(rq))
593 return q->limits.max_hw_sectors;
594
595 max_sectors = blk_queue_get_max_sectors(q, req_op(rq));
596 if (!q->limits.chunk_sectors ||
597 req_op(rq) == REQ_OP_DISCARD ||
598 req_op(rq) == REQ_OP_SECURE_ERASE)
599 return max_sectors;
600 return min(max_sectors,
601 blk_chunk_sectors_left(offset, q->limits.chunk_sectors));
602 }
603
604 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
605 unsigned int nr_phys_segs)
606 {
607 if (!blk_cgroup_mergeable(req, bio))
608 goto no_merge;
609
610 if (blk_integrity_merge_bio(req->q, req, bio) == false)
611 goto no_merge;
612
613 /* discard request merge won't add new segment */
614 if (req_op(req) == REQ_OP_DISCARD)
615 return 1;
616
617 if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
618 goto no_merge;
619
620 /*
621 * This will form the start of a new hw segment. Bump the
622 * physical segment counter.
623 */
624 req->nr_phys_segments += nr_phys_segs;
625 return 1;
626
627 no_merge:
628 req_set_nomerge(req->q, req);
629 return 0;
630 }
631
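/*
 * Check whether @bio may be appended to the back of @req without violating
 * gap, integrity, crypto, size or segment limits; on success the request's
 * physical segment count is updated. Returns 1 if the merge is allowed.
 */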
632 int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
633 {
634 if (req_gap_back_merge(req, bio))
635 return 0;
636 if (blk_integrity_rq(req) &&
637 integrity_req_gap_back_merge(req, bio))
638 return 0;
639 if (!bio_crypt_ctx_back_mergeable(req, bio))
640 return 0;
641 if (blk_rq_sectors(req) + bio_sectors(bio) >
642 blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
643 req_set_nomerge(req->q, req);
644 return 0;
645 }
646
647 return ll_new_hw_segment(req, bio, nr_segs);
648 }
649
650 static int ll_front_merge_fn(struct request *req, struct bio *bio,
651 unsigned int nr_segs)
652 {
653 if (req_gap_front_merge(req, bio))
654 return 0;
655 if (blk_integrity_rq(req) &&
656 integrity_req_gap_front_merge(req, bio))
657 return 0;
658 if (!bio_crypt_ctx_front_mergeable(req, bio))
659 return 0;
660 if (blk_rq_sectors(req) + bio_sectors(bio) >
661 blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
662 req_set_nomerge(req->q, req);
663 return 0;
664 }
665
666 return ll_new_hw_segment(req, bio, nr_segs);
667 }
668
669 static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
670 struct request *next)
671 {
672 unsigned short segments = blk_rq_nr_discard_segments(req);
673
674 if (segments >= queue_max_discard_segments(q))
675 goto no_merge;
676 if (blk_rq_sectors(req) + bio_sectors(next->bio) >
677 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
678 goto no_merge;
679
680 req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
681 return true;
682 no_merge:
683 req_set_nomerge(q, req);
684 return false;
685 }
686
687 static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
688 struct request *next)
689 {
690 int total_phys_segments;
691
692 if (req_gap_back_merge(req, next->bio))
693 return 0;
694
695 /*
696 * Will it become too large?
697 */
698 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
699 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
700 return 0;
701
702 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
703 if (total_phys_segments > blk_rq_get_max_segments(req))
704 return 0;
705
706 if (!blk_cgroup_mergeable(req, next->bio))
707 return 0;
708
709 if (blk_integrity_merge_rq(q, req, next) == false)
710 return 0;
711
712 if (!bio_crypt_ctx_merge_rq(req, next))
713 return 0;
714
715 /* Merge is OK... */
716 req->nr_phys_segments = total_phys_segments;
717 return 1;
718 }
719
720 /**
721 * blk_rq_set_mixed_merge - mark a request as mixed merge
722 * @rq: request to mark as mixed merge
723 *
724 * Description:
725 * @rq is about to be mixed merged. Make sure the attributes
726 * which can be mixed are set in each bio and mark @rq as mixed
727 * merged.
728 */
729 static void blk_rq_set_mixed_merge(struct request *rq)
730 {
731 blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK;
732 struct bio *bio;
733
734 if (rq->rq_flags & RQF_MIXED_MERGE)
735 return;
736
737 /*
738 * @rq will no longer represent mixable attributes for all the
739 * contained bios. It will just track those of the first one.
740 * Distribute the attributes to each bio.
741 */
742 for (bio = rq->bio; bio; bio = bio->bi_next) {
743 WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
744 (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
745 bio->bi_opf |= ff;
746 }
747 rq->rq_flags |= RQF_MIXED_MERGE;
748 }
749
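/*
 * For merge decisions a readahead bio is treated as if all failfast bits
 * were set (see the comment below about MIXED_MERGE and RA bios).
 */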
750 static inline blk_opf_t bio_failfast(const struct bio *bio)
751 {
752 if (bio->bi_opf & REQ_RAHEAD)
753 return REQ_FAILFAST_MASK;
754
755 return bio->bi_opf & REQ_FAILFAST_MASK;
756 }
757
758 /*
759 * After the request is marked as MIXED_MERGE, any new RA bio has to be
760 * marked as failfast, and the request's failfast flags have to be updated
761 * in the case of a front merge.
762 */
763 static inline void blk_update_mixed_merge(struct request *req,
764 struct bio *bio, bool front_merge)
765 {
766 if (req->rq_flags & RQF_MIXED_MERGE) {
767 if (bio->bi_opf & REQ_RAHEAD)
768 bio->bi_opf |= REQ_FAILFAST_MASK;
769
770 if (front_merge) {
771 req->cmd_flags &= ~REQ_FAILFAST_MASK;
772 req->cmd_flags |= bio->bi_opf & REQ_FAILFAST_MASK;
773 }
774 }
775 }
776
777 static void blk_account_io_merge_request(struct request *req)
778 {
779 if (blk_do_io_stat(req)) {
780 part_stat_lock();
781 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
782 part_stat_unlock();
783 }
784 }
785
786 static enum elv_merge blk_try_req_merge(struct request *req,
787 struct request *next)
788 {
789 if (blk_discard_mergable(req))
790 return ELEVATOR_DISCARD_MERGE;
791 else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
792 return ELEVATOR_BACK_MERGE;
793
794 return ELEVATOR_NO_MERGE;
795 }
796
797 /*
798 * For non-mq, this has to be called with the request spinlock acquired.
799 * For mq with scheduling, the appropriate queue wide lock should be held.
800 */
801 static struct request *attempt_merge(struct request_queue *q,
802 struct request *req, struct request *next)
803 {
804 if (!rq_mergeable(req) || !rq_mergeable(next))
805 return NULL;
806
807 if (req_op(req) != req_op(next))
808 return NULL;
809
810 if (rq_data_dir(req) != rq_data_dir(next))
811 return NULL;
812
813 /* Don't merge requests with different write hints. */
814 if (req->write_hint != next->write_hint)
815 return NULL;
816
817 if (req->ioprio != next->ioprio)
818 return NULL;
819
820 /*
821 * If we are allowed to merge, then append the bio list
822 * from 'next' to 'req' and release 'next'. merge_requests_fn
823 * will have updated the segment counts; update the sector
824 * counts here. Handle DISCARDs separately, as they
825 * have separate settings.
826 */
827
828 switch (blk_try_req_merge(req, next)) {
829 case ELEVATOR_DISCARD_MERGE:
830 if (!req_attempt_discard_merge(q, req, next))
831 return NULL;
832 break;
833 case ELEVATOR_BACK_MERGE:
834 if (!ll_merge_requests_fn(q, req, next))
835 return NULL;
836 break;
837 default:
838 return NULL;
839 }
840
841 /*
842 * If failfast settings disagree or either of the two is already
843 * a mixed merge, mark both as mixed before proceeding. This
844 * makes sure that all involved bios have mixable attributes
845 * set properly.
846 */
847 if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
848 (req->cmd_flags & REQ_FAILFAST_MASK) !=
849 (next->cmd_flags & REQ_FAILFAST_MASK)) {
850 blk_rq_set_mixed_merge(req);
851 blk_rq_set_mixed_merge(next);
852 }
853
854 /*
855 * At this point we have either done a back merge or front merge. We
856 * need the smaller start_time_ns of the merged requests to be the
857 * current request for accounting purposes.
858 */
859 if (next->start_time_ns < req->start_time_ns)
860 req->start_time_ns = next->start_time_ns;
861
862 req->biotail->bi_next = next->bio;
863 req->biotail = next->biotail;
864
865 req->__data_len += blk_rq_bytes(next);
866
867 if (!blk_discard_mergable(req))
868 elv_merge_requests(q, req, next);
869
870 blk_crypto_rq_put_keyslot(next);
871
872 /*
873 * 'next' is going away, so update stats accordingly
874 */
875 blk_account_io_merge_request(next);
876
877 trace_block_rq_merge(next);
878
879 /*
880 * ownership of the bios has passed from 'next' to 'req'; return 'next' for
881 * the caller to free
882 */
883 next->bio = NULL;
884 return next;
885 }
886
887 static struct request *attempt_back_merge(struct request_queue *q,
888 struct request *rq)
889 {
890 struct request *next = elv_latter_request(q, rq);
891
892 if (next)
893 return attempt_merge(q, rq, next);
894
895 return NULL;
896 }
897
898 static struct request *attempt_front_merge(struct request_queue *q,
899 struct request *rq)
900 {
901 struct request *prev = elv_former_request(q, rq);
902
903 if (prev)
904 return attempt_merge(q, prev, rq);
905
906 return NULL;
907 }
908
909 /*
910 * Try to merge 'next' into 'rq'. Return true if the merge happened, false
911 * otherwise. The caller is responsible for freeing 'next' if the merge
912 * happened.
913 */
914 bool blk_attempt_req_merge(struct request_queue *q, struct request *rq,
915 struct request *next)
916 {
917 return attempt_merge(q, rq, next);
918 }
919
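/*
 * Basic checks for whether @bio may be merged into @rq at all: the operation,
 * data direction, cgroup, integrity profile, crypto context, write hint and
 * I/O priority all have to be compatible.
 */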
920 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
921 {
922 if (!rq_mergeable(rq) || !bio_mergeable(bio))
923 return false;
924
925 if (req_op(rq) != bio_op(bio))
926 return false;
927
928 /* different data direction or already started, don't merge */
929 if (bio_data_dir(bio) != rq_data_dir(rq))
930 return false;
931
932 /* don't merge across cgroup boundaries */
933 if (!blk_cgroup_mergeable(rq, bio))
934 return false;
935
936 /* only merge integrity protected bio into ditto rq */
937 if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
938 return false;
939
940 /* Only merge if the crypt contexts are compatible */
941 if (!bio_crypt_rq_ctx_compatible(rq, bio))
942 return false;
943
944 /* Don't merge requests with different write hints. */
945 if (rq->write_hint != bio->bi_write_hint)
946 return false;
947
948 if (rq->ioprio != bio_prio(bio))
949 return false;
950
951 return true;
952 }
953
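/*
 * Decide whether @bio would merge at the back or the front of @rq based on
 * the sector positions, with discards handled via their own merge type.
 */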
954 enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
955 {
956 if (blk_discard_mergable(rq))
957 return ELEVATOR_DISCARD_MERGE;
958 else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
959 return ELEVATOR_BACK_MERGE;
960 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
961 return ELEVATOR_FRONT_MERGE;
962 return ELEVATOR_NO_MERGE;
963 }
964
965 static void blk_account_io_merge_bio(struct request *req)
966 {
967 if (!blk_do_io_stat(req))
968 return;
969
970 part_stat_lock();
971 part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
972 part_stat_unlock();
973 }
974
975 enum bio_merge_status {
976 BIO_MERGE_OK,
977 BIO_MERGE_NONE,
978 BIO_MERGE_FAILED,
979 };
980
981 static enum bio_merge_status bio_attempt_back_merge(struct request *req,
982 struct bio *bio, unsigned int nr_segs)
983 {
984 const blk_opf_t ff = bio_failfast(bio);
985
986 if (!ll_back_merge_fn(req, bio, nr_segs))
987 return BIO_MERGE_FAILED;
988
989 trace_block_bio_backmerge(bio);
990 rq_qos_merge(req->q, req, bio);
991
992 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
993 blk_rq_set_mixed_merge(req);
994
995 blk_update_mixed_merge(req, bio, false);
996
997 req->biotail->bi_next = bio;
998 req->biotail = bio;
999 req->__data_len += bio->bi_iter.bi_size;
1000
1001 bio_crypt_free_ctx(bio);
1002
1003 blk_account_io_merge_bio(req);
1004 return BIO_MERGE_OK;
1005 }
1006
1007 static enum bio_merge_status bio_attempt_front_merge(struct request *req,
1008 struct bio *bio, unsigned int nr_segs)
1009 {
1010 const blk_opf_t ff = bio_failfast(bio);
1011
1012 if (!ll_front_merge_fn(req, bio, nr_segs))
1013 return BIO_MERGE_FAILED;
1014
1015 trace_block_bio_frontmerge(bio);
1016 rq_qos_merge(req->q, req, bio);
1017
1018 if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
1019 blk_rq_set_mixed_merge(req);
1020
1021 blk_update_mixed_merge(req, bio, true);
1022
1023 bio->bi_next = req->bio;
1024 req->bio = bio;
1025
1026 req->__sector = bio->bi_iter.bi_sector;
1027 req->__data_len += bio->bi_iter.bi_size;
1028
1029 bio_crypt_do_front_merge(req, bio);
1030
1031 blk_account_io_merge_bio(req);
1032 return BIO_MERGE_OK;
1033 }
1034
1035 static enum bio_merge_status bio_attempt_discard_merge(struct request_queue *q,
1036 struct request *req, struct bio *bio)
1037 {
1038 unsigned short segments = blk_rq_nr_discard_segments(req);
1039
1040 if (segments >= queue_max_discard_segments(q))
1041 goto no_merge;
1042 if (blk_rq_sectors(req) + bio_sectors(bio) >
1043 blk_rq_get_max_sectors(req, blk_rq_pos(req)))
1044 goto no_merge;
1045
1046 rq_qos_merge(q, req, bio);
1047
1048 req->biotail->bi_next = bio;
1049 req->biotail = bio;
1050 req->__data_len += bio->bi_iter.bi_size;
1051 req->nr_phys_segments = segments + 1;
1052
1053 blk_account_io_merge_bio(req);
1054 return BIO_MERGE_OK;
1055 no_merge:
1056 req_set_nomerge(q, req);
1057 return BIO_MERGE_FAILED;
1058 }
1059
1060 static enum bio_merge_status blk_attempt_bio_merge(struct request_queue *q,
1061 struct request *rq,
1062 struct bio *bio,
1063 unsigned int nr_segs,
1064 bool sched_allow_merge)
1065 {
1066 if (!blk_rq_merge_ok(rq, bio))
1067 return BIO_MERGE_NONE;
1068
1069 switch (blk_try_merge(rq, bio)) {
1070 case ELEVATOR_BACK_MERGE:
1071 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1072 return bio_attempt_back_merge(rq, bio, nr_segs);
1073 break;
1074 case ELEVATOR_FRONT_MERGE:
1075 if (!sched_allow_merge || blk_mq_sched_allow_merge(q, rq, bio))
1076 return bio_attempt_front_merge(rq, bio, nr_segs);
1077 break;
1078 case ELEVATOR_DISCARD_MERGE:
1079 return bio_attempt_discard_merge(q, rq, bio);
1080 default:
1081 return BIO_MERGE_NONE;
1082 }
1083
1084 return BIO_MERGE_FAILED;
1085 }
1086
1087 /**
1088 * blk_attempt_plug_merge - try to merge with %current's plugged list
1089 * @q: request_queue new bio is being queued at
1090 * @bio: new bio being queued
1091 * @nr_segs: number of segments in @bio
1093 *
1094 * Determine whether @bio being queued on @q can be merged with the previous
1095 * request on %current's plugged list. Returns %true if merge was successful,
1096 * otherwise %false.
1097 *
1098 * Plugging coalesces IOs from the same issuer for the same purpose without
1099 * going through @q->queue_lock. As such it's more of an issuing mechanism
1100 * than scheduling, and the request, while it may have elvpriv data, is not
1101 * added to the elevator at this point. In addition, we don't have
1102 * reliable access to the elevator outside queue lock. Only check basic
1103 * merging parameters without querying the elevator.
1104 *
1105 * Caller must ensure !blk_queue_nomerges(q) beforehand.
1106 */
1107 bool blk_attempt_plug_merge(struct request_queue *q, struct bio *bio,
1108 unsigned int nr_segs)
1109 {
1110 struct blk_plug *plug;
1111 struct request *rq;
1112
1113 plug = blk_mq_plug(bio);
1114 if (!plug || rq_list_empty(plug->mq_list))
1115 return false;
1116
1117 rq_list_for_each(&plug->mq_list, rq) {
1118 if (rq->q == q) {
1119 if (blk_attempt_bio_merge(q, rq, bio, nr_segs, false) ==
1120 BIO_MERGE_OK)
1121 return true;
1122 break;
1123 }
1124
1125 /*
1126 * Only keep iterating plug list for merges if we have multiple
1127 * queues
1128 */
1129 if (!plug->multiple_queues)
1130 break;
1131 }
1132 return false;
1133 }
1134
1135 /*
1136 * Iterate list of requests and see if we can merge this bio with any
1137 * of them.
1138 */
1139 bool blk_bio_list_merge(struct request_queue *q, struct list_head *list,
1140 struct bio *bio, unsigned int nr_segs)
1141 {
1142 struct request *rq;
1143 int checked = 8;
1144
1145 list_for_each_entry_reverse(rq, list, queuelist) {
1146 if (!checked--)
1147 break;
1148
1149 switch (blk_attempt_bio_merge(q, rq, bio, nr_segs, true)) {
1150 case BIO_MERGE_NONE:
1151 continue;
1152 case BIO_MERGE_OK:
1153 return true;
1154 case BIO_MERGE_FAILED:
1155 return false;
1156 }
1157
1158 }
1159
1160 return false;
1161 }
1162 EXPORT_SYMBOL_GPL(blk_bio_list_merge);
1163
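/*
 * Ask the elevator for a merge candidate for @bio and attempt the merge.
 * Returns true if @bio was merged. If the grown request could additionally
 * be merged with a neighbouring request, *@merged_request is set to the
 * request that the caller must free.
 */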
1164 bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio,
1165 unsigned int nr_segs, struct request **merged_request)
1166 {
1167 struct request *rq;
1168
1169 switch (elv_merge(q, &rq, bio)) {
1170 case ELEVATOR_BACK_MERGE:
1171 if (!blk_mq_sched_allow_merge(q, rq, bio))
1172 return false;
1173 if (bio_attempt_back_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1174 return false;
1175 *merged_request = attempt_back_merge(q, rq);
1176 if (!*merged_request)
1177 elv_merged_request(q, rq, ELEVATOR_BACK_MERGE);
1178 return true;
1179 case ELEVATOR_FRONT_MERGE:
1180 if (!blk_mq_sched_allow_merge(q, rq, bio))
1181 return false;
1182 if (bio_attempt_front_merge(rq, bio, nr_segs) != BIO_MERGE_OK)
1183 return false;
1184 *merged_request = attempt_front_merge(q, rq);
1185 if (!*merged_request)
1186 elv_merged_request(q, rq, ELEVATOR_FRONT_MERGE);
1187 return true;
1188 case ELEVATOR_DISCARD_MERGE:
1189 return bio_attempt_discard_merge(q, rq, bio) == BIO_MERGE_OK;
1190 default:
1191 return false;
1192 }
1193 }
1194 EXPORT_SYMBOL_GPL(blk_mq_sched_try_merge);