// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static inline bool bio_will_gap(struct request_queue *q,
                struct request *prev_rq, struct bio *prev, struct bio *next)
{
        struct bio_vec pb, nb;

        if (!bio_has_data(prev) || !queue_virt_boundary(q))
                return false;

        /*
         * Don't merge if the 1st bio starts with a non-zero offset; otherwise
         * it is quite difficult to respect the sg gap limit. We work hard to
         * merge the huge number of small single bios generated by e.g. mkfs.
         */
        if (prev_rq)
                bio_get_first_bvec(prev_rq->bio, &pb);
        else
                bio_get_first_bvec(prev, &pb);
        if (pb.bv_offset & queue_virt_boundary(q))
                return true;

        /*
         * We don't need to worry about the situation that the merged segment
         * ends in an unaligned virt boundary:
         *
         * - if 'pb' ends aligned, the merged segment ends aligned
         * - if 'pb' ends unaligned, the next bio must include
         *   one single bvec of 'nb', otherwise the 'nb' can't
         *   merge with 'pb'
         */
        bio_get_last_bvec(prev, &pb);
        bio_get_first_bvec(next, &nb);
        if (biovec_phys_mergeable(q, &pb, &nb))
                return false;
        return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
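
/*
 * For illustration (hypothetical numbers, not part of the original code):
 * with a 4KiB virt boundary, queue_virt_boundary(q) == 0xfff. If the
 * previous bio's last bvec ends at an offset that is not a multiple of
 * the boundary (say bv_offset + bv_len == 0x800), or the next bio's
 * first bvec does not start at such a multiple, and the two bvecs are
 * not physically contiguous, the hardware would see a hole in the
 * segment, so bio_will_gap() returns true and the merge is rejected.
 */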

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
        return bio_will_gap(req->q, NULL, bio, req->bio);
}

static struct bio *blk_bio_discard_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *nsegs)
{
        unsigned int max_discard_sectors, granularity;
        int alignment;
        sector_t tmp;
        unsigned split_sectors;

        *nsegs = 1;

        /* Zero-sector (unknown) and one-sector granularities are the same. */
        granularity = max(q->limits.discard_granularity >> 9, 1U);

        max_discard_sectors = min(q->limits.max_discard_sectors,
                        bio_allowed_max_sectors(q));
        max_discard_sectors -= max_discard_sectors % granularity;

        if (unlikely(!max_discard_sectors)) {
                /* XXX: warn */
                return NULL;
        }

        if (bio_sectors(bio) <= max_discard_sectors)
                return NULL;

        split_sectors = max_discard_sectors;

        /*
         * If the next starting sector would be misaligned, stop the discard at
         * the previous aligned sector.
         */
        alignment = (q->limits.discard_alignment >> 9) % granularity;

        tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
        tmp = sector_div(tmp, granularity);

        if (split_sectors > tmp)
                split_sectors -= tmp;

        return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
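
/*
 * Worked example (hypothetical limits, for illustration only): with
 * discard_granularity == 4KiB (granularity == 8 sectors),
 * discard_alignment == 1KiB (alignment == 2 sectors), a bio starting at
 * sector 11 and split_sectors initially 16:
 *
 *      tmp = 11 + 16 - 2 = 25;  25 % 8 == 1 (sector_div() returns the
 *      remainder), so split_sectors = 16 - 1 = 15
 *
 * The remainder then starts at sector 26, and (26 - 2) % 8 == 0, i.e.
 * the next discard begins on a granularity-aligned sector.
 */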

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
                struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
        *nsegs = 0;

        if (!q->limits.max_write_zeroes_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
                                            struct bio *bio,
                                            struct bio_set *bs,
                                            unsigned *nsegs)
{
        *nsegs = 1;

        if (!q->limits.max_write_same_sectors)
                return NULL;

        if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
                return NULL;

        return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

/*
 * Return the maximum number of sectors from the start of a bio that may be
 * submitted as a single request to a block device. If enough sectors remain,
 * align the end to the physical block size. Otherwise align the end to the
 * logical block size. This approach minimizes the number of non-aligned
 * requests that are submitted to a block device if the start of a bio is not
 * aligned to a physical block boundary.
 */
static inline unsigned get_max_io_size(struct request_queue *q,
                                       struct bio *bio)
{
        unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
        unsigned max_sectors = sectors;
        unsigned pbs = queue_physical_block_size(q) >> SECTOR_SHIFT;
        unsigned lbs = queue_logical_block_size(q) >> SECTOR_SHIFT;
        unsigned start_offset = bio->bi_iter.bi_sector & (pbs - 1);

        max_sectors += start_offset;
        max_sectors &= ~(pbs - 1);
        if (max_sectors > start_offset)
                return max_sectors - start_offset;

        /* Fall back to an end aligned to the logical block size. */
        return sectors & ~(lbs - 1);
}
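
/*
 * Worked example (hypothetical limits): physical block size 4KiB
 * (pbs == 8), logical block size 512B (lbs == 1), bi_sector == 3 and
 * blk_max_size_offset() == 1280 sectors. Then start_offset == 3,
 * max_sectors == (1280 + 3) & ~7 == 1280, and the function returns
 * 1280 - 3 == 1277, so the first split ends exactly on a physical
 * block boundary (sector 1280).
 */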

static unsigned get_max_segment_size(const struct request_queue *q,
                                     unsigned offset)
{
        unsigned long mask = queue_segment_boundary(q);

        /* default segment boundary mask means no boundary limit */
        if (mask == BLK_SEG_BOUNDARY_MASK)
                return queue_max_segment_size(q);

        return min_t(unsigned long, mask - (mask & offset) + 1,
                     queue_max_segment_size(q));
}
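
/*
 * For example (hypothetical limits): with a 64KiB segment boundary
 * (mask == 0xffff), max_segment_size == 64KiB and offset == 0xf000,
 * the largest segment that does not straddle the boundary is
 * 0xffff - 0xf000 + 1 == 0x1000 bytes (4KiB).
 */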

/**
 * bvec_split_segs - verify whether or not a bvec should be split in the middle
 * @q:        [in] request queue associated with the bio associated with @bv
 * @bv:       [in] bvec to examine
 * @nsegs:    [in,out] Number of segments in the bio being built. Incremented
 *            by the number of segments from @bv that may be appended to that
 *            bio without exceeding @max_segs
 * @sectors:  [in,out] Number of sectors in the bio being built. Incremented
 *            by the number of sectors from @bv that may be appended to that
 *            bio without exceeding @max_sectors
 * @max_segs: [in] upper bound for *@nsegs
 * @max_sectors: [in] upper bound for *@sectors
 *
 * When splitting a bio, it can happen that a bvec is encountered that is too
 * big to fit in a single segment and hence that it has to be split in the
 * middle. This function verifies whether or not that should happen. The value
 * %true is returned if and only if appending the entire @bv to a bio with
 * *@nsegs segments and *@sectors sectors would make that bio unacceptable for
 * the block driver.
 */
static bool bvec_split_segs(const struct request_queue *q,
                            const struct bio_vec *bv, unsigned *nsegs,
                            unsigned *sectors, unsigned max_segs,
                            unsigned max_sectors)
{
        unsigned max_len = (min(max_sectors, UINT_MAX >> 9) - *sectors) << 9;
        unsigned len = min(bv->bv_len, max_len);
        unsigned total_len = 0;
        unsigned seg_size = 0;

        while (len && *nsegs < max_segs) {
                seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
                seg_size = min(seg_size, len);

                (*nsegs)++;
                total_len += seg_size;
                len -= seg_size;

                if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
                        break;
        }

        *sectors += total_len >> 9;

        /* tell the caller to split the bvec if it is too big to fit */
        return len > 0 || bv->bv_len > max_len;
}
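
/*
 * For example (hypothetical limits): a 128KiB bvec on a queue with a
 * 64KiB max segment size and no virt boundary is accounted as two
 * 64KiB segments; *nsegs grows by 2, *sectors by 256, and the function
 * returns false (no split needed) as long as max_segs and max_sectors
 * are not exceeded.
 */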

/**
 * blk_bio_segment_split - split a bio in two bios
 * @q:    [in] request queue pointer
 * @bio:  [in] bio to be split
 * @bs:   [in] bio set to allocate the clone from
 * @segs: [out] number of segments in the bio with the first half of the sectors
 *
 * Clone @bio, update the bi_iter of the clone to represent the first sectors
 * of @bio and update @bio->bi_iter to represent the remaining sectors. The
 * following is guaranteed for the cloned bio:
 * - That it has at most get_max_io_size(@q, @bio) sectors.
 * - That it has at most queue_max_segments(@q) segments.
 *
 * Except for discard requests the cloned bio will point at the bi_io_vec of
 * the original bio. It is the responsibility of the caller to ensure that the
 * original bio is not freed before the cloned bio. The caller is also
 * responsible for ensuring that @bs is only destroyed after processing of the
 * split bio has finished.
 */
static struct bio *blk_bio_segment_split(struct request_queue *q,
                                         struct bio *bio,
                                         struct bio_set *bs,
                                         unsigned *segs)
{
        struct bio_vec bv, bvprv, *bvprvp = NULL;
        struct bvec_iter iter;
        unsigned nsegs = 0, sectors = 0;
        const unsigned max_sectors = get_max_io_size(q, bio);
        const unsigned max_segs = queue_max_segments(q);

        bio_for_each_bvec(bv, bio, iter) {
                /*
                 * If the queue doesn't support SG gaps and adding this
                 * offset would create a gap, disallow it.
                 */
                if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
                        goto split;

                if (nsegs < max_segs &&
                    sectors + (bv.bv_len >> 9) <= max_sectors &&
                    bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
                        nsegs++;
                        sectors += bv.bv_len >> 9;
                } else if (bvec_split_segs(q, &bv, &nsegs, &sectors, max_segs,
                                           max_sectors)) {
                        goto split;
                }

                bvprv = bv;
                bvprvp = &bvprv;
        }

        *segs = nsegs;
        return NULL;
split:
        *segs = nsegs;
        return bio_split(bio, sectors, GFP_NOIO, bs);
}

/**
 * __blk_queue_split - split a bio and submit the second half
 * @q:       [in] request queue pointer
 * @bio:     [in, out] bio to be split
 * @nr_segs: [out] number of segments in the first bio
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. If the second bio is still too
 * big it will be split by a recursive call to this function. Since this
 * function may allocate a new bio from @q->bio_split, it is the responsibility
 * of the caller to ensure that @q is only released after processing of the
 * split bio has finished.
 */
void __blk_queue_split(struct request_queue *q, struct bio **bio,
                unsigned int *nr_segs)
{
        struct bio *split = NULL;

        switch (bio_op(*bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
                split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
                break;
        case REQ_OP_WRITE_ZEROES:
                split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        case REQ_OP_WRITE_SAME:
                split = blk_bio_write_same_split(q, *bio, &q->bio_split,
                                nr_segs);
                break;
        default:
                /*
                 * All drivers must accept single-segment bios that are <=
                 * PAGE_SIZE. This is a quick and dirty check that relies on
                 * the fact that bi_io_vec[0] is always valid if a bio has data.
                 * The check might lead to occasional false negatives when bios
                 * are cloned, but compared to the performance impact of cloned
                 * bios themselves the loop below doesn't matter anyway.
                 */
                if (!q->limits.chunk_sectors &&
                    (*bio)->bi_vcnt == 1 &&
                    ((*bio)->bi_io_vec[0].bv_len +
                     (*bio)->bi_io_vec[0].bv_offset) <= PAGE_SIZE) {
                        *nr_segs = 1;
                        break;
                }
                split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
                break;
        }

        if (split) {
                /* there is no chance to merge the split bio */
                split->bi_opf |= REQ_NOMERGE;

                /*
                 * Since we're recursing into make_request here, ensure
                 * that we mark this bio as already having entered the queue.
                 * If not, and the queue is going away, we can get stuck
                 * forever on waiting for the queue reference to drop. But
                 * that will never happen, as we're already holding a
                 * reference to it.
                 */
                bio_set_flag(*bio, BIO_QUEUE_ENTERED);

                bio_chain(split, *bio);
                trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
                generic_make_request(*bio);
                *bio = split;
        }
}
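
/*
 * For illustration (hypothetical limits): a 1MiB bio on a queue that
 * allows roughly 256KiB per request is split into a ~256KiB first half
 * (returned in *bio) and a remainder that is chained and resubmitted
 * via generic_make_request(); the remainder re-enters this function
 * and is split again until every piece fits within the queue limits.
 */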

/**
 * blk_queue_split - split a bio and submit the second half
 * @q:   [in] request queue pointer
 * @bio: [in, out] bio to be split
 *
 * Split a bio into two bios, chain the two bios, submit the second half and
 * store a pointer to the first half in *@bio. Since this function may allocate
 * a new bio from @q->bio_split, it is the responsibility of the caller to
 * ensure that @q is only released after processing of the split bio has
 * finished.
 */
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
        unsigned int nr_segs;

        __blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);

unsigned int blk_recalc_rq_segments(struct request *rq)
{
        unsigned int nr_phys_segs = 0;
        unsigned int nr_sectors = 0;
        struct req_iterator iter;
        struct bio_vec bv;

        if (!rq->bio)
                return 0;

        switch (bio_op(rq->bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_ZEROES:
                return 0;
        case REQ_OP_WRITE_SAME:
                return 1;
        }

        rq_for_each_bvec(bv, rq, iter)
                bvec_split_segs(rq->q, &bv, &nr_phys_segs, &nr_sectors,
                                UINT_MAX, UINT_MAX);
        return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
                struct scatterlist *sglist)
{
        if (!*sg)
                return sglist;

        /*
         * If the driver previously mapped a shorter list, we could see a
         * termination bit prematurely unless it fully inits the sg table
         * on each mapping. We KNOW that there must be more entries here
         * or the driver would be buggy, so force clear the termination bit
         * to avoid doing a full sg_init_table() in drivers for each command.
         */
        sg_unmark_end(*sg);
        return sg_next(*sg);
}

static unsigned blk_bvec_map_sg(struct request_queue *q,
                struct bio_vec *bvec, struct scatterlist *sglist,
                struct scatterlist **sg)
{
        unsigned nbytes = bvec->bv_len;
        unsigned nsegs = 0, total = 0;

        while (nbytes > 0) {
                unsigned offset = bvec->bv_offset + total;
                unsigned len = min(get_max_segment_size(q, offset), nbytes);
                struct page *page = bvec->bv_page;

                /*
                 * Unfortunately a fair number of drivers barf on scatterlists
                 * that have an offset larger than PAGE_SIZE, despite other
                 * subsystems dealing with that invariant just fine. For now
                 * stick to the legacy format where we never present those from
                 * the block layer, but the code below should be removed once
                 * these offenders (mostly MMC/SD drivers) are fixed.
                 */
                page += (offset >> PAGE_SHIFT);
                offset &= ~PAGE_MASK;

                *sg = blk_next_sg(sg, sglist);
                sg_set_page(*sg, page, len, offset);

                total += len;
                nbytes -= len;
                nsegs++;
        }

        return nsegs;
}
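
/*
 * For example (hypothetical limits, assuming 4KiB pages): a 16KiB bvec
 * starting at bv_offset 0 on a queue whose max segment size is 4KiB
 * maps to four 4KiB sg entries, each pointing at a consecutive page,
 * so that no sg entry carries an offset larger than PAGE_SIZE.
 */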

static inline int __blk_bvec_map_sg(struct bio_vec bv,
                struct scatterlist *sglist, struct scatterlist **sg)
{
        *sg = blk_next_sg(sg, sglist);
        sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
        return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
                           struct bio_vec *bvprv, struct scatterlist **sg)
{
        int nbytes = bvec->bv_len;

        if (!*sg)
                return false;

        if ((*sg)->length + nbytes > queue_max_segment_size(q))
                return false;

        if (!biovec_phys_mergeable(q, bvprv, bvec))
                return false;

        (*sg)->length += nbytes;

        return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
                             struct scatterlist *sglist,
                             struct scatterlist **sg)
{
        struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
        struct bvec_iter iter;
        int nsegs = 0;
        bool new_bio = false;

        for_each_bio(bio) {
                bio_for_each_bvec(bvec, bio, iter) {
                        /*
                         * Only try to merge bvecs from two bios, since
                         * bvec-internal merging was already done when the
                         * pages were added to the bio.
                         */
                        if (new_bio &&
                            __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
                                goto next_bvec;

                        if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
                                nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
                        else
                                nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
                        new_bio = false;
                }
                if (likely(bio->bi_iter.bi_size)) {
                        bvprv = bvec;
                        new_bio = true;
                }
        }

        return nsegs;
}

/*
 * Map a request to a scatterlist and return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
                  struct scatterlist *sglist)
{
        struct scatterlist *sg = NULL;
        int nsegs = 0;

        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
        else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
                nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
        else if (rq->bio)
                nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

        if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
            (blk_rq_bytes(rq) & q->dma_pad_mask)) {
                unsigned int pad_len =
                        (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

                sg->length += pad_len;
                rq->extra_len += pad_len;
        }

        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
                if (op_is_write(req_op(rq)))
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);

                sg_unmark_end(sg);
                sg = sg_next(sg);
                sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
                            q->dma_drain_size,
                            ((unsigned long)q->dma_drain_buffer) &
                            (PAGE_SIZE - 1));
                nsegs++;
                rq->extra_len += q->dma_drain_size;
        }

        if (sg)
                sg_mark_end(sg);

        /*
         * Something must have gone wrong if the computed number of segments
         * is bigger than the number of the request's physical segments.
         */
        WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

        return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
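
/*
 * Typical driver-side usage (a sketch; the local names are illustrative,
 * not part of this file):
 *
 *      struct scatterlist sgl[BLK_MAX_SEGMENTS];
 *      int nsegs;
 *
 *      sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *      nsegs = blk_rq_map_sg(q, rq, sgl);
 *      // hand the first 'nsegs' entries to the DMA engine,
 *      // e.g. via dma_map_sg()
 */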

static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
                unsigned int nr_phys_segs)
{
        if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
                goto no_merge;

        if (blk_integrity_merge_bio(req->q, req, bio) == false)
                goto no_merge;

        /*
         * This will form the start of a new hw segment. Bump both
         * counters.
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;

no_merge:
        req_set_nomerge(req->q, req);
        return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
        if (req_gap_back_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_back_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}

int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
        if (req_gap_front_merge(req, bio))
                return 0;
        if (blk_integrity_rq(req) &&
            integrity_req_gap_front_merge(req, bio))
                return 0;
        if (blk_rq_sectors(req) + bio_sectors(bio) >
            blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
                req_set_nomerge(req->q, req);
                return 0;
        }

        return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
                struct request *next)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);

        if (segments >= queue_max_discard_segments(q))
                goto no_merge;
        if (blk_rq_sectors(req) + bio_sectors(next->bio) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                goto no_merge;

        req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
        return true;
no_merge:
        req_set_nomerge(q, req);
        return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
                                struct request *next)
{
        int total_phys_segments;

        if (req_gap_back_merge(req, next->bio))
                return 0;

        /*
         * Will it become too large?
         */
        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
            blk_rq_get_max_sectors(req, blk_rq_pos(req)))
                return 0;

        total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
        if (total_phys_segments > queue_max_segments(q))
                return 0;

        if (blk_integrity_merge_rq(q, req, next) == false)
                return 0;

        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged. Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
        unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
        struct bio *bio;

        if (rq->rq_flags & RQF_MIXED_MERGE)
                return;

        /*
         * @rq will no longer represent mixable attributes for all the
         * contained bios. It will just track those of the first one.
         * Distribute the attributes to each bio.
         */
        for (bio = rq->bio; bio; bio = bio->bi_next) {
                WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
                             (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
                bio->bi_opf |= ff;
        }
        rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
        if (blk_do_io_stat(req)) {
                struct hd_struct *part;

                part_stat_lock();
                part = req->part;

                part_dec_in_flight(req->q, part, rq_data_dir(req));

                hd_struct_put(part);
                part_stat_unlock();
        }
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio as a range
 * and sends them to the controller together. The ranges need not be
 * contiguous.
 * Otherwise, the bios/requests are handled the same as others, which
 * must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
        if (req_op(req) == REQ_OP_DISCARD &&
            queue_max_discard_segments(req->q) > 1)
                return true;
        return false;
}

static enum elv_merge blk_try_req_merge(struct request *req,
                                        struct request *next)
{
        if (blk_discard_mergable(req))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
                return ELEVATOR_BACK_MERGE;

        return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
                                     struct request *req, struct request *next)
{
        if (!rq_mergeable(req) || !rq_mergeable(next))
                return NULL;

        if (req_op(req) != req_op(next))
                return NULL;

        if (rq_data_dir(req) != rq_data_dir(next)
            || req->rq_disk != next->rq_disk)
                return NULL;

        if (req_op(req) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(req->bio, next->bio))
                return NULL;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (req->write_hint != next->write_hint)
                return NULL;

        if (req->ioprio != next->ioprio)
                return NULL;

        /*
         * If we are allowed to merge, then append the bio list from next
         * to rq and release next. merge_requests_fn will have updated the
         * segment counts; update the sector counts here. Handle DISCARDs
         * separately, as they have separate settings.
         */

        switch (blk_try_req_merge(req, next)) {
        case ELEVATOR_DISCARD_MERGE:
                if (!req_attempt_discard_merge(q, req, next))
                        return NULL;
                break;
        case ELEVATOR_BACK_MERGE:
                if (!ll_merge_requests_fn(q, req, next))
                        return NULL;
                break;
        default:
                return NULL;
        }

        /*
         * If failfast settings disagree or any of the two is already
         * a mixed merge, mark both as mixed before proceeding. This
         * makes sure that all involved bios have mixable attributes
         * set properly.
         */
        if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
            (req->cmd_flags & REQ_FAILFAST_MASK) !=
            (next->cmd_flags & REQ_FAILFAST_MASK)) {
                blk_rq_set_mixed_merge(req);
                blk_rq_set_mixed_merge(next);
        }

        /*
         * At this point we have either done a back merge or front merge. We
         * need the smaller start_time_ns of the merged requests to be the
         * current request for accounting purposes.
         */
        if (next->start_time_ns < req->start_time_ns)
                req->start_time_ns = next->start_time_ns;

        req->biotail->bi_next = next->bio;
        req->biotail = next->biotail;

        req->__data_len += blk_rq_bytes(next);

        if (!blk_discard_mergable(req))
                elv_merge_requests(q, req, next);

        /*
         * 'next' is going away, so update stats accordingly
         */
        blk_account_io_merge(next);

        /*
         * Ownership of the bios has passed from next to req; return 'next'
         * for the caller to free.
         */
        next->bio = NULL;
        return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
        struct request *next = elv_latter_request(q, rq);

        if (next)
                return attempt_merge(q, rq, next);

        return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
        struct request *prev = elv_former_request(q, rq);

        if (prev)
                return attempt_merge(q, prev, rq);

        return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
                          struct request *next)
{
        struct request *free;

        free = attempt_merge(q, rq, next);
        if (free) {
                blk_put_request(free);
                return 1;
        }

        return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;

        if (req_op(rq) != bio_op(bio))
                return false;

        /* different data direction or already started, don't merge */
        if (bio_data_dir(bio) != rq_data_dir(rq))
                return false;

        /* must be same device */
        if (rq->rq_disk != bio->bi_disk)
                return false;

        /* only merge an integrity-protected bio into a likewise-protected rq */
        if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
                return false;

        /* must be using the same buffer */
        if (req_op(rq) == REQ_OP_WRITE_SAME &&
            !blk_write_same_mergeable(rq->bio, bio))
                return false;

        /*
         * Don't allow merge of different write hints, or for a hint with
         * non-hint IO.
         */
        if (rq->write_hint != bio->bi_write_hint)
                return false;

        if (rq->ioprio != bio_prio(bio))
                return false;

        return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
        if (blk_discard_mergable(rq))
                return ELEVATOR_DISCARD_MERGE;
        else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
                return ELEVATOR_BACK_MERGE;
        else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
                return ELEVATOR_FRONT_MERGE;
        return ELEVATOR_NO_MERGE;
}
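
/*
 * For example: a request covering sectors [0, 8) and a bio starting at
 * sector 8 yield ELEVATOR_BACK_MERGE, while a bio covering [0, 8) in
 * front of a request starting at sector 8 yields ELEVATOR_FRONT_MERGE;
 * anything non-adjacent (and non-discard) is ELEVATOR_NO_MERGE.
 */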