// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"
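/*
 * Check whether appending @next after @prev would leave a gap that violates
 * the queue's virt_boundary mask; @prev_rq, when set, supplies the first
 * bvec to check against.
 */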
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;
	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;
	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
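/* Gap checks used when appending a bio to the back or the front of a request. */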
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}
static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}
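/*
 * Split a discard bio so that it honours max_discard_sectors and ends on a
 * discard-granularity aligned sector.
 */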
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors))
		return NULL;

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
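/*
 * The write-zeroes and write-same splits below simply cap the bio at the
 * respective queue limit; they always count as a single segment.
 */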
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}
static struct bio *blk_bio_write_same_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
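/*
 * Maximum number of sectors that may be issued starting at this bio's sector,
 * rounded down to the logical block size.
 */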
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}
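/*
 * Largest segment that may start at @offset without crossing the queue's
 * segment boundary mask or exceeding the maximum segment size.
 */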
static unsigned get_max_segment_size(struct request_queue *q,
				     unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);

	/* default segment boundary mask means no boundary limit */
	if (mask == BLK_SEG_BOUNDARY_MASK)
		return queue_max_segment_size(q);

	return min_t(unsigned long, mask - (mask & offset) + 1,
		     queue_max_segment_size(q));
}
/*
 * Split the bvec @bv into segments, and update the segment count and,
 * optionally, the sector count.
 */
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
		unsigned *nsegs, unsigned *sectors, unsigned max_segs)
{
	unsigned len = bv->bv_len;
	unsigned total_len = 0;
	unsigned new_nsegs = 0, seg_size = 0;

	/*
	 * A multi-page bvec may be too big to hold in one segment, so the
	 * current bvec has to be split into multiple segments.
	 */
	while (len && new_nsegs + *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		new_nsegs++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	if (new_nsegs) {
		*nsegs += new_nsegs;
		if (sectors)
			*sectors += total_len >> 9;
	}

	/* split in the middle of the bvec if len != 0 */
	return !!len;
}
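/*
 * Walk the bvecs of @bio and split it once the sector, segment or
 * virt-boundary limits of the queue would be exceeded.
 */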
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < max_segs &&
			    sectors < max_sectors) {
				/* split in the middle of bvec */
				bv.bv_len = (max_sectors - sectors) << 9;
				bvec_split_segs(q, &bv, &nsegs,
						&sectors, max_segs);
			}
			goto split;
		}

		if (nsegs == max_segs)
			goto split;

		bvprv = bv;
		bvprvp = &bvprv;

		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
				max_segs)) {
			goto split;
		}
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	return do_split ? new : NULL;
}
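/*
 * Split *bio if it exceeds the queue limits: the front part replaces *bio and
 * the remainder is resubmitted via generic_make_request().
 */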
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				&nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				&nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever on waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);
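/*
 * Recompute the number of physical segments by walking every bvec of every
 * bio in the chain.
 */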
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio)
{
	unsigned int nr_phys_segs = 0;
	struct bvec_iter iter;
	struct bio_vec bv;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_bvec(bv, bio, iter)
			bvec_split_segs(q, &bv, &nr_phys_segs, NULL, UINT_MAX);

	return nr_phys_segs;
}
void blk_recalc_rq_segments(struct request *rq)
{
	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio);
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	struct bio *nxt = bio->bi_next;

	bio->bi_next = NULL;
	bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio);
	bio->bi_next = nxt;

	bio_set_flag(bio, BIO_SEG_VALID);
}
static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}
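/*
 * Map a (possibly multi-page) bvec to one or more scatterlist entries, each
 * clamped to the queue's maximum segment size.
 */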
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}
static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}
/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}
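/*
 * Map all bvecs of a bio chain to a scatterlist, merging physically
 * contiguous bvecs from adjacent bios where the segment limits allow it.
 */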
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two bios given we
			 * have done bio internal merge when adding pages
			 * to bio
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have been wrong if the figured number of
	 * segments is bigger than the number of req's physical segments
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
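/*
 * Account the physical segments of @bio to @req if the combined count stays
 * within the queue limit; otherwise mark the request as not mergeable.
 */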
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}
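/*
 * Check whether two requests may be combined without exceeding the size and
 * segment limits of the queue.
 */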
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}
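/* Drop the in-flight accounting for a request that is merged away. */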
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio as a range and
 * sends them to the controller together. The ranges need not be contiguous.
 * Otherwise, the bios/requests are handled the same as others, which must
 * be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}
/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}
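/*
 * Try to merge @rq with the request the elevator reports as following it
 * (back merge) or preceding it (front merge).
 */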
struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}
struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}
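/* Basic eligibility checks before a bio may be merged into a request. */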
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}