/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

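/*
 * Illustrative example (not part of the original file): with a 1 MiB
 * discard granularity (granularity = 2048 sectors), discard_alignment 0
 * and max_discard_sectors = 8192, a large discard starting at sector 100
 * is first capped at split_sectors = 8192.  sector_div() then yields
 * tmp = (100 + 8192) % 2048 = 100, so split_sectors becomes 8092 and the
 * remainder of the discard starts at sector 8192, a granularity boundary.
 */
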
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

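/*
 * Illustrative example (not part of the original file): for a 4096-byte
 * logical block size, mask = 4095 and mask >> 9 = 7, so the sector count
 * returned by blk_max_size_offset() is rounded down to a multiple of 8
 * sectors, i.e. to a whole number of logical blocks.
 */
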
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);
	unsigned bvecs = 0;

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * With arbitrary bio size, the incoming bio may be very
		 * big. We have to split the bio into small bios so that
		 * each holds at most BIO_MAX_PAGES bvecs because
		 * bio_clone() can fail to allocate big bvecs.
		 *
		 * It would be better to apply the limit per request
		 * queue in which bio_clone() is involved, instead of
		 * globally. The biggest blocker is the bio_clone() in
		 * bio bounce.
		 *
		 * If a bio is split for this reason, we should have
		 * allowed the resulting bios to continue merging, but
		 * don't do that now to keep the change simple.
		 *
		 * TODO: deal with bio bounce's bio_clone() gracefully
		 * and convert the global limit into a per-queue limit.
		 */
		if (bvecs++ >= BIO_MAX_PAGES)
			goto split;

		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			if (sectors)
				goto split;
			/* Make this single bvec the 1st segment */
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!BIOVEC_PHYS_MERGEABLE(bvprvp, &bv))
				goto new_segment;
			if (!BIOVEC_SEG_BOUNDARY(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			if (nsegs == 1 && seg_size > front_seg_size)
				front_seg_size = seg_size;
			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}

void blk_queue_split(struct request_queue *q, struct bio **bio,
		     struct bio_set *bs)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, bs, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = NULL;
		nsegs = (*bio)->bi_phys_segments;
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);

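/*
 * Illustrative usage (a sketch, not from this file): a bio-based driver's
 * make_request_fn would normally split first, so the rest of the driver
 * only ever sees bios that fit its queue limits.  "my_make_request" is a
 * hypothetical name used only for illustration:
 *
 *	static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		blk_queue_split(q, &bio, q->bio_split);
 *		...
 *		return BLK_QC_T_NONE;
 *	}
 */
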
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{

	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to a scatterlist, return the number of sg entries set up.
 * The caller must make sure sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of the request's physical
	 * segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

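/*
 * Illustrative usage (a sketch, not from this file): a request-based
 * driver typically sizes its scatterlist from rq->nr_phys_segments, lets
 * blk_rq_map_sg() fill it, then DMA-maps the result.  "sglist", "dev" and
 * "dir" stand in for driver-private state:
 *
 *	sg_init_table(sglist, rq->nr_phys_segments);
 *	nents = blk_rq_map_sg(rq->q, rq, sglist);
 *	nents = dma_map_sg(dev, sglist, nents, dir);
 */
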
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued request.
	 * We can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

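/*
 * Illustrative example (not part of the original file): if a request
 * carrying REQ_FAILFAST_DEV is merged with one that does not carry it,
 * attempt_merge() marks both RQF_MIXED_MERGE, and each request's failfast
 * bits are pushed down into its own bios, so completion handling can
 * consult the bios individually rather than the merged request.
 */
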
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return NULL;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}

737 struct request
*attempt_back_merge(struct request_queue
*q
, struct request
*rq
)
739 struct request
*next
= elv_latter_request(q
, rq
);
742 return attempt_merge(q
, rq
, next
);
747 struct request
*attempt_front_merge(struct request_queue
*q
, struct request
*rq
)
749 struct request
*prev
= elv_former_request(q
, rq
);
752 return attempt_merge(q
, prev
, rq
);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (req_op(rq) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(rq->q) > 1)
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

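/*
 * Illustrative example (not part of the original file): for a request
 * covering sectors [1000, 1008) (blk_rq_pos() == 1000, blk_rq_sectors()
 * == 8), a bio starting at sector 1008 is a back-merge candidate, an
 * 8-sector bio starting at sector 992 is a front-merge candidate, and
 * anything else yields ELEVATOR_NO_MERGE.
 */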