// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"
/*
 * Check if the two bvecs from two bios can be merged to one segment. If yes,
 * no need to check gap between the two bios since the 1st bio and the 1st bvec
 * in the 2nd bio can be handled in one segment.
 */
static inline bool bios_segs_mergeable(struct request_queue *q,
		struct bio *prev, struct bio_vec *prev_last_bv,
		struct bio_vec *next_first_bv)
{
	if (!biovec_phys_mergeable(q, prev_last_bv, next_first_bv))
		return false;
	if (prev->bi_seg_back_size + next_first_bv->bv_len >
			queue_max_segment_size(q))
		return false;

	return true;
}
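
/*
 * bio_will_gap() below reports whether appending 'next' after 'prev' would
 * leave a gap that violates the queue's virt_boundary mask between the last
 * bvec of 'prev' and the first bvec of 'next'.
 */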
static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);

	if (bios_segs_mergeable(q, prev, &pb, &nb))
		return false;

	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}
static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}
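
/*
 * The blk_bio_*_split() helpers below share one contract: set *nsegs to the
 * number of physical segments the resulting bio will use, and return a new
 * bio holding the front portion that fits within the relevant queue limit,
 * or NULL when the incoming bio needs no split.
 */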
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors))
		return NULL;

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}
static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}
static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}
static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}
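
/*
 * Example: with 4096-byte logical blocks, mask is 4095, so (mask >> 9) is 7
 * and the '&= ~(mask >> 9)' above rounds the sector count down to a multiple
 * of 8 sectors, i.e. a whole number of logical blocks.
 */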
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned seg_size = 0, nsegs = 0, sectors = 0;
	unsigned front_seg_size = bio->bi_seg_front_size;
	bool do_split = true;
	struct bio *new = NULL;
	const unsigned max_sectors = get_max_io_size(q, bio);

	bio_for_each_segment(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < queue_max_segments(q) &&
			    sectors < max_sectors) {
				nsegs++;
				sectors = max_sectors;
			}
			goto split;
		}

		if (bvprvp && blk_queue_cluster(q)) {
			if (seg_size + bv.bv_len > queue_max_segment_size(q))
				goto new_segment;
			if (!biovec_phys_mergeable(q, bvprvp, &bv))
				goto new_segment;

			seg_size += bv.bv_len;
			bvprv = bv;
			bvprvp = &bvprv;
			sectors += bv.bv_len >> 9;

			continue;
		}
new_segment:
		if (nsegs == queue_max_segments(q))
			goto split;

		if (nsegs == 1 && seg_size > front_seg_size)
			front_seg_size = seg_size;

		nsegs++;
		bvprv = bv;
		bvprvp = &bvprv;
		seg_size = bv.bv_len;
		sectors += bv.bv_len >> 9;
	}

	do_split = false;
split:
	*segs = nsegs;

	if (do_split) {
		new = bio_split(bio, sectors, GFP_NOIO, bs);
		if (new)
			bio = new;
	}

	if (nsegs == 1 && seg_size > front_seg_size)
		front_seg_size = seg_size;
	bio->bi_seg_front_size = front_seg_size;
	if (seg_size > bio->bi_seg_back_size)
		bio->bi_seg_back_size = seg_size;

	return do_split ? new : NULL;
}
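
/*
 * Split a bio that exceeds the queue limits for its operation type and
 * record the number of physical segments discovered while doing so. On
 * return, *bio points at the piece that fits within the limits; any
 * remainder has already been resubmitted via generic_make_request().
 */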
void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	struct bio *split, *res;
	unsigned nsegs;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split, &nsegs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split, &nsegs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, &nsegs);
		break;
	}

	/* physical segments can be figured out during splitting */
	res = split ? split : *bio;
	res->bi_phys_segments = nsegs;
	bio_set_flag(res, BIO_SEG_VALID);

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever on waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}
EXPORT_SYMBOL(blk_queue_split);
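
/*
 * Usage sketch (illustrative only): a bio-based driver typically calls
 * blk_queue_split() at the top of its make_request function, e.g.
 *
 *	static blk_qc_t my_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		blk_queue_split(q, &bio);
 *		...
 *	}
 *
 * "my_make_request" is a hypothetical driver entry point, shown only to
 * illustrate where the split hooks in.
 */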
static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, prev = 0;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			if (prev && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!biovec_phys_mergeable(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			prev = 1;
			seg_size = bv.bv_len;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}
void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}
void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	unsigned short seg_cnt;

	/* estimate segment number by bi_vcnt for non-cloned bio */
	if (bio_flagged(bio, BIO_CLONED))
		seg_cnt = bio_segments(bio);
	else
		seg_cnt = bio->bi_vcnt;

	if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
			(seg_cnt < queue_max_segments(q)))
		bio->bi_phys_segments = seg_cnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
		bio->bi_next = nxt;
	}

	bio_set_flag(bio, BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	bio_get_last_bvec(bio, &end_bv);
	bio_get_first_bvec(nxt, &nxt_bv);

	return biovec_phys_mergeable(q, &end_bv, &nxt_bv);
}
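
/*
 * Map a single bvec into the scatterlist: merge it into the previous sg
 * entry when the queue allows clustering and the two are physically
 * mergeable, otherwise start a new sg entry and bump *nsegs.
 */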
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;
		if (!biovec_phys_mergeable(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}
static inline int __blk_bvec_map_sg(struct request_queue *q, struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = sglist;
	sg_mark_end(*sg);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int cluster = blk_queue_cluster(q), nsegs = 0;

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}
/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(q, rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(q, bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have been wrong if the figured number of
	 * segments is bigger than the number of req's physical segments
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
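
/*
 * Usage sketch (illustrative only): a driver that DMA-maps a request would
 * typically size its table from blk_rq_nr_phys_segments() and then do
 * something like
 *
 *	sg_init_table(sgl, blk_rq_nr_phys_segments(rq));
 *	nents = blk_rq_map_sg(rq->q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents, dir);
 *
 * where "sgl", "dev" and "dir" stand in for the driver's own scatterlist,
 * device and DMA direction.
 */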
static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment. Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(q, req);
	return 0;
}
int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(q, req);
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}
/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}
static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;

no_merge:
	req_set_nomerge(q, req);
	return false;
}
static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests is a re-queued
	 * request. Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	req->nr_phys_segments = total_phys_segments;
	return 1;
}
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * @rq is about to be mixed merged. Make sure the attributes
 * which can be mixed are set in each bio and mark @rq as mixed
 * merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios. It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}
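
/*
 * Drop the I/O accounting state for a request that is being merged away:
 * round off its partition statistics and decrement the in-flight count.
 */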
static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(req->q, cpu, part);
		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}
/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!q->mq_ops)
		lockdep_assert_held(q->queue_lock);

	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */
	if (req_op(req) == REQ_OP_DISCARD) {
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
	} else if (!ll_merge_requests_fn(q, req, next))
		return NULL;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding. This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (req_op(req) != REQ_OP_DISCARD)
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/*
	 * ownership of bio passed from next to req, return 'next' for
	 * the caller to free
	 */
	next->bio = NULL;
	return next;
}
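
/*
 * attempt_back_merge()/attempt_front_merge() below look up the request
 * adjacent to @rq in the elevator and hand the pair to attempt_merge();
 * they return the request that was merged away (for the caller to free)
 * or NULL if no merge happened.
 */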
struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct elevator_queue *e = q->elevator;
	struct request *free;

	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
			return 0;

	free = attempt_merge(q, rq, next);
	if (free) {
		__blk_put_request(q, free);
		return 1;
	}

	return 0;
}
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	return true;
}
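
/*
 * Classify how @bio could be merged into @rq: a discard merge when the
 * queue supports multi-range discards, a back merge when the bio starts
 * right where @rq ends, a front merge when it ends right where @rq starts,
 * otherwise no merge.
 */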
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (req_op(rq) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(rq->q) > 1)
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}