block/blk-merge.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include <trace/events/block.h>

#include "blk.h"

static inline bool bio_will_gap(struct request_queue *q,
		struct request *prev_rq, struct bio *prev, struct bio *next)
{
	struct bio_vec pb, nb;

	if (!bio_has_data(prev) || !queue_virt_boundary(q))
		return false;

	/*
	 * Don't merge if the 1st bio starts with non-zero offset, otherwise it
	 * is quite difficult to respect the sg gap limit. We work hard to
	 * merge a huge number of small single bios in case of mkfs.
	 */
	if (prev_rq)
		bio_get_first_bvec(prev_rq->bio, &pb);
	else
		bio_get_first_bvec(prev, &pb);
	if (pb.bv_offset & queue_virt_boundary(q))
		return true;

	/*
	 * We don't need to worry about the situation that the merged segment
	 * ends in an unaligned virt boundary:
	 *
	 * - if 'pb' ends aligned, the merged segment ends aligned
	 * - if 'pb' ends unaligned, the next bio must include
	 *   one single bvec of 'nb', otherwise the 'nb' can't
	 *   merge with 'pb'
	 */
	bio_get_last_bvec(prev, &pb);
	bio_get_first_bvec(next, &nb);
	if (biovec_phys_mergeable(q, &pb, &nb))
		return false;
	return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
}

static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, req, req->biotail, bio);
}

static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
{
	return bio_will_gap(req->q, NULL, bio, req->bio);
}
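
/*
 * Split a discard bio so that the part submitted first respects the queue's
 * max_discard_sectors limit and ends aligned to the discard granularity.
 * Returns a bio for the leading part (leaving the remainder in @bio), or
 * NULL if no split is needed.
 */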
static struct bio *blk_bio_discard_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *nsegs)
{
	unsigned int max_discard_sectors, granularity;
	int alignment;
	sector_t tmp;
	unsigned split_sectors;

	*nsegs = 1;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);

	max_discard_sectors = min(q->limits.max_discard_sectors,
			bio_allowed_max_sectors(q));
	max_discard_sectors -= max_discard_sectors % granularity;

	if (unlikely(!max_discard_sectors)) {
		/* XXX: warn */
		return NULL;
	}

	if (bio_sectors(bio) <= max_discard_sectors)
		return NULL;

	split_sectors = max_discard_sectors;

	/*
	 * If the next starting sector would be misaligned, stop the discard at
	 * the previous aligned sector.
	 */
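	/*
	 * Worked example with hypothetical numbers: granularity = 8,
	 * discard_alignment = 0, bi_sector = 3 and split_sectors = 16 give
	 * tmp = (3 + 16 - 0) % 8 = 3, so split_sectors becomes 13 and the
	 * next discard starts at sector 3 + 13 = 16, which is aligned.
	 */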
	alignment = (q->limits.discard_alignment >> 9) % granularity;

	tmp = bio->bi_iter.bi_sector + split_sectors - alignment;
	tmp = sector_div(tmp, granularity);

	if (split_sectors > tmp)
		split_sectors -= tmp;

	return bio_split(bio, split_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_zeroes_split(struct request_queue *q,
		struct bio *bio, struct bio_set *bs, unsigned *nsegs)
{
	*nsegs = 0;

	if (!q->limits.max_write_zeroes_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_zeroes_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_zeroes_sectors, GFP_NOIO, bs);
}

static struct bio *blk_bio_write_same_split(struct request_queue *q,
					    struct bio *bio,
					    struct bio_set *bs,
					    unsigned *nsegs)
{
	*nsegs = 1;

	if (!q->limits.max_write_same_sectors)
		return NULL;

	if (bio_sectors(bio) <= q->limits.max_write_same_sectors)
		return NULL;

	return bio_split(bio, q->limits.max_write_same_sectors, GFP_NOIO, bs);
}

static inline unsigned get_max_io_size(struct request_queue *q,
				       struct bio *bio)
{
	unsigned sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	unsigned mask = queue_logical_block_size(q) - 1;

	/* aligned to logical block size */
	sectors &= ~(mask >> 9);

	return sectors;
}

static unsigned get_max_segment_size(struct request_queue *q,
				     unsigned offset)
{
	unsigned long mask = queue_segment_boundary(q);

	/* default segment boundary mask means no boundary limit */
	if (mask == BLK_SEG_BOUNDARY_MASK)
		return queue_max_segment_size(q);
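
	/*
	 * Example with hypothetical numbers: for a 64K segment boundary the
	 * mask is 0xffff; at offset 0xf000 this returns min(0x1000,
	 * queue_max_segment_size(q)), i.e. at most 4K may be added before
	 * the segment would straddle the boundary.
	 */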
	return min_t(unsigned long, mask - (mask & offset) + 1,
			queue_max_segment_size(q));
}

/*
 * Split the bvec @bv into segments, and update all kinds of
 * variables.
 */
static bool bvec_split_segs(struct request_queue *q, struct bio_vec *bv,
		unsigned *nsegs, unsigned *sectors, unsigned max_segs)
{
	unsigned len = bv->bv_len;
	unsigned total_len = 0;
	unsigned new_nsegs = 0, seg_size = 0;

	/*
	 * A multi-page bvec may be too big to hold in one segment, so the
	 * current bvec has to be split into multiple segments.
	 */
	while (len && new_nsegs + *nsegs < max_segs) {
		seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
		seg_size = min(seg_size, len);

		new_nsegs++;
		total_len += seg_size;
		len -= seg_size;

		if ((bv->bv_offset + total_len) & queue_virt_boundary(q))
			break;
	}

	if (new_nsegs) {
		*nsegs += new_nsegs;
		if (sectors)
			*sectors += total_len >> 9;
	}

	/* split in the middle of the bvec if len != 0 */
	return !!len;
}
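
/*
 * Walk @bio and split it once it would exceed the queue's segment count,
 * per-I/O size, or SG gap constraints. Returns a bio for the leading,
 * limit-respecting part, or NULL if no split is needed; *segs is set to
 * the number of physical segments in the returned (or intact) bio.
 */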
static struct bio *blk_bio_segment_split(struct request_queue *q,
					 struct bio *bio,
					 struct bio_set *bs,
					 unsigned *segs)
{
	struct bio_vec bv, bvprv, *bvprvp = NULL;
	struct bvec_iter iter;
	unsigned nsegs = 0, sectors = 0;
	const unsigned max_sectors = get_max_io_size(q, bio);
	const unsigned max_segs = queue_max_segments(q);

	bio_for_each_bvec(bv, bio, iter) {
		/*
		 * If the queue doesn't support SG gaps and adding this
		 * offset would create a gap, disallow it.
		 */
		if (bvprvp && bvec_gap_to_prev(q, bvprvp, bv.bv_offset))
			goto split;

		if (sectors + (bv.bv_len >> 9) > max_sectors) {
			/*
			 * Consider this a new segment if we're splitting in
			 * the middle of this vector.
			 */
			if (nsegs < max_segs &&
			    sectors < max_sectors) {
				/* split in the middle of bvec */
				bv.bv_len = (max_sectors - sectors) << 9;
				bvec_split_segs(q, &bv, &nsegs,
						&sectors, max_segs);
			}
			goto split;
		}

		if (nsegs == max_segs)
			goto split;

		bvprv = bv;
		bvprvp = &bvprv;

		if (bv.bv_offset + bv.bv_len <= PAGE_SIZE) {
			nsegs++;
			sectors += bv.bv_len >> 9;
		} else if (bvec_split_segs(q, &bv, &nsegs, &sectors,
				max_segs)) {
			goto split;
		}
	}

	*segs = nsegs;
	return NULL;
split:
	*segs = nsegs;
	return bio_split(bio, sectors, GFP_NOIO, bs);
}
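
/*
 * Split *bio against the queue limits if necessary. On return, *bio points
 * at the piece that can be processed now and *nr_segs holds its physical
 * segment count; any remainder has already been re-submitted via
 * generic_make_request().
 */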
void __blk_queue_split(struct request_queue *q, struct bio **bio,
		unsigned int *nr_segs)
{
	struct bio *split;

	switch (bio_op(*bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		split = blk_bio_discard_split(q, *bio, &q->bio_split, nr_segs);
		break;
	case REQ_OP_WRITE_ZEROES:
		split = blk_bio_write_zeroes_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	case REQ_OP_WRITE_SAME:
		split = blk_bio_write_same_split(q, *bio, &q->bio_split,
				nr_segs);
		break;
	default:
		split = blk_bio_segment_split(q, *bio, &q->bio_split, nr_segs);
		break;
	}

	if (split) {
		/* there is no chance to merge the split bio */
		split->bi_opf |= REQ_NOMERGE;

		/*
		 * Since we're recursing into make_request here, ensure
		 * that we mark this bio as already having entered the queue.
		 * If not, and the queue is going away, we can get stuck
		 * forever on waiting for the queue reference to drop. But
		 * that will never happen, as we're already holding a
		 * reference to it.
		 */
		bio_set_flag(*bio, BIO_QUEUE_ENTERED);

		bio_chain(split, *bio);
		trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
		generic_make_request(*bio);
		*bio = split;
	}
}

void blk_queue_split(struct request_queue *q, struct bio **bio)
{
	unsigned int nr_segs;

	__blk_queue_split(q, bio, &nr_segs);
}
EXPORT_SYMBOL(blk_queue_split);
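
/*
 * Recompute the number of physical segments in @rq by walking its bvecs.
 * Discard, secure erase and write-zeroes requests carry no data segments;
 * a write-same request counts as a single segment.
 */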
unsigned int blk_recalc_rq_segments(struct request *rq)
{
	unsigned int nr_phys_segs = 0;
	struct req_iterator iter;
	struct bio_vec bv;

	if (!rq->bio)
		return 0;

	switch (bio_op(rq->bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	case REQ_OP_WRITE_SAME:
		return 1;
	}

	rq_for_each_bvec(bv, rq, iter)
		bvec_split_segs(rq->q, &bv, &nr_phys_segs, NULL, UINT_MAX);
	return nr_phys_segs;
}

static inline struct scatterlist *blk_next_sg(struct scatterlist **sg,
		struct scatterlist *sglist)
{
	if (!*sg)
		return sglist;

	/*
	 * If the driver previously mapped a shorter list, we could see a
	 * termination bit prematurely unless it fully inits the sg table
	 * on each mapping. We KNOW that there must be more entries here
	 * or the driver would be buggy, so force clear the termination bit
	 * to avoid doing a full sg_init_table() in drivers for each command.
	 */
	sg_unmark_end(*sg);
	return sg_next(*sg);
}
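
/*
 * Map a single (possibly multi-page) bvec onto one or more scatterlist
 * entries, splitting wherever the per-segment size limit or segment
 * boundary would be exceeded. Returns the number of entries used.
 */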
static unsigned blk_bvec_map_sg(struct request_queue *q,
		struct bio_vec *bvec, struct scatterlist *sglist,
		struct scatterlist **sg)
{
	unsigned nbytes = bvec->bv_len;
	unsigned nsegs = 0, total = 0;

	while (nbytes > 0) {
		unsigned offset = bvec->bv_offset + total;
		unsigned len = min(get_max_segment_size(q, offset), nbytes);
		struct page *page = bvec->bv_page;

		/*
		 * Unfortunately a fair number of drivers barf on scatterlists
		 * that have an offset larger than PAGE_SIZE, despite other
		 * subsystems dealing with that invariant just fine. For now
		 * stick to the legacy format where we never present those from
		 * the block layer, but the code below should be removed once
		 * these offenders (mostly MMC/SD drivers) are fixed.
		 */
		page += (offset >> PAGE_SHIFT);
		offset &= ~PAGE_MASK;

		*sg = blk_next_sg(sg, sglist);
		sg_set_page(*sg, page, len, offset);

		total += len;
		nbytes -= len;
		nsegs++;
	}

	return nsegs;
}

static inline int __blk_bvec_map_sg(struct bio_vec bv,
		struct scatterlist *sglist, struct scatterlist **sg)
{
	*sg = blk_next_sg(sg, sglist);
	sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset);
	return 1;
}

/* only try to merge bvecs into one sg if they are from two bios */
static inline bool
__blk_segment_map_sg_merge(struct request_queue *q, struct bio_vec *bvec,
			   struct bio_vec *bvprv, struct scatterlist **sg)
{
	int nbytes = bvec->bv_len;

	if (!*sg)
		return false;

	if ((*sg)->length + nbytes > queue_max_segment_size(q))
		return false;

	if (!biovec_phys_mergeable(q, bvprv, bvec))
		return false;

	(*sg)->length += nbytes;

	return true;
}

static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec uninitialized_var(bvec), bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs = 0;
	bool new_bio = false;

	for_each_bio(bio) {
		bio_for_each_bvec(bvec, bio, iter) {
			/*
			 * Only try to merge bvecs from two different bios;
			 * merging within a bio was already done when pages
			 * were added to it.
			 */
			if (new_bio &&
			    __blk_segment_map_sg_merge(q, &bvec, &bvprv, sg))
				goto next_bvec;

			if (bvec.bv_offset + bvec.bv_len <= PAGE_SIZE)
				nsegs += __blk_bvec_map_sg(bvec, sglist, sg);
			else
				nsegs += blk_bvec_map_sg(q, &bvec, sglist, sg);
 next_bvec:
			new_bio = false;
		}
		if (likely(bio->bi_iter.bi_size)) {
			bvprv = bvec;
			new_bio = true;
		}
	}

	return nsegs;
}

/*
 * Map a request to a scatterlist, returning the number of sg entries set up.
 * The caller must make sure that sg can hold rq->nr_phys_segments entries.
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		nsegs = __blk_bvec_map_sg(rq->special_vec, sglist, &sg);
	else if (rq->bio && bio_op(rq->bio) == REQ_OP_WRITE_SAME)
		nsegs = __blk_bvec_map_sg(bio_iovec(rq->bio), sglist, &sg);
	else if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
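
	/*
	 * Pad the request out to the queue's DMA alignment. Worked example
	 * with hypothetical numbers: dma_pad_mask = 3 (4-byte alignment)
	 * and blk_rq_bytes(rq) = 509 give pad_len = (3 & ~509) + 1 = 3,
	 * extending the last sg entry to a 512-byte total.
	 */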
	if (unlikely(rq->rq_flags & RQF_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (op_is_write(req_op(rq)))
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		sg_unmark_end(sg);
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed number of
	 * segments is bigger than the number of req's physical segments.
	 */
	WARN_ON(nsegs > blk_rq_nr_phys_segments(rq));

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);
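
/*
 * Account the physical segments of @bio to @req if the merged total stays
 * within queue_max_segments() and the integrity payloads can be merged;
 * returns 1 on success, or 0 after marking the request as no-merge.
 */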
static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
		unsigned int nr_phys_segs)
{
	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
		goto no_merge;

	if (blk_integrity_merge_bio(req->q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req_set_nomerge(req->q, req);
	return 0;
}

int ll_back_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_back_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_back_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req))) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

int ll_front_merge_fn(struct request *req, struct bio *bio, unsigned int nr_segs)
{
	if (req_gap_front_merge(req, bio))
		return 0;
	if (blk_integrity_rq(req) &&
	    integrity_req_gap_front_merge(req, bio))
		return 0;
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req, bio->bi_iter.bi_sector)) {
		req_set_nomerge(req->q, req);
		return 0;
	}

	return ll_new_hw_segment(req, bio, nr_segs);
}

static bool req_attempt_discard_merge(struct request_queue *q, struct request *req,
		struct request *next)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);

	if (segments >= queue_max_discard_segments(q))
		goto no_merge;
	if (blk_rq_sectors(req) + bio_sectors(next->bio) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		goto no_merge;

	req->nr_phys_segments = segments + blk_rq_nr_discard_segments(next);
	return true;
no_merge:
	req_set_nomerge(q, req);
	return false;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;

	if (req_gap_back_merge(req, next->bio))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req, blk_rq_pos(req)))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->rq_flags & RQF_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_opf & REQ_FAILFAST_MASK) &&
			     (bio->bi_opf & REQ_FAILFAST_MASK) != ff);
		bio->bi_opf |= ff;
	}
	rq->rq_flags |= RQF_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;

		part_stat_lock();
		part = req->part;

		part_dec_in_flight(req->q, part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Two cases of handling DISCARD merge:
 * If max_discard_segments > 1, the driver treats every bio
 * as a range and sends them to the controller together. The ranges
 * need not be contiguous.
 * Otherwise, the bios/requests are handled the same as others,
 * which must be contiguous.
 */
static inline bool blk_discard_mergable(struct request *req)
{
	if (req_op(req) == REQ_OP_DISCARD &&
	    queue_max_discard_segments(req->q) > 1)
		return true;
	return false;
}
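
/*
 * Decide how @next could be merged into @req: as an additional discard
 * range, as a back merge when the two requests are contiguous, or not
 * at all.
 */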
static enum elv_merge blk_try_req_merge(struct request *req,
					struct request *next)
{
	if (blk_discard_mergable(req))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(req) + blk_rq_sectors(req) == blk_rq_pos(next))
		return ELEVATOR_BACK_MERGE;

	return ELEVATOR_NO_MERGE;
}

/*
 * For non-mq, this has to be called with the request spinlock acquired.
 * For mq with scheduling, the appropriate queue wide lock should be held.
 */
static struct request *attempt_merge(struct request_queue *q,
				     struct request *req, struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return NULL;

	if (req_op(req) != req_op(next))
		return NULL;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk)
		return NULL;

	if (req_op(req) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return NULL;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (req->write_hint != next->write_hint)
		return NULL;

	if (req->ioprio != next->ioprio)
		return NULL;

	/*
	 * If we are allowed to merge, then append the bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts; update sector
	 * counts here. Handle DISCARDs separately, as they
	 * have separate settings.
	 */

	switch (blk_try_req_merge(req, next)) {
	case ELEVATOR_DISCARD_MERGE:
		if (!req_attempt_discard_merge(q, req, next))
			return NULL;
		break;
	case ELEVATOR_BACK_MERGE:
		if (!ll_merge_requests_fn(q, req, next))
			return NULL;
		break;
	default:
		return NULL;
	}

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if (((req->rq_flags | next->rq_flags) & RQF_MIXED_MERGE) ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge or front merge. We
	 * need the smaller start_time_ns of the merged requests to be the
	 * current request for accounting purposes.
	 */
	if (next->start_time_ns < req->start_time_ns)
		req->start_time_ns = next->start_time_ns;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	if (!blk_discard_mergable(req))
		elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	/*
	 * Ownership of the bios passed from next to req; return 'next' for
	 * the caller to free.
	 */
	next->bio = NULL;
	return next;
}

struct request *attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return NULL;
}

struct request *attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return NULL;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	struct request *free;

	free = attempt_merge(q, rq, next);
	if (free) {
		blk_put_request(free);
		return 1;
	}

	return 0;
}
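
/*
 * Check the basic invariants any bio must satisfy before it may be merged
 * into @rq: same operation, data direction, device, integrity profile,
 * write hint and I/O priority, and a matching buffer for WRITE SAME.
 */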
bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (req_op(rq) != bio_op(bio))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device */
	if (rq->rq_disk != bio->bi_disk)
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (req_op(rq) == REQ_OP_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	/*
	 * Don't allow merge of different write hints, or for a hint with
	 * non-hint IO.
	 */
	if (rq->write_hint != bio->bi_write_hint)
		return false;

	if (rq->ioprio != bio_prio(bio))
		return false;

	return true;
}

enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_discard_mergable(rq))
		return ELEVATOR_DISCARD_MERGE;
	else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}