2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-
19 #include <linux/swap.h>
20 #include <linux/bio.h>
21 #include <linux/blkdev.h>
22 #include <linux/uio.h>
23 #include <linux/iocontext.h>
24 #include <linux/slab.h>
25 #include <linux/init.h>
26 #include <linux/kernel.h>
27 #include <linux/export.h>
28 #include <linux/mempool.h>
29 #include <linux/workqueue.h>
30 #include <linux/cgroup.h>
32 #include <trace/events/block.h>
35 * Test patch to inline a certain number of bi_io_vec's inside the bio
36 * itself, to shrink a bio data allocation from two mempool calls to one
38 #define BIO_INLINE_VECS 4
41 * if you change this list, also change bvec_alloc or things will
42 * break badly! cannot be bigger than what you can fit into an
45 #define BV(x, n) { .nr_vecs = x, .name = "biovec-"#n }
46 static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
47 	BV(1, 1), BV(4, 4), BV(16, 16), BV(64, 64), BV(128, 128), BV(BIO_MAX_PAGES, max),
52 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
53 * IO code that does not need private memory pools.
55 struct bio_set *fs_bio_set;
56 EXPORT_SYMBOL(fs_bio_set);
59 * Our slab pool management
62 	struct kmem_cache *slab;
63 	unsigned int slab_ref;
64 	unsigned int slab_size;
67 static DEFINE_MUTEX(bio_slab_lock);
68 static struct bio_slab *bio_slabs;
69 static unsigned int bio_slab_nr, bio_slab_max;
71 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
73 	unsigned int sz = sizeof(struct bio) + extra_size;
74 	struct kmem_cache *slab = NULL;
75 	struct bio_slab *bslab, *new_bio_slabs;
76 	unsigned int new_bio_slab_max;
77 	unsigned int i, entry = -1;
79 	mutex_lock(&bio_slab_lock);
82 	while (i < bio_slab_nr) {
83 		bslab = &bio_slabs[i];
85 		if (!bslab->slab && entry == -1)
87 		else if (bslab->slab_size == sz) {
98 	if (bio_slab_nr == bio_slab_max && entry == -1) {
99 		new_bio_slab_max = bio_slab_max << 1;
100 		new_bio_slabs = krealloc(bio_slabs,
101 					 new_bio_slab_max * sizeof(struct bio_slab),
105 		bio_slab_max = new_bio_slab_max;
106 		bio_slabs = new_bio_slabs;
109 	entry = bio_slab_nr++;
111 	bslab = &bio_slabs[entry];
113 	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
114 	slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
115 				 SLAB_HWCACHE_ALIGN, NULL);
121 	bslab->slab_size = sz;
123 	mutex_unlock(&bio_slab_lock);
127 static void bio_put_slab(struct bio_set *bs)
129 	struct bio_slab *bslab = NULL;
132 	mutex_lock(&bio_slab_lock);
134 	for (i = 0; i < bio_slab_nr; i++) {
135 		if (bs->bio_slab == bio_slabs[i].slab) {
136 			bslab = &bio_slabs[i];
141 	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
144 	WARN_ON(!bslab->slab_ref);
146 	if (--bslab->slab_ref)
149 	kmem_cache_destroy(bslab->slab);
153 	mutex_unlock(&bio_slab_lock);
156 unsigned int bvec_nr_vecs(unsigned short idx)
158 	return bvec_slabs[--idx].nr_vecs;
161 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
167 	BIO_BUG_ON(idx >= BVEC_POOL_NR);
169 	if (idx == BVEC_POOL_MAX) {
170 		mempool_free(bv, pool);
172 		struct biovec_slab *bvs = bvec_slabs + idx;
174 		kmem_cache_free(bvs->slab, bv);
178 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
184 * see comment near bvec_array define!
202 	case 129 ... BIO_MAX_PAGES:
210 	 * idx now points to the pool we want to allocate from. only the
211 	 * 1-vec entry pool is mempool backed.
213 	if (*idx == BVEC_POOL_MAX) {
215 		bvl = mempool_alloc(pool, gfp_mask);
217 		struct biovec_slab *bvs = bvec_slabs + *idx;
218 		gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
221 		 * Make this allocation restricted and don't dump info on
222 		 * allocation failures, since we'll fallback to the mempool
223 		 * in case of failure.
225 		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
228 		 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
229 		 * is set, retry with the 1-entry mempool
231 		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
232 		if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
233 			*idx = BVEC_POOL_MAX;
242 static void __bio_free(struct bio *bio)
244 	bio_disassociate_task(bio);
246 	if (bio_integrity(bio))
247 		bio_integrity_free(bio);
250 static void bio_free(struct bio *bio)
252 	struct bio_set *bs = bio->bi_pool;
258 		bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
261 		 * If we have front padding, adjust the bio pointer before freeing
266 		mempool_free(p, bs->bio_pool);
268 /* Bio was allocated by bio_kmalloc() */
273 void bio_init(struct bio *bio)
275 	memset(bio, 0, sizeof(*bio));
276 	atomic_set(&bio->__bi_remaining, 1);
277 	atomic_set(&bio->__bi_cnt, 1);
279 EXPORT_SYMBOL(bio_init);
282 * bio_reset - reinitialize a bio
286 * After calling bio_reset(), @bio will be in the same state as a freshly
287 * allocated bio returned by bio_alloc_bioset() - the only fields that are
288 * preserved are the ones that are initialized by bio_alloc_bioset(). See
289 * comment in struct bio.
291 void bio_reset(struct bio *bio)
293 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
297 	memset(bio, 0, BIO_RESET_BYTES);
298 	bio->bi_flags = flags;
299 	atomic_set(&bio->__bi_remaining, 1);
301 EXPORT_SYMBOL(bio_reset);
303 static struct bio *__bio_chain_endio(struct bio *bio)
305 	struct bio *parent = bio->bi_private;
307 	if (!parent->bi_error)
308 		parent->bi_error = bio->bi_error;
313 static void bio_chain_endio(struct bio *bio)
315 	bio_endio(__bio_chain_endio(bio));
319 * bio_chain - chain bio completions
320 * @bio: the target bio
321 * @parent: the @bio's parent bio
323 * The caller won't have a bi_end_io called when @bio completes - instead,
324 * @parent's bi_end_io won't be called until both @parent and @bio have
325 * completed; the chained bio will also be freed when it completes.
327 * The caller must not set bi_private or bi_end_io in @bio.
329 void bio_chain(struct bio *bio, struct bio *parent)
331 	BUG_ON(bio->bi_private || bio->bi_end_io);
333 	bio->bi_private = parent;
334 	bio->bi_end_io = bio_chain_endio;
335 	bio_inc_remaining(parent);
337 EXPORT_SYMBOL(bio_chain);
339 static void bio_alloc_rescue(struct work_struct *work)
341 	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
345 		spin_lock(&bs->rescue_lock);
346 		bio = bio_list_pop(&bs->rescue_list);
347 		spin_unlock(&bs->rescue_lock);
352 		generic_make_request(bio);
356 static void punt_bios_to_rescuer(struct bio_set *bs)
358 	struct bio_list punt, nopunt;
362 * In order to guarantee forward progress we must punt only bios that
363 * were allocated from this bio_set; otherwise, if there was a bio on
364 * there for a stacking driver higher up in the stack, processing it
365 * could require allocating bios from this bio_set, and doing that from
366 * our own rescuer would be bad.
368 * Since bio lists are singly linked, pop them all instead of trying to
369 * remove from the middle of the list:
372 	bio_list_init(&punt);
373 	bio_list_init(&nopunt);
375 	while ((bio = bio_list_pop(&current->bio_list[0])))
376 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
377 	current->bio_list[0] = nopunt;
379 	bio_list_init(&nopunt);
380 	while ((bio = bio_list_pop(&current->bio_list[1])))
381 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
382 	current->bio_list[1] = nopunt;
384 	spin_lock(&bs->rescue_lock);
385 	bio_list_merge(&bs->rescue_list, &punt);
386 	spin_unlock(&bs->rescue_lock);
388 	queue_work(bs->rescue_workqueue, &bs->rescue_work);
392 * bio_alloc_bioset - allocate a bio for I/O
393 * @gfp_mask: the GFP_ mask given to the slab allocator
394 * @nr_iovecs: number of iovecs to pre-allocate
395 * @bs: the bio_set to allocate from.
398 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
399 * backed by the @bs's mempool.
401 * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
402 * always be able to allocate a bio. This is due to the mempool guarantees.
403 * To make this work, callers must never allocate more than 1 bio at a time
404 * from this pool. Callers that need to allocate more than 1 bio must always
405 * submit the previously allocated bio for IO before attempting to allocate
406 * a new one. Failure to do so can cause deadlocks under memory pressure.
408 * Note that when running under generic_make_request() (i.e. any block
409 * driver), bios are not submitted until after you return - see the code in
410 * generic_make_request() that converts recursion into iteration, to prevent
413 * This would normally mean allocating multiple bios under
414 * generic_make_request() would be susceptible to deadlocks, but we have
415 * deadlock avoidance code that resubmits any blocked bios from a rescuer
418 * However, we do not guarantee forward progress for allocations from other
419 * mempools. Doing multiple allocations from the same mempool under
420 * generic_make_request() should be avoided - instead, use bio_set's front_pad
421 * for per bio allocations.
424 * Pointer to new bio on success, NULL on failure.
426 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
428 	gfp_t saved_gfp = gfp_mask;
430 	unsigned inline_vecs;
431 	struct bio_vec *bvl = NULL;
436 		if (nr_iovecs > UIO_MAXIOV)
439 		p = kmalloc(sizeof(struct bio) +
440 			    nr_iovecs * sizeof(struct bio_vec),
443 		inline_vecs = nr_iovecs;
445 		/* should not use nobvec bioset for nr_iovecs > 0 */
446 		if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
449 * generic_make_request() converts recursion to iteration; this
450 * means if we're running beneath it, any bios we allocate and
451 * submit will not be submitted (and thus freed) until after we
454 * This exposes us to a potential deadlock if we allocate
455 * multiple bios from the same bio_set() while running
456 * underneath generic_make_request(). If we were to allocate
457 * multiple bios (say a stacking block driver that was splitting
458 * bios), we would deadlock if we exhausted the mempool's
461 * We solve this, and guarantee forward progress, with a rescuer
462 * workqueue per bio_set. If we go to allocate and there are
463 * bios on current->bio_list, we first try the allocation
464 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
465 * bios we would be blocking to the rescuer workqueue before
466 * we retry with the original gfp_flags.
469 		if (current->bio_list &&
470 		    (!bio_list_empty(&current->bio_list[0]) ||
471 		     !bio_list_empty(&current->bio_list[1])))
472 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
474 		p = mempool_alloc(bs->bio_pool, gfp_mask);
475 		if (!p && gfp_mask != saved_gfp) {
476 			punt_bios_to_rescuer(bs);
477 			gfp_mask = saved_gfp;
478 			p = mempool_alloc(bs->bio_pool, gfp_mask);
481 		front_pad = bs->front_pad;
482 		inline_vecs = BIO_INLINE_VECS;
491 	if (nr_iovecs > inline_vecs) {
492 		unsigned long idx = 0;
494 		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
495 		if (!bvl && gfp_mask != saved_gfp) {
496 			punt_bios_to_rescuer(bs);
497 			gfp_mask = saved_gfp;
498 			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
504 		bio->bi_flags |= idx << BVEC_POOL_OFFSET;
505 	} else if (nr_iovecs) {
506 		bvl = bio->bi_inline_vecs;
510 	bio->bi_max_vecs = nr_iovecs;
511 	bio->bi_io_vec = bvl;
515 	mempool_free(p, bs->bio_pool);
518 EXPORT_SYMBOL(bio_alloc_bioset);
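/*
 * Illustrative sketch (not part of the original file): the allocation pattern
 * the comment above asks for.  Each bio is submitted before the next one is
 * allocated from the same bio_set, so the mempool guarantee of forward
 * progress holds.  my_bio_set, example_chunk_end_io() and the page-at-a-time
 * layout are assumptions made for this example.
 */
static void example_chunk_end_io(struct bio *bio)
{
	/* a real driver would inspect bio->bi_error and notify a waiter here */
	bio_put(bio);
}

static void example_write_pages(struct bio_set *my_bio_set,
				struct block_device *bdev, sector_t sector,
				struct page **pages, unsigned int nr_pages)
{
	while (nr_pages) {
		unsigned int nr = min_t(unsigned int, nr_pages, BIO_MAX_PAGES);
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr, my_bio_set);
		unsigned int i;

		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = example_chunk_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		for (i = 0; i < nr; i++)
			bio_add_page(bio, pages[i], PAGE_SIZE, 0);

		/* submit before allocating the next bio from my_bio_set */
		submit_bio(bio);

		pages += nr;
		nr_pages -= nr;
		sector += nr * (PAGE_SIZE >> 9);
	}
}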
520 void zero_fill_bio(struct bio *bio)
524 	struct bvec_iter iter;
526 	bio_for_each_segment(bv, bio, iter) {
527 		char *data = bvec_kmap_irq(&bv, &flags);
528 		memset(data, 0, bv.bv_len);
529 		flush_dcache_page(bv.bv_page);
530 		bvec_kunmap_irq(data, &flags);
533 EXPORT_SYMBOL(zero_fill_bio);
536 * bio_put - release a reference to a bio
537 * @bio: bio to release reference to
540 * Put a reference to a &struct bio, either one you have gotten with
541 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
543 void bio_put(struct bio *bio)
545 	if (!bio_flagged(bio, BIO_REFFED))
548 		BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
553 		if (atomic_dec_and_test(&bio->__bi_cnt))
557 EXPORT_SYMBOL(bio_put);
559 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
561 	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
562 		blk_recount_segments(q, bio);
564 	return bio->bi_phys_segments;
566 EXPORT_SYMBOL(bio_phys_segments);
569 * __bio_clone_fast - clone a bio that shares the original bio's biovec
570 * @bio: destination bio
571 * @bio_src: bio to clone
573 * Clone a &bio. Caller will own the returned bio, but not
574 * the actual data it points to. Reference count of returned
577 * Caller must ensure that @bio_src is not freed before @bio.
579 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
581 	BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
584 	 * most users will be overriding ->bi_bdev with a new target,
585 	 * so we don't set nor calculate new physical/hw segment counts here
587 	bio->bi_bdev = bio_src->bi_bdev;
588 	bio_set_flag(bio, BIO_CLONED);
589 	bio->bi_opf = bio_src->bi_opf;
590 	bio->bi_iter = bio_src->bi_iter;
591 	bio->bi_io_vec = bio_src->bi_io_vec;
593 	bio_clone_blkcg_association(bio, bio_src);
595 EXPORT_SYMBOL(__bio_clone_fast);
598 * bio_clone_fast - clone a bio that shares the original bio's biovec
600 * @gfp_mask: allocation priority
601 * @bs: bio_set to allocate from
603 * Like __bio_clone_fast, only also allocates the returned bio
605 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
609 	b = bio_alloc_bioset(gfp_mask, 0, bs);
613 	__bio_clone_fast(b, bio);
615 	if (bio_integrity(bio)) {
618 		ret = bio_integrity_clone(b, bio, gfp_mask);
628 EXPORT_SYMBOL(bio_clone_fast);
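/*
 * Illustrative sketch (not part of the original file): the "override ->bi_bdev
 * with a new target" use case mentioned in __bio_clone_fast().  A remapping
 * driver clones the incoming bio, redirects the clone, and completes the
 * original from the clone's completion handler.  example_remap(), lower_bdev
 * and sector_off are assumptions made for this example.
 */
static void example_remap_end_io(struct bio *clone)
{
	struct bio *orig = clone->bi_private;

	orig->bi_error = clone->bi_error;
	bio_put(clone);
	bio_endio(orig);	/* the original outlives the clone, as required */
}

static void example_remap(struct bio *orig, struct block_device *lower_bdev,
			  sector_t sector_off, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(orig, GFP_NOIO, bs);

	clone->bi_bdev = lower_bdev;
	clone->bi_iter.bi_sector += sector_off;
	clone->bi_end_io = example_remap_end_io;
	clone->bi_private = orig;
	generic_make_request(clone);
}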
631 * bio_clone_bioset - clone a bio
632 * @bio_src: bio to clone
633 * @gfp_mask: allocation priority
634 * @bs: bio_set to allocate from
636 * Clone bio. Caller will own the returned bio, but not the actual data it
637 * points to. Reference count of returned bio will be one.
639 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
642 	struct bvec_iter iter;
647 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
648 * bio_src->bi_io_vec to bio->bi_io_vec.
650 * We can't do that anymore, because:
652 * - The point of cloning the biovec is to produce a bio with a biovec
653 * the caller can modify: bi_idx and bi_bvec_done should be 0.
655 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
656 * we tried to clone the whole thing bio_alloc_bioset() would fail.
657 * But the clone should succeed as long as the number of biovecs we
658 * actually need to allocate is fewer than BIO_MAX_PAGES.
660 * - Lastly, bi_vcnt should not be looked at or relied upon by code
661 * that does not own the bio - reason being drivers don't use it for
662 * iterating over the biovec anymore, so expecting it to be kept up
663 * to date (i.e. for clones that share the parent biovec) is just
664 * asking for trouble and would force extra work on
665 * __bio_clone_fast() anyways.
668 	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
671 	bio->bi_bdev = bio_src->bi_bdev;
672 	bio->bi_opf = bio_src->bi_opf;
673 	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
674 	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
676 	switch (bio_op(bio)) {
678 	case REQ_OP_SECURE_ERASE:
680 	case REQ_OP_WRITE_SAME:
681 		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
684 		bio_for_each_segment(bv, bio_src, iter)
685 			bio->bi_io_vec[bio->bi_vcnt++] = bv;
689 	if (bio_integrity(bio_src)) {
692 		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
699 	bio_clone_blkcg_association(bio, bio_src);
703 EXPORT_SYMBOL(bio_clone_bioset);
706 * bio_add_pc_page - attempt to add page to bio
707 * @q: the target queue
708 * @bio: destination bio
710 * @len: vec entry length
711 * @offset: vec entry offset
713 * Attempt to add a page to the bio_vec maplist. This can fail for a
714 * number of reasons, such as the bio being full or target block device
715 * limitations. The target block device must allow bio's up to PAGE_SIZE,
716 * so it is always possible to add a single page to an empty bio.
718 * This should only be used by REQ_PC bios.
720 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
721 		    *page, unsigned int len, unsigned int offset)
723 	int retried_segments = 0;
724 	struct bio_vec *bvec;
727 	 * cloned bio must not modify vec list
729 	if (unlikely(bio_flagged(bio, BIO_CLONED)))
732 	if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
736 * For filesystems with a blocksize smaller than the pagesize
737 * we will often be called with the same page as last time and
738 * a consecutive offset. Optimize this special case.
740 if (bio
->bi_vcnt
> 0) {
741 struct bio_vec
*prev
= &bio
->bi_io_vec
[bio
->bi_vcnt
- 1];
743 if (page
== prev
->bv_page
&&
744 offset
== prev
->bv_offset
+ prev
->bv_len
) {
746 bio
->bi_iter
.bi_size
+= len
;
751 * If the queue doesn't support SG gaps and adding this
752 * offset would create a gap, disallow it.
754 if (bvec_gap_to_prev(q
, prev
, offset
))
758 if (bio
->bi_vcnt
>= bio
->bi_max_vecs
)
762 * setup the new entry, we might clear it again later if we
763 * cannot add the page
765 bvec
= &bio
->bi_io_vec
[bio
->bi_vcnt
];
766 bvec
->bv_page
= page
;
768 bvec
->bv_offset
= offset
;
770 bio
->bi_phys_segments
++;
771 bio
->bi_iter
.bi_size
+= len
;
774 * Perform a recount if the number of segments is greater
775 * than queue_max_segments(q).
778 while (bio
->bi_phys_segments
> queue_max_segments(q
)) {
780 if (retried_segments
)
783 retried_segments
= 1;
784 blk_recount_segments(q
, bio
);
787 /* If we may be able to merge these biovecs, force a recount */
788 if (bio
->bi_vcnt
> 1 && (BIOVEC_PHYS_MERGEABLE(bvec
-1, bvec
)))
789 bio_clear_flag(bio
, BIO_SEG_VALID
);
795 bvec
->bv_page
= NULL
;
799 bio
->bi_iter
.bi_size
-= len
;
800 blk_recount_segments(q
, bio
);
803 EXPORT_SYMBOL(bio_add_pc_page
);
806 * bio_add_page - attempt to add page to bio
807 * @bio: destination bio
809 * @len: vec entry length
810 * @offset: vec entry offset
812 * Attempt to add a page to the bio_vec maplist. This will only fail
813 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
815 int bio_add_page(struct bio *bio, struct page *page,
816 		 unsigned int len, unsigned int offset)
821 	 * cloned bio must not modify vec list
823 	if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
827 	 * For filesystems with a blocksize smaller than the pagesize
828 	 * we will often be called with the same page as last time and
829 	 * a consecutive offset. Optimize this special case.
831 	if (bio->bi_vcnt > 0) {
832 		bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
834 		if (page == bv->bv_page &&
835 		    offset == bv->bv_offset + bv->bv_len) {
841 	if (bio->bi_vcnt >= bio->bi_max_vecs)
844 	bv = &bio->bi_io_vec[bio->bi_vcnt];
847 	bv->bv_offset = offset;
851 	bio->bi_iter.bi_size += len;
854 EXPORT_SYMBOL(bio_add_page);
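/*
 * Illustrative sketch (not part of the original file): checking the return
 * value of bio_add_page().  It returns the number of bytes actually added,
 * so a caller typically compares against the requested length and starts a
 * new bio once the current one is full.  example_add_or_submit() is a
 * hypothetical helper.
 */
static struct bio *example_add_or_submit(struct bio *bio, struct page *page,
					 unsigned int len, unsigned int off)
{
	if (bio_add_page(bio, page, len, off) < len) {
		submit_bio(bio);	/* bi_vcnt reached bi_max_vecs */
		return NULL;		/* caller allocates a fresh bio */
	}
	return bio;
}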
856 struct submit_bio_ret {
857 	struct completion event;
861 static void submit_bio_wait_endio(struct bio *bio)
863 	struct submit_bio_ret *ret = bio->bi_private;
865 	ret->error = bio->bi_error;
866 	complete(&ret->event);
870 * submit_bio_wait - submit a bio, and wait until it completes
871 * @bio: The &struct bio which describes the I/O
873 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
874 * bio_endio() on failure.
876 int submit_bio_wait(struct bio *bio)
878 	struct submit_bio_ret ret;
880 	init_completion(&ret.event);
881 	bio->bi_private = &ret;
882 	bio->bi_end_io = submit_bio_wait_endio;
883 	bio->bi_opf |= REQ_SYNC;
885 	wait_for_completion_io(&ret.event);
889 EXPORT_SYMBOL(submit_bio_wait);
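/*
 * Illustrative sketch (not part of the original file): synchronous I/O built
 * on submit_bio_wait(), reading a single page from a known sector.
 * bio_alloc() is the <linux/bio.h> wrapper around bio_alloc_bioset() and
 * fs_bio_set; example_read_page_sync() itself is hypothetical.
 */
static int example_read_page_sync(struct block_device *bdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	ret = submit_bio_wait(bio);	/* sleeps until the bio completes */
	bio_put(bio);
	return ret;
}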
892 * bio_advance - increment/complete a bio by some number of bytes
893 * @bio: bio to advance
894 * @bytes: number of bytes to complete
896 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
897 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
898 * be updated on the last bvec as well.
900 * @bio will then represent the remaining, uncompleted portion of the io.
902 void bio_advance(struct bio *bio, unsigned bytes)
904 	if (bio_integrity(bio))
905 		bio_integrity_advance(bio, bytes);
907 	bio_advance_iter(bio, &bio->bi_iter, bytes);
909 EXPORT_SYMBOL(bio_advance);
912 * bio_alloc_pages - allocates a single page for each bvec in a bio
913 * @bio: bio to allocate pages for
914 * @gfp_mask: flags for allocation
916 * Allocates pages up to @bio->bi_vcnt.
918 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are freed.
921 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
926 	bio_for_each_segment_all(bv, bio, i) {
927 		bv->bv_page = alloc_page(gfp_mask);
929 			while (--bv >= bio->bi_io_vec)
930 				__free_page(bv->bv_page);
937 EXPORT_SYMBOL(bio_alloc_pages);
940 * bio_copy_data - copy contents of data buffers from one chain of bios to another
942 * @src: source bio list
943 * @dst: destination bio list
945 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
946 * @src and @dst as linked lists of bios.
948 * Stops when it reaches the end of either @src or @dst - that is, copies
949 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
951 void bio_copy_data(struct bio *dst, struct bio *src)
953 	struct bvec_iter src_iter, dst_iter;
954 	struct bio_vec src_bv, dst_bv;
958 	src_iter = src->bi_iter;
959 	dst_iter = dst->bi_iter;
962 		if (!src_iter.bi_size) {
967 			src_iter = src->bi_iter;
970 		if (!dst_iter.bi_size) {
975 			dst_iter = dst->bi_iter;
978 		src_bv = bio_iter_iovec(src, src_iter);
979 		dst_bv = bio_iter_iovec(dst, dst_iter);
981 		bytes = min(src_bv.bv_len, dst_bv.bv_len);
983 		src_p = kmap_atomic(src_bv.bv_page);
984 		dst_p = kmap_atomic(dst_bv.bv_page);
986 		memcpy(dst_p + dst_bv.bv_offset,
987 		       src_p + src_bv.bv_offset,
990 		kunmap_atomic(dst_p);
991 		kunmap_atomic(src_p);
993 		bio_advance_iter(src, &src_iter, bytes);
994 		bio_advance_iter(dst, &dst_iter, bytes);
997 EXPORT_SYMBOL(bio_copy_data);
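/*
 * Illustrative sketch (not part of the original file): the bounce-buffer use
 * bio_copy_data() is typically put to.  A driver that must not touch the
 * caller's pages allocates its own with bio_alloc_pages() and copies the
 * payload across.  example_bounce() is hypothetical and assumes @src covers
 * no more than BIO_MAX_PAGES pages.
 */
static struct bio *example_bounce(struct bio *src, struct bio_set *bs)
{
	unsigned int nr = DIV_ROUND_UP(src->bi_iter.bi_size, PAGE_SIZE);
	unsigned int size = src->bi_iter.bi_size;
	struct bio *copy = bio_alloc_bioset(GFP_NOIO, nr, bs);
	unsigned int i;

	/* lay out one page-sized vec per page; bio_alloc_pages() fills bv_page */
	for (i = 0; i < nr; i++) {
		copy->bi_io_vec[i].bv_offset = 0;
		copy->bi_io_vec[i].bv_len = min_t(unsigned int, size, PAGE_SIZE);
		size -= copy->bi_io_vec[i].bv_len;
	}
	copy->bi_vcnt = nr;
	copy->bi_iter.bi_size = src->bi_iter.bi_size;

	if (bio_alloc_pages(copy, GFP_NOIO)) {
		bio_put(copy);
		return NULL;
	}

	bio_copy_data(copy, src);	/* arguments are (dst, src) */
	return copy;
}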
999 struct bio_map_data {
1001 	struct iov_iter iter;
1005 static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
1008 	if (iov_count > UIO_MAXIOV)
1011 	return kmalloc(sizeof(struct bio_map_data) +
1012 		       sizeof(struct iovec) * iov_count, gfp_mask);
1016 * bio_copy_from_iter - copy all pages from iov_iter to bio
1017 * @bio: The &struct bio which describes the I/O as destination
1018 * @iter: iov_iter as source
1020 * Copy all pages from iov_iter to bio.
1021 * Returns 0 on success, or error on failure.
1023 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
1026 	struct bio_vec *bvec;
1028 	bio_for_each_segment_all(bvec, bio, i) {
1031 		ret = copy_page_from_iter(bvec->bv_page,
1036 		if (!iov_iter_count(&iter))
1039 		if (ret < bvec->bv_len)
1047 * bio_copy_to_iter - copy all pages from bio to iov_iter
1048 * @bio: The &struct bio which describes the I/O as source
1049 * @iter: iov_iter as destination
1051 * Copy all pages from bio to iov_iter.
1052 * Returns 0 on success, or error on failure.
1054 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1057 	struct bio_vec *bvec;
1059 	bio_for_each_segment_all(bvec, bio, i) {
1062 		ret = copy_page_to_iter(bvec->bv_page,
1067 		if (!iov_iter_count(&iter))
1070 		if (ret < bvec->bv_len)
1077 void bio_free_pages(struct bio *bio)
1079 	struct bio_vec *bvec;
1082 	bio_for_each_segment_all(bvec, bio, i)
1083 		__free_page(bvec->bv_page);
1085 EXPORT_SYMBOL(bio_free_pages);
1088 * bio_uncopy_user - finish previously mapped bio
1089 * @bio: bio being terminated
1091 * Free pages allocated from bio_copy_user_iov() and write back data
1092 * to user space in case of a read.
1094 int bio_uncopy_user(struct bio *bio)
1096 	struct bio_map_data *bmd = bio->bi_private;
1099 	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1101 		 * if we're in a workqueue, the request is orphaned, so
1102 		 * don't copy into a random user address space, just free
1103 		 * and return -EINTR so user space doesn't expect any data.
1107 		else if (bio_data_dir(bio) == READ)
1108 			ret = bio_copy_to_iter(bio, bmd->iter);
1109 		if (bmd->is_our_pages)
1110 			bio_free_pages(bio);
1118 * bio_copy_user_iov - copy user data to bio
1119 * @q: destination block queue
1120 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1121 * @iter: iovec iterator
1122 * @gfp_mask: memory allocation flags
1124 * Prepares and returns a bio for indirect user io, bouncing data
1125 * to/from kernel pages as necessary. Must be paired with a
1126 * call to bio_uncopy_user() on io completion.
1128 struct bio
*bio_copy_user_iov(struct request_queue
*q
,
1129 struct rq_map_data
*map_data
,
1130 const struct iov_iter
*iter
,
1133 struct bio_map_data
*bmd
;
1138 unsigned int len
= iter
->count
;
1139 unsigned int offset
= map_data
? offset_in_page(map_data
->offset
) : 0;
1141 for (i
= 0; i
< iter
->nr_segs
; i
++) {
1142 unsigned long uaddr
;
1144 unsigned long start
;
1146 uaddr
= (unsigned long) iter
->iov
[i
].iov_base
;
1147 end
= (uaddr
+ iter
->iov
[i
].iov_len
+ PAGE_SIZE
- 1)
1149 start
= uaddr
>> PAGE_SHIFT
;
1155 return ERR_PTR(-EINVAL
);
1157 nr_pages
+= end
- start
;
1163 bmd
= bio_alloc_map_data(iter
->nr_segs
, gfp_mask
);
1165 return ERR_PTR(-ENOMEM
);
1168 * We need to do a deep copy of the iov_iter including the iovecs.
1169 * The caller provided iov might point to an on-stack or otherwise
1172 bmd
->is_our_pages
= map_data
? 0 : 1;
1173 memcpy(bmd
->iov
, iter
->iov
, sizeof(struct iovec
) * iter
->nr_segs
);
1175 bmd
->iter
.iov
= bmd
->iov
;
1178 bio
= bio_kmalloc(gfp_mask
, nr_pages
);
1182 if (iter
->type
& WRITE
)
1183 bio_set_op_attrs(bio
, REQ_OP_WRITE
, 0);
1188 nr_pages
= 1 << map_data
->page_order
;
1189 i
= map_data
->offset
/ PAGE_SIZE
;
1192 unsigned int bytes
= PAGE_SIZE
;
1200 if (i
== map_data
->nr_entries
* nr_pages
) {
1205 page
= map_data
->pages
[i
/ nr_pages
];
1206 page
+= (i
% nr_pages
);
1210 page
= alloc_page(q
->bounce_gfp
| gfp_mask
);
1217 if (bio_add_pc_page(q
, bio
, page
, bytes
, offset
) < bytes
)
1230 if (((iter
->type
& WRITE
) && (!map_data
|| !map_data
->null_mapped
)) ||
1231 (map_data
&& map_data
->from_user
)) {
1232 ret
= bio_copy_from_iter(bio
, *iter
);
1237 bio
->bi_private
= bmd
;
1241 bio_free_pages(bio
);
1245 return ERR_PTR(ret
);
1249 * bio_map_user_iov - map user iovec into bio
1250 * @q: the struct request_queue for the bio
1251 * @iter: iovec iterator
1252 * @gfp_mask: memory allocation flags
1254 * Map the user space address into a bio suitable for io to a block
1255 * device. Returns an error pointer in case of error.
1257 struct bio
*bio_map_user_iov(struct request_queue
*q
,
1258 const struct iov_iter
*iter
,
1263 struct page
**pages
;
1269 struct bio_vec
*bvec
;
1271 iov_for_each(iov
, i
, *iter
) {
1272 unsigned long uaddr
= (unsigned long) iov
.iov_base
;
1273 unsigned long len
= iov
.iov_len
;
1274 unsigned long end
= (uaddr
+ len
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
1275 unsigned long start
= uaddr
>> PAGE_SHIFT
;
1281 return ERR_PTR(-EINVAL
);
1283 nr_pages
+= end
- start
;
1285 * buffer must be aligned to at least logical block size for now
1287 if (uaddr
& queue_dma_alignment(q
))
1288 return ERR_PTR(-EINVAL
);
1292 return ERR_PTR(-EINVAL
);
1294 bio
= bio_kmalloc(gfp_mask
, nr_pages
);
1296 return ERR_PTR(-ENOMEM
);
1299 pages
= kcalloc(nr_pages
, sizeof(struct page
*), gfp_mask
);
1303 iov_for_each(iov
, i
, *iter
) {
1304 unsigned long uaddr
= (unsigned long) iov
.iov_base
;
1305 unsigned long len
= iov
.iov_len
;
1306 unsigned long end
= (uaddr
+ len
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
1307 unsigned long start
= uaddr
>> PAGE_SHIFT
;
1308 const int local_nr_pages
= end
- start
;
1309 const int page_limit
= cur_page
+ local_nr_pages
;
1311 ret
= get_user_pages_fast(uaddr
, local_nr_pages
,
1312 (iter
->type
& WRITE
) != WRITE
,
1314 if (unlikely(ret
< local_nr_pages
)) {
1315 for (j
= cur_page
; j
< page_limit
; j
++) {
1324 offset
= offset_in_page(uaddr
);
1325 for (j
= cur_page
; j
< page_limit
; j
++) {
1326 unsigned int bytes
= PAGE_SIZE
- offset
;
1327 unsigned short prev_bi_vcnt
= bio
->bi_vcnt
;
1338 if (bio_add_pc_page(q
, bio
, pages
[j
], bytes
, offset
) <
1343 * check if vector was merged with previous
1344 * drop page reference if needed
1346 if (bio
->bi_vcnt
== prev_bi_vcnt
)
1355 * release the pages we didn't map into the bio, if any
1357 while (j
< page_limit
)
1358 put_page(pages
[j
++]);
1364 * set data direction, and check if mapped pages need bouncing
1366 if (iter
->type
& WRITE
)
1367 bio_set_op_attrs(bio
, REQ_OP_WRITE
, 0);
1369 bio_set_flag(bio
, BIO_USER_MAPPED
);
1372 * subtle -- if __bio_map_user() ended up bouncing a bio,
1373 * it would normally disappear when its bi_end_io is run.
1374 * however, we need it for the unmap, so grab an extra
1381 bio_for_each_segment_all(bvec
, bio
, j
) {
1382 put_page(bvec
->bv_page
);
1387 return ERR_PTR(ret
);
1390 static void __bio_unmap_user(struct bio
*bio
)
1392 struct bio_vec
*bvec
;
1396 * make sure we dirty pages we wrote to
1398 bio_for_each_segment_all(bvec
, bio
, i
) {
1399 if (bio_data_dir(bio
) == READ
)
1400 set_page_dirty_lock(bvec
->bv_page
);
1402 put_page(bvec
->bv_page
);
1409 * bio_unmap_user - unmap a bio
1410 * @bio: the bio being unmapped
1412 * Unmap a bio previously mapped by bio_map_user(). Must be called with
1413 * a process context.
1415 * bio_unmap_user() may sleep.
1417 void bio_unmap_user(struct bio
*bio
)
1419 __bio_unmap_user(bio
);
1423 static void bio_map_kern_endio(struct bio
*bio
)
1429 * bio_map_kern - map kernel address into bio
1430 * @q: the struct request_queue for the bio
1431 * @data: pointer to buffer to map
1432 * @len: length in bytes
1433 * @gfp_mask: allocation flags for bio allocation
1435 * Map the kernel address into a bio suitable for io to a block
1436 * device. Returns an error pointer in case of error.
1438 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1441 	unsigned long kaddr = (unsigned long)data;
1442 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1443 	unsigned long start = kaddr >> PAGE_SHIFT;
1444 	const int nr_pages = end - start;
1448 	bio = bio_kmalloc(gfp_mask, nr_pages);
1450 		return ERR_PTR(-ENOMEM);
1452 	offset = offset_in_page(kaddr);
1453 	for (i = 0; i < nr_pages; i++) {
1454 		unsigned int bytes = PAGE_SIZE - offset;
1462 		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1464 			/* we don't support partial mappings */
1466 			return ERR_PTR(-EINVAL);
1474 	bio->bi_end_io = bio_map_kern_endio;
1477 EXPORT_SYMBOL(bio_map_kern);
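/*
 * Illustrative sketch (not part of the original file): wrapping a kernel
 * buffer with bio_map_kern() and driving the result synchronously.  The
 * usual consumer is the request layer (blk_rq_map_kern()); submitting the
 * bio directly, and example_write_buf_sync() itself, are simplifications
 * assumed for this example.  @buf must stay valid until the I/O completes,
 * which the blocking submit_bio_wait() guarantees here.
 */
static int example_write_buf_sync(struct request_queue *q,
				  struct block_device *bdev, sector_t sector,
				  void *buf, unsigned int len)
{
	struct bio *bio = bio_map_kern(q, buf, len, GFP_KERNEL);
	int ret;

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	ret = submit_bio_wait(bio);	/* overrides the bi_end_io set above */
	bio_put(bio);
	return ret;
}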
1479 static void bio_copy_kern_endio(struct bio
*bio
)
1481 bio_free_pages(bio
);
1485 static void bio_copy_kern_endio_read(struct bio
*bio
)
1487 char *p
= bio
->bi_private
;
1488 struct bio_vec
*bvec
;
1491 bio_for_each_segment_all(bvec
, bio
, i
) {
1492 memcpy(p
, page_address(bvec
->bv_page
), bvec
->bv_len
);
1496 bio_copy_kern_endio(bio
);
1500 * bio_copy_kern - copy kernel address into bio
1501 * @q: the struct request_queue for the bio
1502 * @data: pointer to buffer to copy
1503 * @len: length in bytes
1504 * @gfp_mask: allocation flags for bio and page allocation
1505 * @reading: data direction is READ
1507 * copy the kernel address into a bio suitable for io to a block
1508 * device. Returns an error pointer in case of error.
1510 struct bio
*bio_copy_kern(struct request_queue
*q
, void *data
, unsigned int len
,
1511 gfp_t gfp_mask
, int reading
)
1513 unsigned long kaddr
= (unsigned long)data
;
1514 unsigned long end
= (kaddr
+ len
+ PAGE_SIZE
- 1) >> PAGE_SHIFT
;
1515 unsigned long start
= kaddr
>> PAGE_SHIFT
;
1524 return ERR_PTR(-EINVAL
);
1526 nr_pages
= end
- start
;
1527 bio
= bio_kmalloc(gfp_mask
, nr_pages
);
1529 return ERR_PTR(-ENOMEM
);
1533 unsigned int bytes
= PAGE_SIZE
;
1538 page
= alloc_page(q
->bounce_gfp
| gfp_mask
);
1543 memcpy(page_address(page
), p
, bytes
);
1545 if (bio_add_pc_page(q
, bio
, page
, bytes
, 0) < bytes
)
1553 bio
->bi_end_io
= bio_copy_kern_endio_read
;
1554 bio
->bi_private
= data
;
1556 bio
->bi_end_io
= bio_copy_kern_endio
;
1557 bio_set_op_attrs(bio
, REQ_OP_WRITE
, 0);
1563 bio_free_pages(bio
);
1565 return ERR_PTR(-ENOMEM
);
1569 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1570 * for performing direct-IO in BIOs.
1572 * The problem is that we cannot run set_page_dirty() from interrupt context
1573 * because the required locks are not interrupt-safe. So what we can do is to
1574 * mark the pages dirty _before_ performing IO. And in interrupt context,
1575 * check that the pages are still dirty. If so, fine. If not, redirty them
1576 * in process context.
1578 * We special-case compound pages here: normally this means reads into hugetlb
1579 * pages. The logic in here doesn't really work right for compound pages
1580 * because the VM does not uniformly chase down the head page in all cases.
1581 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1582 * handle them at all. So we skip compound pages here at an early stage.
1584 * Note that this code is very hard to test under normal circumstances because
1585 * direct-io pins the pages with get_user_pages(). This makes
1586 * is_page_cache_freeable return false, and the VM will not clean the pages.
1587 * But other code (eg, flusher threads) could clean the pages if they are mapped
1590 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1591 * deferred bio dirtying paths.
1595 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1597 void bio_set_pages_dirty(struct bio *bio)
1599 	struct bio_vec *bvec;
1602 	bio_for_each_segment_all(bvec, bio, i) {
1603 		struct page *page = bvec->bv_page;
1605 		if (page && !PageCompound(page))
1606 			set_page_dirty_lock(page);
1610 static void bio_release_pages(struct bio *bio)
1612 	struct bio_vec *bvec;
1615 	bio_for_each_segment_all(bvec, bio, i) {
1616 		struct page *page = bvec->bv_page;
1624 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1625 * If they are, then fine. If, however, some pages are clean then they must
1626 * have been written out during the direct-IO read. So we take another ref on
1627 * the BIO and the offending pages and re-dirty the pages in process context.
1629 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1630 * here on. It will run one put_page() against each page and will run one
1631 * bio_put() against the BIO.
1634 static void bio_dirty_fn(struct work_struct
*work
);
1636 static DECLARE_WORK(bio_dirty_work
, bio_dirty_fn
);
1637 static DEFINE_SPINLOCK(bio_dirty_lock
);
1638 static struct bio
*bio_dirty_list
;
1641 * This runs in process context
1643 static void bio_dirty_fn(struct work_struct
*work
)
1645 unsigned long flags
;
1648 spin_lock_irqsave(&bio_dirty_lock
, flags
);
1649 bio
= bio_dirty_list
;
1650 bio_dirty_list
= NULL
;
1651 spin_unlock_irqrestore(&bio_dirty_lock
, flags
);
1654 struct bio
*next
= bio
->bi_private
;
1656 bio_set_pages_dirty(bio
);
1657 bio_release_pages(bio
);
1663 void bio_check_pages_dirty(struct bio
*bio
)
1665 struct bio_vec
*bvec
;
1666 int nr_clean_pages
= 0;
1669 bio_for_each_segment_all(bvec
, bio
, i
) {
1670 struct page
*page
= bvec
->bv_page
;
1672 if (PageDirty(page
) || PageCompound(page
)) {
1674 bvec
->bv_page
= NULL
;
1680 if (nr_clean_pages
) {
1681 unsigned long flags
;
1683 spin_lock_irqsave(&bio_dirty_lock
, flags
);
1684 bio
->bi_private
= bio_dirty_list
;
1685 bio_dirty_list
= bio
;
1686 spin_unlock_irqrestore(&bio_dirty_lock
, flags
);
1687 schedule_work(&bio_dirty_work
);
1693 void generic_start_io_acct(int rw, unsigned long sectors,
1694 			   struct hd_struct *part)
1696 	int cpu = part_stat_lock();
1698 	part_round_stats(cpu, part);
1699 	part_stat_inc(cpu, part, ios[rw]);
1700 	part_stat_add(cpu, part, sectors[rw], sectors);
1701 	part_inc_in_flight(part, rw);
1705 EXPORT_SYMBOL(generic_start_io_acct);
1707 void generic_end_io_acct(int rw, struct hd_struct *part,
1708 			 unsigned long start_time)
1710 	unsigned long duration = jiffies - start_time;
1711 	int cpu = part_stat_lock();
1713 	part_stat_add(cpu, part, ticks[rw], duration);
1714 	part_round_stats(cpu, part);
1715 	part_dec_in_flight(part, rw);
1719 EXPORT_SYMBOL(generic_end_io_acct);
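/*
 * Illustrative sketch (not part of the original file): how a bio-based driver
 * typically brackets an I/O with the two accounting helpers so it shows up in
 * the disk statistics.  Using the whole-disk partition (&disk->part0) and the
 * example_* names are assumptions made for this example.
 */
static unsigned long example_acct_start(struct gendisk *disk, struct bio *bio)
{
	generic_start_io_acct(bio_data_dir(bio), bio_sectors(bio), &disk->part0);
	return jiffies;		/* remember the start time for the end call */
}

static void example_acct_end(struct gendisk *disk, struct bio *bio,
			     unsigned long start_time)
{
	generic_end_io_acct(bio_data_dir(bio), &disk->part0, start_time);
}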
1721 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1722 void bio_flush_dcache_pages(struct bio *bi)
1724 	struct bio_vec bvec;
1725 	struct bvec_iter iter;
1727 	bio_for_each_segment(bvec, bi, iter)
1728 		flush_dcache_page(bvec.bv_page);
1730 EXPORT_SYMBOL(bio_flush_dcache_pages);
1733 static inline bool bio_remaining_done(struct bio *bio)
1736 	 * If we're not chaining, then ->__bi_remaining is always 1 and
1737 	 * we always end io on the first invocation.
1739 	if (!bio_flagged(bio, BIO_CHAIN))
1742 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1744 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
1745 		bio_clear_flag(bio, BIO_CHAIN);
1753 * bio_endio - end I/O on a bio
1757 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1758 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1759 * bio unless they own it and thus know that it has an end_io function.
1761 void bio_endio(struct bio *bio)
1764 	if (!bio_remaining_done(bio))
1768 	 * Need to have a real endio function for chained bios, otherwise
1769 	 * various corner cases will break (like stacking block devices that
1770 	 * save/restore bi_end_io) - however, we want to avoid unbounded
1771 	 * recursion and blowing the stack. Tail call optimization would
1772 	 * handle this, but compiling with frame pointers also disables
1773 	 * gcc's sibling call optimization.
1775 	if (bio->bi_end_io == bio_chain_endio) {
1776 		bio = __bio_chain_endio(bio);
1781 		bio->bi_end_io(bio);
1783 EXPORT_SYMBOL(bio_endio);
1786 * bio_split - split a bio
1787 * @bio: bio to split
1788 * @sectors: number of sectors to split from the front of @bio
1790 * @bs: bio set to allocate from
1792 * Allocates and returns a new bio which represents @sectors from the start of
1793 * @bio, and updates @bio to represent the remaining sectors.
1795 * Unless this is a discard request the newly allocated bio will point
1796 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1797 * @bio is not freed before the split.
1799 struct bio *bio_split(struct bio *bio, int sectors,
1800 		      gfp_t gfp, struct bio_set *bs)
1802 	struct bio *split = NULL;
1804 	BUG_ON(sectors <= 0);
1805 	BUG_ON(sectors >= bio_sectors(bio));
1808 	 * Discards need a mutable bio_vec to accommodate the payload
1809 	 * required by the DSM TRIM and UNMAP commands.
1811 	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
1812 		split = bio_clone_bioset(bio, gfp, bs);
1814 		split = bio_clone_fast(bio, gfp, bs);
1819 	split->bi_iter.bi_size = sectors << 9;
1821 	if (bio_integrity(split))
1822 		bio_integrity_trim(split, 0, sectors);
1824 	bio_advance(bio, split->bi_iter.bi_size);
1828 EXPORT_SYMBOL(bio_split);
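/*
 * Illustrative sketch (not part of the original file): the common
 * split + chain + resubmit pattern built on bio_split(), for a device that
 * can only handle max_sectors per request.  example_make_request(),
 * max_sectors and split_set are assumptions made for this example.
 */
static void example_make_request(struct bio *bio, unsigned int max_sectors,
				 struct bio_set *split_set)
{
	if (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO,
					      split_set);

		bio_chain(split, bio);		/* @bio completes after @split */
		generic_make_request(bio);	/* re-queue the remainder */
		bio = split;
	}

	/* @bio is now at most max_sectors long; hand it to the lower device */
	generic_make_request(bio);
}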
1831 * bio_trim - trim a bio
1833 * @offset: number of sectors to trim from the front of @bio
1834 * @size: size we want to trim @bio to, in sectors
1836 void bio_trim(struct bio *bio, int offset, int size)
1838 	/* 'bio' is a cloned bio which we need to trim to match
1839 	 * the given offset and size.
1843 	if (offset == 0 && size == bio->bi_iter.bi_size)
1846 	bio_clear_flag(bio, BIO_SEG_VALID);
1848 	bio_advance(bio, offset << 9);
1850 	bio->bi_iter.bi_size = size;
1852 EXPORT_SYMBOL_GPL(bio_trim);
1855 * create memory pools for biovec's in a bio_set.
1856 * use the global biovec slabs created for general use.
1858 mempool_t *biovec_create_pool(int pool_entries)
1860 	struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1862 	return mempool_create_slab_pool(pool_entries, bp->slab);
1865 void bioset_free(struct bio_set *bs)
1867 	if (bs->rescue_workqueue)
1868 		destroy_workqueue(bs->rescue_workqueue);
1871 	mempool_destroy(bs->bio_pool);
1874 	mempool_destroy(bs->bvec_pool);
1876 	bioset_integrity_free(bs);
1881 EXPORT_SYMBOL(bioset_free);
1883 static struct bio_set *__bioset_create(unsigned int pool_size,
1884 				       unsigned int front_pad,
1885 				       bool create_bvec_pool)
1887 	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1890 	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1894 	bs->front_pad = front_pad;
1896 	spin_lock_init(&bs->rescue_lock);
1897 	bio_list_init(&bs->rescue_list);
1898 	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1900 	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1901 	if (!bs->bio_slab) {
1906 	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1910 	if (create_bvec_pool) {
1911 		bs->bvec_pool = biovec_create_pool(pool_size);
1916 	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1917 	if (!bs->rescue_workqueue)
1927 * bioset_create - Create a bio_set
1928 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1929 * @front_pad: Number of bytes to allocate in front of the returned bio
1932 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1933 * to ask for a number of bytes to be allocated in front of the bio.
1934 * Front pad allocation is useful for embedding the bio inside
1935 * another structure, to avoid allocating extra data to go with the bio.
1936 * Note that the bio must be embedded at the END of that structure always,
1937 * or things will break badly.
1939 struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1941 	return __bioset_create(pool_size, front_pad, true);
1943 EXPORT_SYMBOL(bioset_create);
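/*
 * Illustrative sketch (not part of the original file): the front_pad
 * embedding described above.  struct example_io is hypothetical; the bio must
 * stay the last member, and offsetof() gives the front_pad, so container_of()
 * recovers the driver's per-I/O state from any bio allocated out of this set.
 */
struct example_io {
	void *private_data;
	unsigned long start_time;
	struct bio bio;			/* must remain the last member */
};

static struct bio_set *example_create_set(void)
{
	return bioset_create(BIO_POOL_SIZE, offsetof(struct example_io, bio));
}

static struct example_io *example_alloc_io(struct bio_set *bs)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 0, bs);

	return container_of(bio, struct example_io, bio);
}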
1946 * bioset_create_nobvec - Create a bio_set without bio_vec mempool
1947 * @pool_size: Number of bio to cache in the mempool
1948 * @front_pad: Number of bytes to allocate in front of the returned bio
1951 * Same functionality as bioset_create() except that mempool is not
1952 * created for bio_vecs. Saving some memory for bio_clone_fast() users.
1954 struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
1956 	return __bioset_create(pool_size, front_pad, false);
1958 EXPORT_SYMBOL(bioset_create_nobvec);
1960 #ifdef CONFIG_BLK_CGROUP
1963 * bio_associate_blkcg - associate a bio with the specified blkcg
1965 * @blkcg_css: css of the blkcg to associate
1967 * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
1968 * treat @bio as if it were issued by a task which belongs to the blkcg.
1970 * This function takes an extra reference of @blkcg_css which will be put
1971 * when @bio is released. The caller must own @bio and is responsible for
1972 * synchronizing calls to this function.
1974 int bio_associate_blkcg(struct bio
*bio
, struct cgroup_subsys_state
*blkcg_css
)
1976 if (unlikely(bio
->bi_css
))
1979 bio
->bi_css
= blkcg_css
;
1982 EXPORT_SYMBOL_GPL(bio_associate_blkcg
);
1985 * bio_associate_current - associate a bio with %current
1988 * Associate @bio with %current if it hasn't been associated yet. Block
1989 * layer will treat @bio as if it were issued by %current no matter which
1990 * task actually issues it.
1992 * This function takes an extra reference of @task's io_context and blkcg
1993 * which will be put when @bio is released. The caller must own @bio,
1994 * ensure %current->io_context exists, and is responsible for synchronizing
1995 * calls to this function.
1997 int bio_associate_current(struct bio
*bio
)
1999 struct io_context
*ioc
;
2004 ioc
= current
->io_context
;
2008 get_io_context_active(ioc
);
2010 bio
->bi_css
= task_get_css(current
, io_cgrp_id
);
2013 EXPORT_SYMBOL_GPL(bio_associate_current
);
2016 * bio_disassociate_task - undo bio_associate_current()
2019 void bio_disassociate_task(struct bio
*bio
)
2022 put_io_context(bio
->bi_ioc
);
2026 css_put(bio
->bi_css
);
2032 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
2033 * @dst: destination bio
2036 void bio_clone_blkcg_association(struct bio
*dst
, struct bio
*src
)
2039 WARN_ON(bio_associate_blkcg(dst
, src
->bi_css
));
2042 #endif /* CONFIG_BLK_CGROUP */
2044 static void __init biovec_init_slabs(void)
2048 	for (i = 0; i < BVEC_POOL_NR; i++) {
2050 		struct biovec_slab *bvs = bvec_slabs + i;
2052 		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2057 		size = bvs->nr_vecs * sizeof(struct bio_vec);
2058 		bvs->slab = kmem_cache_create(bvs->name, size, 0,
2059 					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2063 static int __init init_bio(void)
2067 	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2069 		panic("bio: can't allocate bios\n");
2071 	bio_integrity_init();
2072 	biovec_init_slabs();
2074 	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
2076 		panic("bio: can't allocate bios\n");
2078 	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2079 		panic("bio: can't create integrity pool\n");
2083 subsys_initcall(init_bio);