/*
 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/uio.h>
#include <linux/iocontext.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include <trace/events/block.h>
/*
 * Test patch to inline a certain number of bi_io_vec's inside the bio
 * itself, to shrink a bio data allocation from two mempool calls to one
 */
#define BIO_INLINE_VECS		4
/*
 * if you change this list, also change bvec_alloc or things will
 * break badly! cannot be bigger than what you can fit into an
 * unsigned short
 */
#define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
};
/*
 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
 * IO code that does not need private memory pools.
 */
struct bio_set *fs_bio_set;
EXPORT_SYMBOL(fs_bio_set);
/*
 * Our slab pool management
 */
struct bio_slab {
	struct kmem_cache *slab;
	unsigned int slab_ref;
	unsigned int slab_size;
	char name[8];
};
static DEFINE_MUTEX(bio_slab_lock);
static struct bio_slab *bio_slabs;
static unsigned int bio_slab_nr, bio_slab_max;
static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
{
	unsigned int sz = sizeof(struct bio) + extra_size;
	struct kmem_cache *slab = NULL;
	struct bio_slab *bslab, *new_bio_slabs;
	unsigned int new_bio_slab_max;
	unsigned int i, entry = -1;

	mutex_lock(&bio_slab_lock);

	i = 0;
	while (i < bio_slab_nr) {
		bslab = &bio_slabs[i];

		if (!bslab->slab && entry == -1)
			entry = i;
		else if (bslab->slab_size == sz) {
			slab = bslab->slab;
			bslab->slab_ref++;
			break;
		}
		i++;
	}

	if (slab)
		goto out_unlock;

	if (bio_slab_nr == bio_slab_max && entry == -1) {
		new_bio_slab_max = bio_slab_max << 1;
		new_bio_slabs = krealloc(bio_slabs,
					 new_bio_slab_max * sizeof(struct bio_slab),
					 GFP_KERNEL);
		if (!new_bio_slabs)
			goto out_unlock;
		bio_slab_max = new_bio_slab_max;
		bio_slabs = new_bio_slabs;
	}
	if (entry == -1)
		entry = bio_slab_nr++;

	bslab = &bio_slabs[entry];

	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!slab)
		goto out_unlock;

	bslab->slab = slab;
	bslab->slab_ref = 1;
	bslab->slab_size = sz;
out_unlock:
	mutex_unlock(&bio_slab_lock);
	return slab;
}
static void bio_put_slab(struct bio_set *bs)
{
	struct bio_slab *bslab = NULL;
	unsigned int i;

	mutex_lock(&bio_slab_lock);

	for (i = 0; i < bio_slab_nr; i++) {
		if (bs->bio_slab == bio_slabs[i].slab) {
			bslab = &bio_slabs[i];
			break;
		}
	}

	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
		goto out;

	WARN_ON(!bslab->slab_ref);

	if (--bslab->slab_ref)
		goto out;

	kmem_cache_destroy(bslab->slab);
	bslab->slab = NULL;

out:
	mutex_unlock(&bio_slab_lock);
}
unsigned int bvec_nr_vecs(unsigned short idx)
{
	return bvec_slabs[idx].nr_vecs;
}
void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
{
	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);

	if (idx == BIOVEC_MAX_IDX)
		mempool_free(bv, pool);
	else {
		struct biovec_slab *bvs = bvec_slabs + idx;

		kmem_cache_free(bvs->slab, bv);
	}
}
struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
			   mempool_t *pool)
{
	struct bio_vec *bvl;

	/*
	 * see comment near bvec_array define!
	 */
	switch (nr) {
	/* cases for 1 ... 128 pick the matching smaller biovec slab */
	case 129 ... BIO_MAX_PAGES:
		*idx = BIOVEC_MAX_IDX;
		break;
	default:
		return NULL;
	}

	/*
	 * idx now points to the pool we want to allocate from. only the
	 * 1-vec entry pool is mempool backed.
	 */
	if (*idx == BIOVEC_MAX_IDX) {
fallback:
		bvl = mempool_alloc(pool, gfp_mask);
	} else {
		struct biovec_slab *bvs = bvec_slabs + *idx;
		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);

		/*
		 * Make this allocation restricted and don't dump info on
		 * allocation failures, since we'll fallback to the mempool
		 * in case of failure.
		 */
		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;

		/*
		 * Try a slab allocation. If this fails and __GFP_WAIT
		 * is set, retry with the 1-entry mempool
		 */
		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
			*idx = BIOVEC_MAX_IDX;
			goto fallback;
		}
	}

	return bvl;
}
static void __bio_free(struct bio *bio)
{
	bio_disassociate_task(bio);

	if (bio_integrity(bio))
		bio_integrity_free(bio);
}
static void bio_free(struct bio *bio)
{
	struct bio_set *bs = bio->bi_pool;
	void *p;

	__bio_free(bio);

	if (bs) {
		if (bio_flagged(bio, BIO_OWNS_VEC))
			bvec_free(bs->bvec_pool, bio->bi_io_vec, BIO_POOL_IDX(bio));

		/*
		 * If we have front padding, adjust the bio pointer before freeing
		 */
		p = bio;
		p -= bs->front_pad;

		mempool_free(p, bs->bio_pool);
	} else {
		/* Bio was allocated by bio_kmalloc() */
		kfree(bio);
	}
}
void bio_init(struct bio *bio)
{
	memset(bio, 0, sizeof(*bio));
	bio->bi_flags = 1 << BIO_UPTODATE;
	atomic_set(&bio->bi_remaining, 1);
	atomic_set(&bio->bi_cnt, 1);
}
EXPORT_SYMBOL(bio_init);
/**
 * bio_reset - reinitialize a bio
 * @bio:	bio to reset
 *
 * Description:
 *   After calling bio_reset(), @bio will be in the same state as a freshly
 *   allocated bio returned by bio_alloc_bioset() - the only fields that are
 *   preserved are the ones that are initialized by bio_alloc_bioset(). See
 *   comment in struct bio.
 */
void bio_reset(struct bio *bio)
{
	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

	__bio_free(bio);

	memset(bio, 0, BIO_RESET_BYTES);
	bio->bi_flags = flags | (1 << BIO_UPTODATE);
	atomic_set(&bio->bi_remaining, 1);
}
EXPORT_SYMBOL(bio_reset);
static void bio_chain_endio(struct bio *bio, int error)
{
	bio_endio(bio->bi_private, error);
	bio_put(bio);
}
/**
 * bio_chain - chain bio completions
 * @bio: the target bio
 * @parent: the @bio's parent bio
 *
 * The caller won't have a bi_end_io called when @bio completes - instead,
 * @parent's bi_end_io won't be called until both @parent and @bio have
 * completed; the chained bio will also be freed when it completes.
 *
 * The caller must not set bi_private or bi_end_io in @bio.
 */
void bio_chain(struct bio *bio, struct bio *parent)
{
	BUG_ON(bio->bi_private || bio->bi_end_io);

	bio->bi_private = parent;
	bio->bi_end_io = bio_chain_endio;
	atomic_inc(&parent->bi_remaining);
}
EXPORT_SYMBOL(bio_chain);
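
/*
 * Illustrative example (editorial sketch, not part of bio.c): a submitter
 * that wants extra I/O tied to a parent bio can chain a child to it, so the
 * parent's bi_end_io only runs once both have completed.  The "example_*"
 * names and the single-page layout are assumptions made up for the example.
 */
static void example_chain_extra_write(struct bio *parent,
				      struct block_device *bdev,
				      sector_t sector, struct page *page)
{
	struct bio *child = bio_alloc(GFP_NOIO, 1);

	child->bi_bdev = bdev;
	child->bi_iter.bi_sector = sector;
	bio_add_page(child, page, PAGE_SIZE, 0);

	/* parent now completes only after child does; child frees itself */
	bio_chain(child, parent);
	submit_bio(WRITE, child);
}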
static void bio_alloc_rescue(struct work_struct *work)
{
	struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
	struct bio *bio;

	while (1) {
		spin_lock(&bs->rescue_lock);
		bio = bio_list_pop(&bs->rescue_list);
		spin_unlock(&bs->rescue_lock);

		if (!bio)
			break;

		generic_make_request(bio);
	}
}
static void punt_bios_to_rescuer(struct bio_set *bs)
{
	struct bio_list punt, nopunt;
	struct bio *bio;

	/*
	 * In order to guarantee forward progress we must punt only bios that
	 * were allocated from this bio_set; otherwise, if there was a bio on
	 * there for a stacking driver higher up in the stack, processing it
	 * could require allocating bios from this bio_set, and doing that from
	 * our own rescuer would be bad.
	 *
	 * Since bio lists are singly linked, pop them all instead of trying to
	 * remove from the middle of the list:
	 */

	bio_list_init(&punt);
	bio_list_init(&nopunt);

	while ((bio = bio_list_pop(current->bio_list)))
		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);

	*current->bio_list = nopunt;

	spin_lock(&bs->rescue_lock);
	bio_list_merge(&bs->rescue_list, &punt);
	spin_unlock(&bs->rescue_lock);

	queue_work(bs->rescue_workqueue, &bs->rescue_work);
}
/**
 * bio_alloc_bioset - allocate a bio for I/O
 * @gfp_mask:   the GFP_ mask given to the slab allocator
 * @nr_iovecs:	number of iovecs to pre-allocate
 * @bs:		the bio_set to allocate from.
 *
 * Description:
 *   If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
 *   backed by the @bs's mempool.
 *
 *   When @bs is not NULL, if %__GFP_WAIT is set then bio_alloc will always be
 *   able to allocate a bio. This is due to the mempool guarantees. To make this
 *   work, callers must never allocate more than 1 bio at a time from this pool.
 *   Callers that need to allocate more than 1 bio must always submit the
 *   previously allocated bio for IO before attempting to allocate a new one.
 *   Failure to do so can cause deadlocks under memory pressure.
 *
 *   Note that when running under generic_make_request() (i.e. any block
 *   driver), bios are not submitted until after you return - see the code in
 *   generic_make_request() that converts recursion into iteration, to prevent
 *   stack overflows.
 *
 *   This would normally mean allocating multiple bios under
 *   generic_make_request() would be susceptible to deadlocks, but we have
 *   deadlock avoidance code that resubmits any blocked bios from a rescuer
 *   thread.
 *
 *   However, we do not guarantee forward progress for allocations from other
 *   mempools. Doing multiple allocations from the same mempool under
 *   generic_make_request() should be avoided - instead, use bio_set's front_pad
 *   for per bio allocations.
 *
 *   RETURNS:
 *   Pointer to new bio on success, NULL on failure.
 */
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
	gfp_t saved_gfp = gfp_mask;
	unsigned front_pad;
	unsigned inline_vecs;
	unsigned long idx = BIO_POOL_NONE;
	struct bio_vec *bvl = NULL;
	struct bio *bio;
	void *p;

	if (!bs) {
		if (nr_iovecs > UIO_MAXIOV)
			return NULL;

		p = kmalloc(sizeof(struct bio) +
			    nr_iovecs * sizeof(struct bio_vec),
			    gfp_mask);
		front_pad = 0;
		inline_vecs = nr_iovecs;
	} else {
		/*
		 * generic_make_request() converts recursion to iteration; this
		 * means if we're running beneath it, any bios we allocate and
		 * submit will not be submitted (and thus freed) until after we
		 * return.
		 *
		 * This exposes us to a potential deadlock if we allocate
		 * multiple bios from the same bio_set() while running
		 * underneath generic_make_request(). If we were to allocate
		 * multiple bios (say a stacking block driver that was splitting
		 * bios), we would deadlock if we exhausted the mempool's
		 * reserve.
		 *
		 * We solve this, and guarantee forward progress, with a rescuer
		 * workqueue per bio_set. If we go to allocate and there are
		 * bios on current->bio_list, we first try the allocation
		 * without __GFP_WAIT; if that fails, we punt those bios we
		 * would be blocking to the rescuer workqueue before we retry
		 * with the original gfp_flags.
		 */

		if (current->bio_list && !bio_list_empty(current->bio_list))
			gfp_mask &= ~__GFP_WAIT;

		p = mempool_alloc(bs->bio_pool, gfp_mask);
		if (!p && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			p = mempool_alloc(bs->bio_pool, gfp_mask);
		}

		front_pad = bs->front_pad;
		inline_vecs = BIO_INLINE_VECS;
	}

	if (unlikely(!p))
		return NULL;

	bio = p + front_pad;
	bio_init(bio);

	if (nr_iovecs > inline_vecs) {
		bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		if (!bvl && gfp_mask != saved_gfp) {
			punt_bios_to_rescuer(bs);
			gfp_mask = saved_gfp;
			bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
		}

		if (unlikely(!bvl))
			goto err_free;

		bio->bi_flags |= 1 << BIO_OWNS_VEC;
	} else if (nr_iovecs) {
		bvl = bio->bi_inline_vecs;
	}

	bio->bi_pool = bs;
	bio->bi_flags |= idx << BIO_POOL_OFFSET;
	bio->bi_max_vecs = nr_iovecs;
	bio->bi_io_vec = bvl;
	return bio;

err_free:
	mempool_free(p, bs->bio_pool);
	return NULL;
}
EXPORT_SYMBOL(bio_alloc_bioset);
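
/*
 * Illustrative example (editorial sketch, not part of bio.c): per the rules
 * above, a caller relying on the mempool guarantee must have only one bio
 * from the pool outstanding at a time, i.e. submit each bio before
 * allocating the next.  "example_bio_set" and "example_write_pages" are
 * assumed names for the example.
 */
static struct bio_set *example_bio_set;	/* created with bioset_create() */

static void example_write_pages(struct block_device *bdev, sector_t sector,
				struct page **pages, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, example_bio_set);

		bio->bi_bdev = bdev;
		bio->bi_iter.bi_sector = sector + (i << (PAGE_SHIFT - 9));
		bio_add_page(bio, pages[i], PAGE_SIZE, 0);

		/* submit before the next allocation from the same pool */
		submit_bio(WRITE, bio);
	}
}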
void zero_fill_bio(struct bio *bio)
{
	unsigned long flags;
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		char *data = bvec_kmap_irq(&bv, &flags);
		memset(data, 0, bv.bv_len);
		flush_dcache_page(bv.bv_page);
		bvec_kunmap_irq(data, &flags);
	}
}
EXPORT_SYMBOL(zero_fill_bio);
/**
 * bio_put - release a reference to a bio
 * @bio:   bio to release reference to
 *
 * Description:
 *   Put a reference to a &struct bio, either one you have gotten with
 *   bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
 **/
void bio_put(struct bio *bio)
{
	BIO_BUG_ON(!atomic_read(&bio->bi_cnt));

	/*
	 * last put frees it
	 */
	if (atomic_dec_and_test(&bio->bi_cnt))
		bio_free(bio);
}
EXPORT_SYMBOL(bio_put);
inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
{
	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
		blk_recount_segments(q, bio);

	return bio->bi_phys_segments;
}
EXPORT_SYMBOL(bio_phys_segments);
/**
 * 	__bio_clone_fast - clone a bio that shares the original bio's biovec
 * 	@bio: destination bio
 * 	@bio_src: bio to clone
 *
 *	Clone a &bio. Caller will own the returned bio, but not
 *	the actual data it points to. Reference count of returned
 *	bio will be one.
 *
 * 	Caller must ensure that @bio_src is not freed before @bio.
 */
void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
{
	BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);

	/*
	 * most users will be overriding ->bi_bdev with a new target,
	 * so we don't set nor calculate new physical/hw segment counts here
	 */
	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_flags |= 1 << BIO_CLONED;
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;
}
EXPORT_SYMBOL(__bio_clone_fast);
/**
 *	bio_clone_fast - clone a bio that shares the original bio's biovec
 *	@bio: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Like __bio_clone_fast, only also allocates the returned bio
 */
struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
{
	struct bio *b;

	b = bio_alloc_bioset(gfp_mask, 0, bs);
	if (!b)
		return NULL;

	__bio_clone_fast(b, bio);

	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask);
		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL(bio_clone_fast);
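
/*
 * Illustrative example (editorial sketch, not part of bio.c): a simple
 * remapping/stacking driver typically clones the incoming bio with
 * bio_clone_fast(), redirects the clone, and completes the original from the
 * clone's endio.  The "example_*" names and the remapping scheme are
 * assumptions made up for the example.
 */
static void example_clone_endio(struct bio *clone, int error)
{
	struct bio *orig = clone->bi_private;

	bio_put(clone);
	bio_endio(orig, error);
}

static void example_remap(struct bio *bio, struct block_device *lower,
			  sector_t offset, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(bio, GFP_NOIO, bs);

	clone->bi_bdev = lower;
	clone->bi_iter.bi_sector += offset;
	clone->bi_private = bio;
	clone->bi_end_io = example_clone_endio;
	generic_make_request(clone);
}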
/**
 * 	bio_clone_bioset - clone a bio
 * 	@bio_src: bio to clone
 *	@gfp_mask: allocation priority
 *	@bs: bio_set to allocate from
 *
 *	Clone bio. Caller will own the returned bio, but not the actual data it
 *	points to. Reference count of returned bio will be one.
 */
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
			     struct bio_set *bs)
{
	struct bvec_iter iter;
	struct bio_vec bv;
	struct bio *bio;

	/*
	 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
	 * bio_src->bi_io_vec to bio->bi_io_vec.
	 *
	 * We can't do that anymore, because:
	 *
	 *  - The point of cloning the biovec is to produce a bio with a biovec
	 *    the caller can modify: bi_idx and bi_bvec_done should be 0.
	 *
	 *  - The original bio could've had more than BIO_MAX_PAGES biovecs; if
	 *    we tried to clone the whole thing bio_alloc_bioset() would fail.
	 *    But the clone should succeed as long as the number of biovecs we
	 *    actually need to allocate is fewer than BIO_MAX_PAGES.
	 *
	 *  - Lastly, bi_vcnt should not be looked at or relied upon by code
	 *    that does not own the bio - reason being drivers don't use it for
	 *    iterating over the biovec anymore, so expecting it to be kept up
	 *    to date (i.e. for clones that share the parent biovec) is just
	 *    asking for trouble and would force extra work on
	 *    __bio_clone_fast() anyways.
	 */

	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
	if (!bio)
		return NULL;

	bio->bi_bdev		= bio_src->bi_bdev;
	bio->bi_rw		= bio_src->bi_rw;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;

	if (bio->bi_rw & REQ_DISCARD)
		goto integrity_clone;

	if (bio->bi_rw & REQ_WRITE_SAME) {
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		goto integrity_clone;
	}

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

integrity_clone:
	if (bio_integrity(bio_src)) {
		int ret;

		ret = bio_integrity_clone(bio, bio_src, gfp_mask);
		if (ret < 0) {
			bio_put(bio);
			return NULL;
		}
	}

	return bio;
}
EXPORT_SYMBOL(bio_clone_bioset);
/**
 *	bio_get_nr_vecs		- return approx number of vecs
 *	@bdev:  I/O target
 *
 *	Return the approximate number of pages we can send to this target.
 *	There's no guarantee that you will be able to fit this number of pages
 *	into a bio, it does not account for dynamic restrictions that vary
 *	depending on offset.
 */
int bio_get_nr_vecs(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int nr_pages;

	nr_pages = min_t(unsigned,
		     queue_max_segments(q),
		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);

	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
}
EXPORT_SYMBOL(bio_get_nr_vecs);
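
/*
 * Illustrative example (editorial sketch, not part of bio.c): the usual
 * pattern is to clamp the number of pages a caller wants by
 * bio_get_nr_vecs() when sizing the bio allocation.  "example_bio_for" is an
 * assumed name.
 */
static struct bio *example_bio_for(struct block_device *bdev,
				   unsigned int nr_pages)
{
	unsigned int nr = min_t(unsigned int, nr_pages, bio_get_nr_vecs(bdev));

	return bio_alloc(GFP_KERNEL, nr);
}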
static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
			  *page, unsigned int len, unsigned int offset,
			  unsigned int max_sectors)
{
	int retried_segments = 0;
	struct bio_vec *bvec;

	/*
	 * cloned bio must not modify vec list
	 */
	if (unlikely(bio_flagged(bio, BIO_CLONED)))
		return 0;

	if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
		return 0;

	/*
	 * For filesystems with a blocksize smaller than the pagesize
	 * we will often be called with the same page as last time and
	 * a consecutive offset. Optimize this special case.
	 */
	if (bio->bi_vcnt > 0) {
		struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];

		if (page == prev->bv_page &&
		    offset == prev->bv_offset + prev->bv_len) {
			unsigned int prev_bv_len = prev->bv_len;
			prev->bv_len += len;

			if (q->merge_bvec_fn) {
				struct bvec_merge_data bvm = {
					/* prev_bvec is already charged in
					   bi_size, discharge it in order to
					   simulate merging updated prev_bvec
					   as new bvec. */
					.bi_bdev = bio->bi_bdev,
					.bi_sector = bio->bi_iter.bi_sector,
					.bi_size = bio->bi_iter.bi_size -
						prev_bv_len,
					.bi_rw = bio->bi_rw,
				};

				if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) {
					prev->bv_len -= len;
					return 0;
				}
			}

			goto done;
		}
	}

	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return 0;

	/*
	 * we might lose a segment or two here, but rather that than
	 * make this too complex.
	 */
	while (bio->bi_phys_segments >= queue_max_segments(q)) {

		if (retried_segments)
			return 0;

		retried_segments = 1;
		blk_recount_segments(q, bio);
	}

	/*
	 * setup the new entry, we might clear it again later if we
	 * cannot add the page
	 */
	bvec = &bio->bi_io_vec[bio->bi_vcnt];
	bvec->bv_page = page;
	bvec->bv_len = len;
	bvec->bv_offset = offset;

	/*
	 * if queue has other restrictions (eg varying max sector size
	 * depending on offset), it can specify a merge_bvec_fn in the
	 * queue to get further control
	 */
	if (q->merge_bvec_fn) {
		struct bvec_merge_data bvm = {
			.bi_bdev = bio->bi_bdev,
			.bi_sector = bio->bi_iter.bi_sector,
			.bi_size = bio->bi_iter.bi_size,
			.bi_rw = bio->bi_rw,
		};

		/*
		 * merge_bvec_fn() returns number of bytes it can accept
		 * at this offset
		 */
		if (q->merge_bvec_fn(q, &bvm, bvec) < bvec->bv_len) {
			bvec->bv_page = NULL;
			bvec->bv_len = 0;
			bvec->bv_offset = 0;
			return 0;
		}
	}

	/* If we may be able to merge these biovecs, force a recount */
	if (bio->bi_vcnt && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
		bio->bi_flags &= ~(1 << BIO_SEG_VALID);

	bio->bi_vcnt++;
	bio->bi_phys_segments++;
 done:
	bio->bi_iter.bi_size += len;
	return len;
}
/**
 *	bio_add_pc_page	-	attempt to add page to bio
 *	@q: the target queue
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 *
 *	This should only be used by REQ_PC bios.
 */
int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page,
		    unsigned int len, unsigned int offset)
{
	return __bio_add_page(q, bio, page, len, offset,
			      queue_max_hw_sectors(q));
}
EXPORT_SYMBOL(bio_add_pc_page);
/**
 *	bio_add_page	-	attempt to add page to bio
 *	@bio: destination bio
 *	@page: page to add
 *	@len: vec entry length
 *	@offset: vec entry offset
 *
 *	Attempt to add a page to the bio_vec maplist. This can fail for a
 *	number of reasons, such as the bio being full or target block device
 *	limitations. The target block device must allow bio's up to PAGE_SIZE,
 *	so it is always possible to add a single page to an empty bio.
 */
int bio_add_page(struct bio *bio, struct page *page, unsigned int len,
		 unsigned int offset)
{
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
	unsigned int max_sectors;

	max_sectors = blk_max_size_offset(q, bio->bi_iter.bi_sector);
	if ((max_sectors < (len >> 9)) && !bio->bi_iter.bi_size)
		max_sectors = len >> 9;

	return __bio_add_page(q, bio, page, len, offset, max_sectors);
}
EXPORT_SYMBOL(bio_add_page);
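
/*
 * Illustrative example (editorial sketch, not part of bio.c): filling a bio
 * page by page and stopping when bio_add_page() refuses to take more (bio
 * full or device limits hit); the caller would submit this bio and continue
 * with a fresh one.  "example_fill_bio" is an assumed name.
 */
static unsigned int example_fill_bio(struct bio *bio, struct page **pages,
				     unsigned int nr_pages)
{
	unsigned int i;

	for (i = 0; i < nr_pages; i++)
		if (bio_add_page(bio, pages[i], PAGE_SIZE, 0) < PAGE_SIZE)
			break;

	return i;	/* number of pages actually added */
}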
struct submit_bio_ret {
	struct completion event;
	int error;
};

static void submit_bio_wait_endio(struct bio *bio, int error)
{
	struct submit_bio_ret *ret = bio->bi_private;

	ret->error = error;
	complete(&ret->event);
}

/**
 * submit_bio_wait - submit a bio, and wait until it completes
 * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
 * @bio: The &struct bio which describes the I/O
 *
 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
 * bio_endio() on failure.
 */
int submit_bio_wait(int rw, struct bio *bio)
{
	struct submit_bio_ret ret;

	rw |= REQ_SYNC;
	init_completion(&ret.event);
	bio->bi_private = &ret;
	bio->bi_end_io = submit_bio_wait_endio;
	submit_bio(rw, bio);
	wait_for_completion(&ret.event);

	return ret.error;
}
EXPORT_SYMBOL(submit_bio_wait);
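
/*
 * Illustrative example (editorial sketch, not part of bio.c): synchronously
 * reading one page.  submit_bio_wait() sleeps, so this may only be called
 * from process context; "example_read_page_sync" is an assumed name.
 */
static int example_read_page_sync(struct block_device *bdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);

	ret = submit_bio_wait(READ, bio);
	bio_put(bio);
	return ret;
}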
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @bytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
void bio_advance(struct bio *bio, unsigned bytes)
{
	if (bio_integrity(bio))
		bio_integrity_advance(bio, bytes);

	bio_advance_iter(bio, &bio->bi_iter, bytes);
}
EXPORT_SYMBOL(bio_advance);
/**
 * bio_alloc_pages - allocates a single page for each bvec in a bio
 * @bio: bio to allocate pages for
 * @gfp_mask: flags for allocation
 *
 * Allocates pages up to @bio->bi_vcnt.
 *
 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
 * freed.
 */
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
{
	int i;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, i) {
		bv->bv_page = alloc_page(gfp_mask);
		if (!bv->bv_page) {
			while (--bv >= bio->bi_io_vec)
				__free_page(bv->bv_page);
			return -ENOMEM;
		}
	}

	return 0;
}
EXPORT_SYMBOL(bio_alloc_pages);
/**
 * bio_copy_data - copy contents of data buffers from one chain of bios to
 * another
 * @src: source bio list
 * @dst: destination bio list
 *
 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
 * @src and @dst as linked lists of bios.
 *
 * Stops when it reaches the end of either @src or @dst - that is, copies
 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
 */
void bio_copy_data(struct bio *dst, struct bio *src)
{
	struct bvec_iter src_iter, dst_iter;
	struct bio_vec src_bv, dst_bv;
	void *src_p, *dst_p;
	unsigned bytes;

	src_iter = src->bi_iter;
	dst_iter = dst->bi_iter;

	while (1) {
		if (!src_iter.bi_size) {
			src = src->bi_next;
			if (!src)
				break;

			src_iter = src->bi_iter;
		}

		if (!dst_iter.bi_size) {
			dst = dst->bi_next;
			if (!dst)
				break;

			dst_iter = dst->bi_iter;
		}

		src_bv = bio_iter_iovec(src, src_iter);
		dst_bv = bio_iter_iovec(dst, dst_iter);

		bytes = min(src_bv.bv_len, dst_bv.bv_len);

		src_p = kmap_atomic(src_bv.bv_page);
		dst_p = kmap_atomic(dst_bv.bv_page);

		memcpy(dst_p + dst_bv.bv_offset,
		       src_p + src_bv.bv_offset,
		       bytes);

		kunmap_atomic(dst_p);
		kunmap_atomic(src_p);

		bio_advance_iter(src, &src_iter, bytes);
		bio_advance_iter(dst, &dst_iter, bytes);
	}
}
EXPORT_SYMBOL(bio_copy_data);
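
/*
 * Illustrative example (editorial sketch, not part of bio.c): taking a
 * private snapshot of the data a bio carries by cloning it, giving the clone
 * its own pages with bio_alloc_pages(), and then copying the payload with
 * bio_copy_data().  The "example_*" names are assumptions.
 */
static struct bio *example_snapshot_data(struct bio *src, gfp_t gfp,
					 struct bio_set *bs)
{
	struct bio *copy = bio_clone_bioset(src, gfp, bs);

	if (!copy)
		return NULL;

	/* replace the shared pages with freshly allocated ones ... */
	if (bio_alloc_pages(copy, gfp)) {
		bio_put(copy);
		return NULL;
	}

	/* ... and copy the payload over; note the (dst, src) argument order */
	bio_copy_data(copy, src);
	return copy;
}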
struct bio_map_data {
	int nr_sgvecs;
	int is_our_pages;
	struct sg_iovec sgvecs[];
};

static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
			     const struct sg_iovec *iov, int iov_count,
			     int is_our_pages)
{
	memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
	bmd->nr_sgvecs = iov_count;
	bmd->is_our_pages = is_our_pages;
	bio->bi_private = bmd;
}

static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
					       gfp_t gfp_mask)
{
	if (iov_count > UIO_MAXIOV)
		return NULL;

	return kmalloc(sizeof(struct bio_map_data) +
		       sizeof(struct sg_iovec) * iov_count, gfp_mask);
}
static int __bio_copy_iov(struct bio *bio, const struct sg_iovec *iov, int iov_count,
			  int to_user, int from_user, int do_free_page)
{
	int ret = 0, i;
	struct bio_vec *bvec;
	int iov_idx = 0;
	unsigned int iov_off = 0;

	bio_for_each_segment_all(bvec, bio, i) {
		char *bv_addr = page_address(bvec->bv_page);
		unsigned int bv_len = bvec->bv_len;

		while (bv_len && iov_idx < iov_count) {
			unsigned int bytes;
			char __user *iov_addr;

			bytes = min_t(unsigned int,
				      iov[iov_idx].iov_len - iov_off, bv_len);
			iov_addr = iov[iov_idx].iov_base + iov_off;

			if (!ret) {
				if (to_user)
					ret = copy_to_user(iov_addr, bv_addr,
							   bytes);

				if (from_user)
					ret = copy_from_user(bv_addr, iov_addr,
							     bytes);

				if (ret)
					ret = -EFAULT;
			}

			bv_len -= bytes;
			bv_addr += bytes;
			iov_off += bytes;

			if (iov[iov_idx].iov_len == iov_off) {
				iov_idx++;
				iov_off = 0;
			}
		}

		if (do_free_page)
			__free_page(bvec->bv_page);
	}

	return ret;
}
/**
 *	bio_uncopy_user	-	finish previously mapped bio
 *	@bio: bio being terminated
 *
 *	Free pages allocated from bio_copy_user() and write back data
 *	to user space in case of a read.
 */
int bio_uncopy_user(struct bio *bio)
{
	struct bio_map_data *bmd = bio->bi_private;
	struct bio_vec *bvec;
	int ret = 0, i;

	if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
		/*
		 * if we're in a workqueue, the request is orphaned, so
		 * don't copy into a random user address space, just free.
		 */
		if (current->mm)
			ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
					     bio_data_dir(bio) == READ,
					     0, bmd->is_our_pages);
		else if (bmd->is_our_pages)
			bio_for_each_segment_all(bvec, bio, i)
				__free_page(bvec->bv_page);
	}
	kfree(bmd);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL(bio_uncopy_user);
/**
 *	bio_copy_user_iov	-	copy user data to bio
 *	@q:		destination block queue
 *	@map_data:	pointer to the rq_map_data holding pages (if necessary)
 *	@iov:		the iovec.
 *	@iov_count:	number of elements in the iovec
 *	@write_to_vm:	bool indicating writing to pages or not
 *	@gfp_mask:	memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with
 *	call bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user_iov(struct request_queue *q,
			      struct rq_map_data *map_data,
			      const struct sg_iovec *iov, int iov_count,
			      int write_to_vm, gfp_t gfp_mask)
{
	struct bio_map_data *bmd;
	struct bio_vec *bvec;
	struct page *page;
	struct bio *bio;
	int i, ret;
	int nr_pages = 0;
	unsigned int len = 0;
	unsigned int offset = map_data ? map_data->offset & ~PAGE_MASK : 0;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr;
		unsigned long end;
		unsigned long start;

		uaddr = (unsigned long)iov[i].iov_base;
		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		len += iov[i].iov_len;
	}

	if (offset)
		nr_pages++;

	bmd = bio_alloc_map_data(iov_count, gfp_mask);
	if (!bmd)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		goto out_bmd;

	if (!write_to_vm)
		bio->bi_rw |= REQ_WRITE;

	ret = 0;

	if (map_data) {
		nr_pages = 1 << map_data->page_order;
		i = map_data->offset / PAGE_SIZE;
	}
	while (len) {
		unsigned int bytes = PAGE_SIZE;

		bytes -= offset;

		if (bytes > len)
			bytes = len;

		if (map_data) {
			if (i == map_data->nr_entries * nr_pages) {
				ret = -ENOMEM;
				break;
			}

			page = map_data->pages[i / nr_pages];
			page += (i % nr_pages);

			i++;
		} else {
			page = alloc_page(q->bounce_gfp | gfp_mask);
			if (!page) {
				ret = -ENOMEM;
				break;
			}
		}

		if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
			break;

		len -= bytes;
		offset = 0;
	}

	if (ret)
		goto cleanup;

	/*
	 * success
	 */
	if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
	    (map_data && map_data->from_user)) {
		ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
		if (ret)
			goto cleanup;
	}

	bio_set_map_data(bmd, bio, iov, iov_count, map_data ? 0 : 1);
	return bio;
cleanup:
	if (!map_data)
		bio_for_each_segment_all(bvec, bio, i)
			__free_page(bvec->bv_page);

	bio_put(bio);
out_bmd:
	kfree(bmd);
	return ERR_PTR(ret);
}
/**
 *	bio_copy_user	-	copy user data to bio
 *	@q: destination block queue
 *	@map_data: pointer to the rq_map_data holding pages (if necessary)
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Prepares and returns a bio for indirect user io, bouncing data
 *	to/from kernel pages as necessary. Must be paired with
 *	call bio_uncopy_user() on io completion.
 */
struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data,
			  unsigned long uaddr, unsigned int len,
			  int write_to_vm, gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_copy_user);
static struct bio *__bio_map_user_iov(struct request_queue *q,
				      struct block_device *bdev,
				      const struct sg_iovec *iov, int iov_count,
				      int write_to_vm, gfp_t gfp_mask)
{
	int i, j;
	int nr_pages = 0;
	struct page **pages;
	struct bio *bio;
	int cur_page = 0;
	int ret, offset;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;

		/*
		 * Overflow, abort
		 */
		if (end < start)
			return ERR_PTR(-EINVAL);

		nr_pages += end - start;
		/*
		 * buffer must be aligned to at least hardsector size for now
		 */
		if (uaddr & queue_dma_alignment(q))
			return ERR_PTR(-EINVAL);
	}

	if (!nr_pages)
		return ERR_PTR(-EINVAL);

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	ret = -ENOMEM;
	pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
	if (!pages)
		goto out;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;
		unsigned long len = iov[i].iov_len;
		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long start = uaddr >> PAGE_SHIFT;
		const int local_nr_pages = end - start;
		const int page_limit = cur_page + local_nr_pages;

		ret = get_user_pages_fast(uaddr, local_nr_pages,
				write_to_vm, &pages[cur_page]);
		if (ret < local_nr_pages) {
			ret = -EFAULT;
			goto out_unmap;
		}

		offset = uaddr & ~PAGE_MASK;
		for (j = cur_page; j < page_limit; j++) {
			unsigned int bytes = PAGE_SIZE - offset;

			if (len <= 0)
				break;

			if (bytes > len)
				bytes = len;

			if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
					    bytes)
				break;

			len -= bytes;
			offset = 0;
		}

		cur_page = j;
		/*
		 * release the pages we didn't map into the bio, if any
		 */
		while (j < page_limit)
			page_cache_release(pages[j++]);
	}

	kfree(pages);

	/*
	 * set data direction, and check if mapped pages need bouncing
	 */
	if (!write_to_vm)
		bio->bi_rw |= REQ_WRITE;

	bio->bi_bdev = bdev;
	bio->bi_flags |= (1 << BIO_USER_MAPPED);
	return bio;

 out_unmap:
	for (i = 0; i < nr_pages; i++) {
		if (!pages[i])
			break;
		page_cache_release(pages[i]);
	}
 out:
	kfree(pages);
	bio_put(bio);
	return ERR_PTR(ret);
}
/**
 *	bio_map_user	-	map user address into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@uaddr: start of user address
 *	@len: length in bytes
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev,
			 unsigned long uaddr, unsigned int len, int write_to_vm,
			 gfp_t gfp_mask)
{
	struct sg_iovec iov;

	iov.iov_base = (void __user *)uaddr;
	iov.iov_len = len;

	return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask);
}
EXPORT_SYMBOL(bio_map_user);
/**
 *	bio_map_user_iov - map user sg_iovec table into bio
 *	@q: the struct request_queue for the bio
 *	@bdev: destination block device
 *	@iov: the iovec.
 *	@iov_count: number of elements in the iovec
 *	@write_to_vm: bool indicating writing to pages or not
 *	@gfp_mask: memory allocation flags
 *
 *	Map the user space address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_user_iov(struct request_queue *q, struct block_device *bdev,
			     const struct sg_iovec *iov, int iov_count,
			     int write_to_vm, gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_user_iov(q, bdev, iov, iov_count, write_to_vm,
				 gfp_mask);
	if (IS_ERR(bio))
		return bio;

	/*
	 * subtle -- if __bio_map_user() ended up bouncing a bio,
	 * it would normally disappear when its bi_end_io is run.
	 * however, we need it for the unmap, so grab an extra
	 * reference to it
	 */
	bio_get(bio);

	return bio;
}
static void __bio_unmap_user(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	/*
	 * make sure we dirty pages we wrote to
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		if (bio_data_dir(bio) == READ)
			set_page_dirty_lock(bvec->bv_page);

		page_cache_release(bvec->bv_page);
	}

	bio_put(bio);
}
/**
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
 *	Unmap a bio previously mapped by bio_map_user(). Must be called with
 *	a process context.
 *
 *	bio_unmap_user() may sleep.
 */
void bio_unmap_user(struct bio *bio)
{
	__bio_unmap_user(bio);
	bio_put(bio);
}
EXPORT_SYMBOL(bio_unmap_user);
static void bio_map_kern_endio(struct bio *bio, int err)
{
	bio_put(bio);
}
static struct bio *__bio_map_kern(struct request_queue *q, void *data,
				  unsigned int len, gfp_t gfp_mask)
{
	unsigned long kaddr = (unsigned long)data;
	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long start = kaddr >> PAGE_SHIFT;
	const int nr_pages = end - start;
	int offset, i;
	struct bio *bio;

	bio = bio_kmalloc(gfp_mask, nr_pages);
	if (!bio)
		return ERR_PTR(-ENOMEM);

	offset = offset_in_page(kaddr);
	for (i = 0; i < nr_pages; i++) {
		unsigned int bytes = PAGE_SIZE - offset;

		if (len <= 0)
			break;

		if (bytes > len)
			bytes = len;

		if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
				    offset) < bytes)
			break;

		data += bytes;
		len -= bytes;
		offset = 0;
	}

	bio->bi_end_io = bio_map_kern_endio;
	return bio;
}
/**
 *	bio_map_kern	-	map kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to map
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio allocation
 *
 *	Map the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
			 gfp_t gfp_mask)
{
	struct bio *bio;

	bio = __bio_map_kern(q, data, len, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (bio->bi_iter.bi_size == len)
		return bio;

	/*
	 * Don't support partial mappings.
	 */
	bio_put(bio);
	return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(bio_map_kern);
static void bio_copy_kern_endio(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	const int read = bio_data_dir(bio) == READ;
	struct bio_map_data *bmd = bio->bi_private;
	int i;
	char *p = bmd->sgvecs[0].iov_base;

	bio_for_each_segment_all(bvec, bio, i) {
		char *addr = page_address(bvec->bv_page);

		if (read)
			memcpy(p, addr, bvec->bv_len);

		__free_page(bvec->bv_page);
		p += bvec->bv_len;
	}

	kfree(bmd);
	bio_put(bio);
}
/**
 *	bio_copy_kern	-	copy kernel address into bio
 *	@q: the struct request_queue for the bio
 *	@data: pointer to buffer to copy
 *	@len: length in bytes
 *	@gfp_mask: allocation flags for bio and page allocation
 *	@reading: data direction is READ
 *
 *	copy the kernel address into a bio suitable for io to a block
 *	device. Returns an error pointer in case of error.
 */
struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
			  gfp_t gfp_mask, int reading)
{
	struct bio *bio;
	struct bio_vec *bvec;
	int i;

	bio = bio_copy_user(q, NULL, (unsigned long)data, len, 1, gfp_mask);
	if (IS_ERR(bio))
		return bio;

	if (!reading) {
		void *p = data;

		bio_for_each_segment_all(bvec, bio, i) {
			char *addr = page_address(bvec->bv_page);

			memcpy(addr, p, bvec->bv_len);
			p += bvec->bv_len;
		}
	}

	bio->bi_end_io = bio_copy_kern_endio;

	return bio;
}
EXPORT_SYMBOL(bio_copy_kern);
/*
 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
 * for performing direct-IO in BIOs.
 *
 * The problem is that we cannot run set_page_dirty() from interrupt context
 * because the required locks are not interrupt-safe.  So what we can do is to
 * mark the pages dirty _before_ performing IO.  And in interrupt context,
 * check that the pages are still dirty.  If so, fine.  If not, redirty them
 * in process context.
 *
 * We special-case compound pages here: normally this means reads into hugetlb
 * pages.  The logic in here doesn't really work right for compound pages
 * because the VM does not uniformly chase down the head page in all cases.
 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
 * handle them at all.  So we skip compound pages here at an early stage.
 *
 * Note that this code is very hard to test under normal circumstances because
 * direct-io pins the pages with get_user_pages().  This makes
 * is_page_cache_freeable return false, and the VM will not clean the pages.
 * But other code (eg, flusher threads) could clean the pages if they are mapped
 * pagecache pages.
 *
 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
 * deferred bio dirtying paths.
 */
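
/*
 * Illustrative example (editorial sketch, not part of bio.c): how a
 * direct-IO style reader would use the two helpers described above - dirty
 * the pages before issuing the read, and let the completion handler check
 * them (deferring any re-dirtying to process context).  The "example_*"
 * names are assumptions.
 */
static void example_dio_read_endio(struct bio *bio, int error)
{
	/* takes over the bio: releases the pages and the bio itself */
	bio_check_pages_dirty(bio);
}

static void example_dio_submit_read(struct bio *bio)
{
	bio->bi_end_io = example_dio_read_endio;
	bio_set_pages_dirty(bio);	/* dirty _before_ performing IO */
	submit_bio(READ, bio);
}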
/*
 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
 */
void bio_set_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page && !PageCompound(page))
			set_page_dirty_lock(page);
	}
}
static void bio_release_pages(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (page)
			put_page(page);
	}
}
/*
 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
 * If they are, then fine.  If, however, some pages are clean then they must
 * have been written out during the direct-IO read.  So we take another ref on
 * the BIO and the offending pages and re-dirty the pages in process context.
 *
 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
 * here on.  It will run one page_cache_release() against each page and will
 * run one bio_put() against the BIO.
 */

static void bio_dirty_fn(struct work_struct *work);

static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
static DEFINE_SPINLOCK(bio_dirty_lock);
static struct bio *bio_dirty_list;
/*
 * This runs in process context
 */
static void bio_dirty_fn(struct work_struct *work)
{
	unsigned long flags;
	struct bio *bio;

	spin_lock_irqsave(&bio_dirty_lock, flags);
	bio = bio_dirty_list;
	bio_dirty_list = NULL;
	spin_unlock_irqrestore(&bio_dirty_lock, flags);

	while (bio) {
		struct bio *next = bio->bi_private;

		bio_set_pages_dirty(bio);
		bio_release_pages(bio);
		bio_put(bio);
		bio = next;
	}
}
void bio_check_pages_dirty(struct bio *bio)
{
	struct bio_vec *bvec;
	int nr_clean_pages = 0;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (PageDirty(page) || PageCompound(page)) {
			page_cache_release(page);
			bvec->bv_page = NULL;
		} else {
			nr_clean_pages++;
		}
	}

	if (nr_clean_pages) {
		unsigned long flags;

		spin_lock_irqsave(&bio_dirty_lock, flags);
		bio->bi_private = bio_dirty_list;
		bio_dirty_list = bio;
		spin_unlock_irqrestore(&bio_dirty_lock, flags);
		schedule_work(&bio_dirty_work);
	} else {
		bio_put(bio);
	}
}
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
void bio_flush_dcache_pages(struct bio *bi)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bi, iter)
		flush_dcache_page(bvec.bv_page);
}
EXPORT_SYMBOL(bio_flush_dcache_pages);
#endif
/**
 * bio_endio - end I/O on a bio
 * @bio:	bio
 * @error:	error, if any
 *
 * Description:
 *   bio_endio() will end I/O on the whole bio. bio_endio() is the
 *   preferred way to end I/O on a bio, it takes care of clearing
 *   BIO_UPTODATE on error. @error is 0 on success, and one of the
 *   established -Exxxx (-EIO, for instance) error values in case
 *   something went wrong. No one should call bi_end_io() directly on a
 *   bio unless they own it and thus know that it has an end_io
 *   function.
 **/
void bio_endio(struct bio *bio, int error)
{
	while (bio) {
		BUG_ON(atomic_read(&bio->bi_remaining) <= 0);

		if (error)
			clear_bit(BIO_UPTODATE, &bio->bi_flags);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			error = -EIO;

		if (!atomic_dec_and_test(&bio->bi_remaining))
			return;

		/*
		 * Need to have a real endio function for chained bios,
		 * otherwise various corner cases will break (like stacking
		 * block devices that save/restore bi_end_io) - however, we want
		 * to avoid unbounded recursion and blowing the stack. Tail call
		 * optimization would handle this, but compiling with frame
		 * pointers also disables gcc's sibling call optimization.
		 */
		if (bio->bi_end_io == bio_chain_endio) {
			struct bio *parent = bio->bi_private;

			bio_put(bio);
			bio = parent;
		} else {
			if (bio->bi_end_io)
				bio->bi_end_io(bio, error);
			bio = NULL;
		}
	}
}
EXPORT_SYMBOL(bio_endio);
/**
 * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
 * @bio:	bio
 * @error:	error, if any
 *
 * For code that has saved and restored bi_end_io; think hard before using this
 * function, probably you should've cloned the entire bio.
 **/
void bio_endio_nodec(struct bio *bio, int error)
{
	atomic_inc(&bio->bi_remaining);
	bio_endio(bio, error);
}
EXPORT_SYMBOL(bio_endio_nodec);
/**
 * bio_split - split a bio
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Allocates and returns a new bio which represents @sectors from the start of
 * @bio, and updates @bio to represent the remaining sectors.
 *
 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
 * responsibility to ensure that @bio is not freed before the split.
 */
struct bio *bio_split(struct bio *bio, int sectors,
		      gfp_t gfp, struct bio_set *bs)
{
	struct bio *split = NULL;

	BUG_ON(sectors <= 0);
	BUG_ON(sectors >= bio_sectors(bio));

	split = bio_clone_fast(bio, gfp, bs);
	if (!split)
		return NULL;

	split->bi_iter.bi_size = sectors << 9;

	if (bio_integrity(split))
		bio_integrity_trim(split, 0, sectors);

	bio_advance(bio, split->bi_iter.bi_size);

	return split;
}
EXPORT_SYMBOL(bio_split);
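
/*
 * Illustrative example (editorial sketch, not part of bio.c): a driver that
 * can only handle "max_sectors" at a time can peel fragments off the front
 * with bio_split() and chain each one to the remainder, so the original
 * completion fires only once everything is done.  "example_split_and_submit"
 * is an assumed name.
 */
static void example_split_and_submit(struct bio *bio, unsigned int max_sectors,
				     struct bio_set *bs)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

		bio_chain(split, bio);
		generic_make_request(split);
	}
	generic_make_request(bio);
}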
/**
 * bio_trim - trim a bio
 * @bio:	bio to trim
 * @offset:	number of sectors to trim from the front of @bio
 * @size:	size we want to trim @bio to, in sectors
 */
void bio_trim(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 */

	size <<= 9;
	if (offset == 0 && size == bio->bi_iter.bi_size)
		return;

	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	bio_advance(bio, offset << 9);

	bio->bi_iter.bi_size = size;
}
EXPORT_SYMBOL_GPL(bio_trim);
/*
 * create memory pools for biovec's in a bio_set.
 * use the global biovec slabs created for general use.
 */
mempool_t *biovec_create_pool(int pool_entries)
{
	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

	return mempool_create_slab_pool(pool_entries, bp->slab);
}
void bioset_free(struct bio_set *bs)
{
	if (bs->rescue_workqueue)
		destroy_workqueue(bs->rescue_workqueue);

	if (bs->bio_pool)
		mempool_destroy(bs->bio_pool);

	if (bs->bvec_pool)
		mempool_destroy(bs->bvec_pool);

	bioset_integrity_free(bs);
	bio_put_slab(bs);

	kfree(bs);
}
EXPORT_SYMBOL(bioset_free);
/**
 * bioset_create  - Create a bio_set
 * @pool_size:	Number of bio and bio_vecs to cache in the mempool
 * @front_pad:	Number of bytes to allocate in front of the returned bio
 *
 * Description:
 *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
 *    to ask for a number of bytes to be allocated in front of the bio.
 *    Front pad allocation is useful for embedding the bio inside
 *    another structure, to avoid allocating extra data to go with the bio.
 *    Note that the bio must be embedded at the END of that structure always,
 *    or things will break badly.
 */
struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
{
	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
	struct bio_set *bs;

	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
	if (!bs)
		return NULL;

	bs->front_pad = front_pad;

	spin_lock_init(&bs->rescue_lock);
	bio_list_init(&bs->rescue_list);
	INIT_WORK(&bs->rescue_work, bio_alloc_rescue);

	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
	if (!bs->bio_slab) {
		kfree(bs);
		return NULL;
	}

	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
	if (!bs->bio_pool)
		goto bad;

	bs->bvec_pool = biovec_create_pool(pool_size);
	if (!bs->bvec_pool)
		goto bad;

	bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
	if (!bs->rescue_workqueue)
		goto bad;

	return bs;
bad:
	bioset_free(bs);
	return NULL;
}
EXPORT_SYMBOL(bioset_create);
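
/*
 * Illustrative example (editorial sketch, not part of bio.c): using
 * @front_pad to embed the bio at the end of a driver-private structure, as
 * the comment above requires.  "struct example_io" and the other names are
 * assumptions; the bio_set would be created once at init time.
 */
struct example_io {
	void		*private;
	struct bio	bio;		/* must be the last field */
};

static struct bio_set *example_bs;	/* bioset_create(64, offsetof(struct example_io, bio)) */

static struct example_io *example_io_alloc(gfp_t gfp)
{
	struct bio *bio = bio_alloc_bioset(gfp, 1, example_bs);

	if (!bio)
		return NULL;

	return container_of(bio, struct example_io, bio);
}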
#ifdef CONFIG_BLK_CGROUP
/**
 * bio_associate_current - associate a bio with %current
 * @bio: target bio
 *
 * Associate @bio with %current if it hasn't been associated yet.  Block
 * layer will treat @bio as if it were issued by %current no matter which
 * task actually issues it.
 *
 * This function takes an extra reference of @task's io_context and blkcg
 * which will be put when @bio is released.  The caller must own @bio,
 * ensure %current->io_context exists, and is responsible for synchronizing
 * calls to this function.
 */
int bio_associate_current(struct bio *bio)
{
	struct io_context *ioc;
	struct cgroup_subsys_state *css;

	if (bio->bi_ioc)
		return -EBUSY;

	ioc = current->io_context;
	if (!ioc)
		return -ENOENT;

	/* acquire active ref on @ioc and associate */
	get_io_context_active(ioc);
	bio->bi_ioc = ioc;

	/* associate blkcg if exists */
	rcu_read_lock();
	css = task_css(current, blkio_cgrp_id);
	if (css && css_tryget_online(css))
		bio->bi_css = css;
	rcu_read_unlock();

	return 0;
}

/**
 * bio_disassociate_task - undo bio_associate_current()
 * @bio: target bio
 */
void bio_disassociate_task(struct bio *bio)
{
	if (bio->bi_ioc) {
		put_io_context(bio->bi_ioc);
		bio->bi_ioc = NULL;
	}
	if (bio->bi_css) {
		css_put(bio->bi_css);
		bio->bi_css = NULL;
	}
}

#endif /* CONFIG_BLK_CGROUP */
static void __init biovec_init_slabs(void)
{
	int i;

	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
		int size;
		struct biovec_slab *bvs = bvec_slabs + i;

		if (bvs->nr_vecs <= BIO_INLINE_VECS) {
			bvs->slab = NULL;
			continue;
		}

		size = bvs->nr_vecs * sizeof(struct bio_vec);
		bvs->slab = kmem_cache_create(bvs->name, size, 0,
					      SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	}
}
static int __init init_bio(void)
{
	bio_slab_max = 2;
	bio_slab_nr = 0;
	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
	if (!bio_slabs)
		panic("bio: can't allocate bios\n");

	bio_integrity_init();
	biovec_init_slabs();

	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
	if (!fs_bio_set)
		panic("bio: can't allocate bios\n");

	if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
		panic("bio: can't create integrity pool\n");

	return 0;
}
subsys_initcall(init_bio);