/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/mempool.h>
/* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */
#include <linux/blk_types.h>
#include <linux/uio.h>

#define BIO_MAX_VECS		256U

static inline unsigned int bio_max_segs(unsigned int nr_segs)
{
	return min(nr_segs, BIO_MAX_VECS);
}
#define bio_prio(bio)			(bio)->bi_ioprio
#define bio_set_prio(bio, prio)		((bio)->bi_ioprio = prio)

#define bio_iter_iovec(bio, iter)				\
	bvec_iter_bvec((bio)->bi_io_vec, (iter))

#define bio_iter_page(bio, iter)				\
	bvec_iter_page((bio)->bi_io_vec, (iter))
#define bio_iter_len(bio, iter)					\
	bvec_iter_len((bio)->bi_io_vec, (iter))
#define bio_iter_offset(bio, iter)				\
	bvec_iter_offset((bio)->bi_io_vec, (iter))

#define bio_page(bio)		bio_iter_page((bio), (bio)->bi_iter)
#define bio_offset(bio)		bio_iter_offset((bio), (bio)->bi_iter)
#define bio_iovec(bio)		bio_iter_iovec((bio), (bio)->bi_iter)

#define bvec_iter_sectors(iter)	((iter).bi_size >> 9)
#define bvec_iter_end_sector(iter) ((iter).bi_sector + bvec_iter_sectors((iter)))

#define bio_sectors(bio)	bvec_iter_sectors((bio)->bi_iter)
#define bio_end_sector(bio)	bvec_iter_end_sector((bio)->bi_iter)
/*
 * Return the data direction, READ or WRITE.
 */
#define bio_data_dir(bio) \
	(op_is_write(bio_op(bio)) ? WRITE : READ)
/*
 * Check whether this bio carries any data or not. A NULL bio is allowed.
 */
static inline bool bio_has_data(struct bio *bio)
{
	if (bio &&
	    bio->bi_iter.bi_size &&
	    bio_op(bio) != REQ_OP_DISCARD &&
	    bio_op(bio) != REQ_OP_SECURE_ERASE &&
	    bio_op(bio) != REQ_OP_WRITE_ZEROES)
		return true;

	return false;
}
static inline bool bio_no_advance_iter(const struct bio *bio)
{
	return bio_op(bio) == REQ_OP_DISCARD ||
	       bio_op(bio) == REQ_OP_SECURE_ERASE ||
	       bio_op(bio) == REQ_OP_WRITE_ZEROES;
}
static inline void *bio_data(struct bio *bio)
{
	if (bio_has_data(bio))
		return page_address(bio_page(bio)) + bio_offset(bio);

	return NULL;
}
static inline bool bio_next_segment(const struct bio *bio,
				    struct bvec_iter_all *iter)
{
	if (iter->idx >= bio->bi_vcnt)
		return false;

	bvec_advance(&bio->bi_io_vec[iter->idx], iter);
	return true;
}

/*
 * drivers should _never_ use the all version - the bio may have been split
 * before it got to the driver and the driver won't own all of it
 */
#define bio_for_each_segment_all(bvl, bio, iter) \
	for (bvl = bvec_init_iter_all(&iter); bio_next_segment((bio), &iter); )
static inline void bio_advance_iter(const struct bio *bio,
				    struct bvec_iter *iter, unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance(bio->bi_io_vec, iter, bytes);
		/* TODO: It is reasonable to complete bio with error here. */
}
/* @bytes should be less than or equal to bvec[i->bi_idx].bv_len */
static inline void bio_advance_iter_single(const struct bio *bio,
					   struct bvec_iter *iter,
					   unsigned int bytes)
{
	iter->bi_sector += bytes >> 9;

	if (bio_no_advance_iter(bio))
		iter->bi_size -= bytes;
	else
		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
}

void __bio_advance(struct bio *, unsigned bytes);
/**
 * bio_advance - increment/complete a bio by some number of bytes
 * @bio:	bio to advance
 * @nbytes:	number of bytes to complete
 *
 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
 * be updated on the last bvec as well.
 *
 * @bio will then represent the remaining, uncompleted portion of the io.
 */
static inline void bio_advance(struct bio *bio, unsigned int nbytes)
{
	if (nbytes == bio->bi_iter.bi_size) {
		bio->bi_iter.bi_size = 0;
		return;
	}
	__bio_advance(bio, nbytes);
}
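
/*
 * Illustrative sketch (not part of this header): a driver that has finished
 * only part of a bio could advance past the completed bytes and check what
 * remains. The helper name "consume_completed" is hypothetical.
 *
 * static bool consume_completed(struct bio *bio, unsigned int done)
 * {
 *	bio_advance(bio, done);
 *	return bio->bi_iter.bi_size != 0;
 * }
 */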
#define __bio_for_each_segment(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

#define bio_for_each_segment(bvl, bio, iter)				\
	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
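
/*
 * Illustrative sketch (not part of this header): walk the single-page
 * segments of a bio and add up the bytes they cover. The helper name
 * "count_bio_bytes" is hypothetical.
 *
 * static unsigned int count_bio_bytes(struct bio *bio)
 * {
 *	struct bio_vec bv;
 *	struct bvec_iter iter;
 *	unsigned int bytes = 0;
 *
 *	bio_for_each_segment(bv, bio, iter)
 *		bytes += bv.bv_len;
 *	return bytes;
 * }
 */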
#define __bio_for_each_bvec(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))

/* iterate over multi-page bvec */
#define bio_for_each_bvec(bvl, bio, iter)				\
	__bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter)

/*
 * Iterate over all multi-page bvecs. Drivers shouldn't use this version for
 * the same reasons as bio_for_each_segment_all().
 */
#define bio_for_each_bvec_all(bvl, bio, i)		\
	for (i = 0, bvl = bio_first_bvec_all(bio);	\
	     i < (bio)->bi_vcnt; i++, bvl++)

#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
static inline unsigned bio_segments(struct bio *bio)
{
	unsigned segs = 0;
	struct bio_vec bv;
	struct bvec_iter iter;

	/*
	 * We special case discard/write same/write zeroes, because they
	 * interpret bi_size differently:
	 */
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
	case REQ_OP_WRITE_ZEROES:
		return 0;
	default:
		break;
	}

	bio_for_each_segment(bv, bio, iter)
		segs++;

	return segs;
}
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), the bio could complete before submit_bio() returns,
 * and it would already be freed memory by the time the
 * if (bio->bi_flags ...) check runs.
 */
static inline void bio_get(struct bio *bio)
{
	bio->bi_flags |= (1 << BIO_REFFED);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_cnt);
}
static inline void bio_cnt_set(struct bio *bio, unsigned int count)
{
	if (count != 1) {
		bio->bi_flags |= (1 << BIO_REFFED);
		smp_mb();
	}
	atomic_set(&bio->__bi_cnt, count);
}
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
	return bio->bi_flags & (1U << bit);
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags |= (1U << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
	bio->bi_flags &= ~(1U << bit);
}
static inline struct bio_vec *bio_first_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return bio->bi_io_vec;
}

static inline struct page *bio_first_page_all(struct bio *bio)
{
	return bio_first_bvec_all(bio)->bv_page;
}

static inline struct folio *bio_first_folio_all(struct bio *bio)
{
	return page_folio(bio_first_page_all(bio));
}

static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
	WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
	return &bio->bi_io_vec[bio->bi_vcnt - 1];
}
/**
 * struct folio_iter - State for iterating all folios in a bio.
 * @folio: The current folio we're iterating. NULL after the last folio.
 * @offset: The byte offset within the current folio.
 * @length: The number of bytes in this iteration (will not cross folio
 *	boundary).
 */
struct folio_iter {
	struct folio *folio;
	size_t offset;
	size_t length;
	/* private: for use by the iterator */
	struct folio *_next;
	size_t _seg_count;
	int _i;
};

static inline void bio_first_folio(struct folio_iter *fi, struct bio *bio,
				   int i)
{
	struct bio_vec *bvec = bio_first_bvec_all(bio) + i;

	if (unlikely(i >= bio->bi_vcnt)) {
		fi->folio = NULL;
		return;
	}

	fi->folio = page_folio(bvec->bv_page);
	fi->offset = bvec->bv_offset +
			PAGE_SIZE * (bvec->bv_page - &fi->folio->page);
	fi->_seg_count = bvec->bv_len;
	fi->length = min(folio_size(fi->folio) - fi->offset, fi->_seg_count);
	fi->_next = folio_next(fi->folio);
	fi->_i = i;
}
static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio)
{
	fi->_seg_count -= fi->length;
	if (fi->_seg_count) {
		fi->folio = fi->_next;
		fi->offset = 0;
		fi->length = min(folio_size(fi->folio), fi->_seg_count);
		fi->_next = folio_next(fi->folio);
	} else {
		bio_first_folio(fi, bio, fi->_i + 1);
	}
}
/**
 * bio_for_each_folio_all - Iterate over each folio in a bio.
 * @fi: struct folio_iter which is updated for each folio.
 * @bio: struct bio to iterate over.
 */
#define bio_for_each_folio_all(fi, bio)				\
	for (bio_first_folio(&fi, bio, 0); fi.folio; bio_next_folio(&fi, bio))
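
/*
 * Illustrative sketch (not part of this header): visit every folio covered
 * by a bio, e.g. to mark each one up to date after a successful read. The
 * helper name "bio_mark_folios_uptodate" is hypothetical.
 *
 * static void bio_mark_folios_uptodate(struct bio *bio)
 * {
 *	struct folio_iter fi;
 *
 *	bio_for_each_folio_all(fi, bio)
 *		folio_mark_uptodate(fi.folio);
 * }
 */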
void bio_trim(struct bio *bio, sector_t offset, sector_t size);
extern struct bio *bio_split(struct bio *bio, int sectors,
			     gfp_t gfp, struct bio_set *bs);
int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim,
		unsigned *segs, unsigned max_bytes);
/**
 * bio_next_split - get next @sectors from a bio, splitting if necessary
 * @bio:	bio to split
 * @sectors:	number of sectors to split from the front of @bio
 * @gfp:	gfp mask
 * @bs:		bio set to allocate from
 *
 * Return: a bio representing the next @sectors of @bio - if the bio is smaller
 * than @sectors, returns the original bio unchanged.
 */
static inline struct bio *bio_next_split(struct bio *bio, int sectors,
					 gfp_t gfp, struct bio_set *bs)
{
	if (sectors >= bio_sectors(bio))
		return bio;

	return bio_split(bio, sectors, gfp, bs);
}
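
/*
 * Illustrative sketch (not part of this header): carve fixed-size pieces off
 * the front of a bio and submit them, as a stacking driver might. The helper
 * name, the 8-sector chunk size, and the omission of split-failure handling
 * are all assumptions made for the example.
 *
 * static void submit_in_chunks(struct bio *bio, struct bio_set *bs)
 * {
 *	struct bio *split;
 *
 *	do {
 *		split = bio_next_split(bio, 8, GFP_NOIO, bs);
 *		if (split != bio)
 *			bio_chain(split, bio);
 *		submit_bio(split);
 *	} while (split != bio);
 * }
 */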
enum {
	BIOSET_NEED_BVECS	= BIT(0),
	BIOSET_NEED_RESCUER	= BIT(1),
	BIOSET_PERCPU_CACHE	= BIT(2),
};
extern int bioset_init(struct bio_set *, unsigned int, unsigned int, int flags);
extern void bioset_exit(struct bio_set *);
extern int biovec_init_pool(mempool_t *pool, int pool_entries);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
			     blk_opf_t opf, gfp_t gfp_mask,
			     struct bio_set *bs);
struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);

struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
		gfp_t gfp, struct bio_set *bs);
int bio_init_clone(struct block_device *bdev, struct bio *bio,
		struct bio *bio_src, gfp_t gfp);
extern struct bio_set fs_bio_set;
static inline struct bio *bio_alloc(struct block_device *bdev,
		unsigned short nr_vecs, blk_opf_t opf, gfp_t gfp_mask)
{
	return bio_alloc_bioset(bdev, nr_vecs, opf, gfp_mask, &fs_bio_set);
}
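
/*
 * Illustrative sketch (not part of this header): allocate a one-segment read
 * bio from fs_bio_set, point it at a page and wait for it synchronously.
 * The helper name "read_one_page" is hypothetical.
 *
 * static int read_one_page(struct block_device *bdev, struct page *page,
 *			    sector_t sector)
 * {
 *	struct bio *bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
 *	int ret;
 *
 *	bio->bi_iter.bi_sector = sector;
 *	__bio_add_page(bio, page, PAGE_SIZE, 0);
 *	ret = submit_bio_wait(bio);
 *	bio_put(bio);
 *	return ret;
 * }
 */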
void submit_bio(struct bio *bio);

extern void bio_endio(struct bio *);

static inline void bio_io_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_IOERR;
	bio_endio(bio);
}

static inline void bio_wouldblock_error(struct bio *bio)
{
	bio_set_flag(bio, BIO_QUIET);
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}
/*
 * Calculate the number of bvec segments that should be allocated to fit the
 * data pointed to by @iter. If @iter is backed by a bvec, it is going to be
 * reused instead of allocating a new one.
 */
static inline int bio_iov_vecs_to_alloc(struct iov_iter *iter, int max_segs)
{
	if (iov_iter_is_bvec(iter))
		return 0;
	return iov_iter_npages(iter, max_segs);
}

struct request_queue;

extern int submit_bio_wait(struct bio *bio);
void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
	      unsigned short max_vecs, blk_opf_t opf);
extern void bio_uninit(struct bio *);
void bio_reset(struct bio *bio, struct block_device *bdev, blk_opf_t opf);
void bio_chain(struct bio *, struct bio *);

int __must_check bio_add_page(struct bio *bio, struct page *page, unsigned len,
			      unsigned off);
bool __must_check bio_add_folio(struct bio *bio, struct folio *folio,
				size_t len, size_t off);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
			   unsigned int, unsigned int);
void __bio_add_page(struct bio *bio, struct page *page,
		unsigned int len, unsigned int off);
void bio_add_folio_nofail(struct bio *bio, struct folio *folio, size_t len,
			  size_t off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_iov_bvec_set(struct bio *bio, struct iov_iter *iter);
void __bio_release_pages(struct bio *bio, bool mark_dirty);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

static inline void zero_fill_bio(struct bio *bio)
{
	zero_fill_bio_iter(bio, bio->bi_iter);
}
static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
	if (bio_flagged(bio, BIO_PAGE_PINNED))
		__bio_release_pages(bio, mark_dirty);
}

#define bio_dev(bio) \
	disk_devt((bio)->bi_bdev->bd_disk)
#ifdef CONFIG_BLK_CGROUP
void bio_associate_blkg(struct bio *bio);
void bio_associate_blkg_from_css(struct bio *bio,
				 struct cgroup_subsys_state *css);
void bio_clone_blkg_association(struct bio *dst, struct bio *src);
void blkcg_punt_bio_submit(struct bio *bio);
#else	/* CONFIG_BLK_CGROUP */
static inline void bio_associate_blkg(struct bio *bio) { }
static inline void bio_associate_blkg_from_css(struct bio *bio,
					       struct cgroup_subsys_state *css)
{ }
static inline void bio_clone_blkg_association(struct bio *dst,
					      struct bio *src) { }
static inline void blkcg_punt_bio_submit(struct bio *bio)
{
	submit_bio(bio);
}
#endif	/* CONFIG_BLK_CGROUP */
static inline void bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_BPS_THROTTLED);
	bio->bi_bdev = bdev;
	bio_associate_blkg(bio);
}
/*
 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
 *
 * A bio_list anchors a singly-linked list of bios chained through the bi_next
 * member of the bio. The bio_list also caches the last list member to allow
 * fast access to the tail.
 */
struct bio_list {
	struct bio *head;
	struct bio *tail;
};

static inline int bio_list_empty(const struct bio_list *bl)
{
	return bl->head == NULL;
}

static inline void bio_list_init(struct bio_list *bl)
{
	bl->head = bl->tail = NULL;
}
#define BIO_EMPTY_LIST	{ NULL, NULL }

#define bio_list_for_each(bio, bl) \
	for (bio = (bl)->head; bio; bio = bio->bi_next)

static inline unsigned bio_list_size(const struct bio_list *bl)
{
	unsigned sz = 0;
	struct bio *bio;

	bio_list_for_each(bio, bl)
		sz++;

	return sz;
}
static inline void bio_list_add(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = NULL;

	if (bl->tail)
		bl->tail->bi_next = bio;
	else
		bl->head = bio;

	bl->tail = bio;
}

static inline void bio_list_add_head(struct bio_list *bl, struct bio *bio)
{
	bio->bi_next = bl->head;

	bl->head = bio;

	if (!bl->tail)
		bl->tail = bio;
}
static inline void bio_list_merge(struct bio_list *bl, struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->tail)
		bl->tail->bi_next = bl2->head;
	else
		bl->head = bl2->head;

	bl->tail = bl2->tail;
}

static inline void bio_list_merge_init(struct bio_list *bl,
		struct bio_list *bl2)
{
	bio_list_merge(bl, bl2);
	bio_list_init(bl2);
}
static inline void bio_list_merge_head(struct bio_list *bl,
				       struct bio_list *bl2)
{
	if (!bl2->head)
		return;

	if (bl->head)
		bl2->tail->bi_next = bl->head;
	else
		bl->tail = bl2->tail;

	bl->head = bl2->head;
}
static inline struct bio *bio_list_peek(struct bio_list *bl)
{
	return bl->head;
}

static inline struct bio *bio_list_pop(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	if (bio) {
		bl->head = bl->head->bi_next;
		if (!bl->head)
			bl->tail = NULL;

		bio->bi_next = NULL;
	}

	return bio;
}

static inline struct bio *bio_list_get(struct bio_list *bl)
{
	struct bio *bio = bl->head;

	bl->head = bl->tail = NULL;

	return bio;
}
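
/*
 * Illustrative sketch (not part of this header): a remapping driver might
 * park bios on a private list and drain it later. The helper names
 * "defer_bio" and "flush_deferred" are hypothetical.
 *
 * static void defer_bio(struct bio_list *deferred, struct bio *bio)
 * {
 *	bio_list_add(deferred, bio);
 * }
 *
 * static void flush_deferred(struct bio_list *deferred)
 * {
 *	struct bio *bio;
 *
 *	while ((bio = bio_list_pop(deferred)))
 *		submit_bio(bio);
 * }
 */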
/*
 * Increment chain count for the bio. Make sure the CHAIN flag update
 * is visible before the raised count.
 */
static inline void bio_inc_remaining(struct bio *bio)
{
	bio_set_flag(bio, BIO_CHAIN);
	smp_mb__before_atomic();
	atomic_inc(&bio->__bi_remaining);
}
/*
 * bio_set is used to allow other portions of the IO system to
 * allocate their own private memory pools for bio and iovec structures.
 * These memory pools in turn all allocate from the bio_slab
 * and the bvec_slabs[].
 */
#define BIO_POOL_SIZE 2

struct bio_set {
	struct kmem_cache *bio_slab;
	unsigned int front_pad;

	/*
	 * per-cpu bio alloc cache
	 */
	struct bio_alloc_cache __percpu *cache;

	mempool_t bio_pool;
	mempool_t bvec_pool;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
	mempool_t bio_integrity_pool;
	mempool_t bvec_integrity_pool;
#endif

	unsigned int back_pad;
	/*
	 * Deadlock avoidance for stacking block drivers: see comments in
	 * bio_alloc_bioset() for details
	 */
	spinlock_t		rescue_lock;
	struct bio_list		rescue_list;
	struct work_struct	rescue_work;
	struct workqueue_struct	*rescue_workqueue;

	/*
	 * Hot un-plug notifier for the per-cpu cache, if used
	 */
	struct hlist_node cpuhp_dead;
};

static inline bool bioset_initialized(struct bio_set *bs)
{
	return bs->bio_slab != NULL;
}
/*
 * Mark a bio as polled. Note that for async polled IO, the caller must
 * expect -EWOULDBLOCK if we cannot allocate a request (or other resources).
 * We cannot block waiting for requests on polled IO, as those completions
 * must be found by the caller. This is different from IRQ driven IO, where
 * it's safe to wait for IO to complete.
 */
static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
{
	bio->bi_opf |= REQ_POLLED;
	if (kiocb->ki_flags & IOCB_NOWAIT)
		bio->bi_opf |= REQ_NOWAIT;
}

static inline void bio_clear_polled(struct bio *bio)
{
	bio->bi_opf &= ~REQ_POLLED;
}
/**
 * bio_is_zone_append - is this a zone append bio?
 * @bio:	bio to check
 *
 * Check if @bio is a zone append operation. Core block layer code and end_io
 * handlers must use this instead of an open coded REQ_OP_ZONE_APPEND check
 * because the block layer can rewrite REQ_OP_ZONE_APPEND to REQ_OP_WRITE if
 * it is not natively supported.
 */
static inline bool bio_is_zone_append(struct bio *bio)
{
	if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED))
		return false;
	return bio_op(bio) == REQ_OP_ZONE_APPEND ||
		bio_flagged(bio, BIO_EMULATES_ZONE_APPEND);
}
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
		unsigned int nr_pages, blk_opf_t opf, gfp_t gfp);
struct bio *bio_chain_and_submit(struct bio *prev, struct bio *new);

struct bio *blk_alloc_discard_bio(struct block_device *bdev,
		sector_t *sector, sector_t *nr_sects, gfp_t gfp_mask);

#endif /* __LINUX_BIO_H */