/*
 * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef __LINUX_BIO_H
#define __LINUX_BIO_H

#include <linux/highmem.h>
#include <linux/mempool.h>
/* Platforms may set this to teach the BIO layer about IOMMU hardware. */
#if defined(BIO_VMERGE_MAX_SIZE) && defined(BIO_VMERGE_BOUNDARY)
#define BIOVEC_VIRT_START_SIZE(x)	(bvec_to_phys(x) & (BIO_VMERGE_BOUNDARY - 1))
#define BIOVEC_VIRT_OVERSIZE(x)		((x) > BIO_VMERGE_MAX_SIZE)
#else
#define BIOVEC_VIRT_START_SIZE(x)	0
#define BIOVEC_VIRT_OVERSIZE(x)		0
#endif

#ifndef BIO_VMERGE_BOUNDARY
#define BIO_VMERGE_BOUNDARY	0
#endif
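
/*
 * Illustrative sketch (not part of this header): a platform with an
 * IOMMU that remaps in 4K chunks might define, in its asm headers,
 *
 *	#define BIO_VMERGE_BOUNDARY	4096
 *	#define BIO_VMERGE_MAX_SIZE	65536
 *
 * so that two physically discontiguous bio_vecs still count as one
 * hardware segment as long as both ends are 4K aligned and the merged
 * segment stays under 64K. These values are made-up examples, not
 * defaults for any particular architecture.
 */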
#define BIO_BUG_ON	BUG_ON

#define BIO_MAX_PAGES		(256)
#define BIO_MAX_SIZE		(BIO_MAX_PAGES << PAGE_CACHE_SHIFT)
#define BIO_MAX_SECTORS		(BIO_MAX_SIZE >> 9)
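
/*
 * Worked example (assuming 4K pages, i.e. PAGE_CACHE_SHIFT == 12):
 * BIO_MAX_SIZE = 256 << 12 = 1MB, and BIO_MAX_SECTORS = 1MB >> 9 = 2048
 * 512-byte sectors per bio.
 */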
/*
 * was unsigned short, but we might as well be ready for > 64kB I/O pages
 */
struct bio_vec {
	struct page	*bv_page;
	unsigned int	bv_len;
	unsigned int	bv_offset;
};
typedef int (bio_end_io_t) (struct bio *, unsigned int, int);
typedef void (bio_destructor_t) (struct bio *);
/*
 * main unit of I/O for the block layer and lower layers (ie drivers and
 * stacking drivers)
 */
struct bio {
	sector_t		bi_sector;
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
	unsigned long		bi_rw;		/* bottom bits READ/WRITE,
						 * top bits priority
						 */

	unsigned short		bi_vcnt;	/* how many bio_vec's */
	unsigned short		bi_idx;		/* current index into bvl_vec */

	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned short		bi_phys_segments;

	/* Number of segments after physical and DMA remapping
	 * hardware coalescing is performed.
	 */
	unsigned short		bi_hw_segments;

	unsigned int		bi_size;	/* residual I/O count */

	/*
	 * To keep track of the max hw size, we account for the
	 * sizes of the first and last virtually mergeable segments
	 * in this bio
	 */
	unsigned int		bi_hw_front_size;
	unsigned int		bi_hw_back_size;

	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */

	struct bio_vec		*bi_io_vec;	/* the actual vec list */

	bio_end_io_t		*bi_end_io;
	atomic_t		bi_cnt;		/* pin count */

	void			*bi_private;

	bio_destructor_t	*bi_destructor;	/* destructor */
};
/*
 * bio flags
 */
#define BIO_UPTODATE	0	/* ok after I/O completion */
#define BIO_RW_BLOCK	1	/* RW_AHEAD set, and read/write would block */
#define BIO_EOF		2	/* out-of-bounds error */
#define BIO_SEG_VALID	3	/* nr_hw_seg valid */
#define BIO_CLONED	4	/* doesn't own data */
#define BIO_BOUNCED	5	/* bio is a bounce bio */
#define BIO_USER_MAPPED	6	/* contains user pages */
#define BIO_EOPNOTSUPP	7	/* not supported */
#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
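
/*
 * Example (illustrative only): a completion path can test a status bit
 * with the accessor instead of open-coding the shift:
 *
 *	if (!bio_flagged(bio, BIO_UPTODATE))
 *		error = -EIO;
 */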
/*
 * top 4 bits of bio flags indicate the pool this bio came from
 */
#define BIO_POOL_BITS		(4)
#define BIO_POOL_OFFSET		(BITS_PER_LONG - BIO_POOL_BITS)
#define BIO_POOL_MASK		(1UL << BIO_POOL_OFFSET)
#define BIO_POOL_IDX(bio)	((bio)->bi_flags >> BIO_POOL_OFFSET)
/*
 * bio bi_rw flags
 *
 * bit 0 -- read (not set) or write (set)
 * bit 1 -- rw-ahead when set
 * bit 2 -- barrier
 * bit 3 -- fail fast, don't want low level driver retries
 * bit 4 -- synchronous I/O hint: the block layer will unplug immediately
 */
#define BIO_RW		0
#define BIO_RW_AHEAD	1
#define BIO_RW_BARRIER	2
#define BIO_RW_FAILFAST	3
#define BIO_RW_SYNC	4
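
/*
 * Sketch (illustrative): a synchronous write sets bit 0 (the
 * read/write bit) plus the sync hint in bi_rw:
 *
 *	bio->bi_rw = (1 << BIO_RW) | (1 << BIO_RW_SYNC);
 *
 * The bio_sync()/bio_barrier() helpers below test these same bits.
 */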
/*
 * various member access, note that bio_data should of course not be used
 * on highmem page vectors
 */
#define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
#define bio_page(bio)		bio_iovec((bio))->bv_page
#define bio_offset(bio)		bio_iovec((bio))->bv_offset
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
#define bio_sectors(bio)	((bio)->bi_size >> 9)
#define bio_cur_sectors(bio)	(bio_iovec(bio)->bv_len >> 9)
#define bio_data(bio)		(page_address(bio_page((bio))) + bio_offset((bio)))
#define bio_barrier(bio)	((bio)->bi_rw & (1 << BIO_RW_BARRIER))
#define bio_sync(bio)		((bio)->bi_rw & (1 << BIO_RW_SYNC))
#define bio_failfast(bio)	((bio)->bi_rw & (1 << BIO_RW_FAILFAST))
#define bio_rw_ahead(bio)	((bio)->bi_rw & (1 << BIO_RW_AHEAD))
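
/*
 * Illustrative use (lowmem pages only, per the note above):
 *
 *	char *buf = bio_data(bio);		current data pointer
 *	unsigned nsect = bio_cur_sectors(bio);	sectors in current bvec
 *	sector_t start = bio->bi_sector;	device offset in sectors
 */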
#define bio_to_phys(bio)	(page_to_phys(bio_page((bio))) + (unsigned long) bio_offset((bio)))
#define bvec_to_phys(bv)	(page_to_phys((bv)->bv_page) + (unsigned long) (bv)->bv_offset)
/*
 * queues that have highmem support enabled may still need to revert to
 * PIO transfers occasionally and thus map high pages temporarily. For
 * permanent PIO fall back, user is probably better off disabling highmem
 * I/O completely on that queue (see ide-dma for example)
 */
#define __bio_kmap_atomic(bio, idx, kmtype)				\
	(kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) +	\
	 bio_iovec_idx((bio), (idx))->bv_offset)

#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype)
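
/*
 * Sketch (illustrative): map, touch, unmap. The map/unmap calls must be
 * paired and nothing may sleep in between:
 *
 *	char *p = __bio_kmap_atomic(bio, i, KM_USER0);
 *	memset(p, 0, bio_iovec_idx(bio, i)->bv_len);
 *	__bio_kunmap_atomic(p, KM_USER0);
 */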
/*
 * merge helpers etc
 */
#define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
	((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
#define BIOVEC_VIRT_MERGEABLE(vec1, vec2)	\
	((((bvec_to_phys((vec1)) + (vec1)->bv_len) | bvec_to_phys((vec2))) & (BIO_VMERGE_BOUNDARY - 1)) == 0)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
	(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
	__BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
#define BIO_SEG_BOUNDARY(q, b1, b2) \
	BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
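
/*
 * Worked example: if vec1 covers physical bytes 0x1000-0x1fff
 * (bv_len == 0x1000) and vec2 starts at physical 0x2000, then
 * BIOVEC_PHYS_MERGEABLE(vec1, vec2) holds, since 0x1000 + 0x1000 ==
 * 0x2000: the two vecs form one contiguous physical segment.
 */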
#define bio_io_error(bio, bytes) bio_endio((bio), (bytes), -EIO)
/*
 * drivers should not use the __ version unless they _really_ want to
 * run through the entire bio and not just pending pieces
 */
#define __bio_for_each_segment(bvl, bio, i, start_idx)			\
	for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx);	\
	     i < (bio)->bi_vcnt;					\
	     bvl++, i++)

#define bio_for_each_segment(bvl, bio, i)				\
	__bio_for_each_segment(bvl, bio, i, (bio)->bi_idx)
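
/*
 * Sketch (illustrative): walking the pending segments of a bio in a
 * driver's request handling path:
 *
 *	struct bio_vec *bvec;
 *	int i;
 *
 *	bio_for_each_segment(bvec, bio, i) {
 *		void *p = kmap(bvec->bv_page);
 *		do_something(p + bvec->bv_offset, bvec->bv_len);
 *		kunmap(bvec->bv_page);
 *	}
 *
 * do_something() is a made-up stand-in for real per-segment work.
 */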
/*
 * get a reference to a bio, so it won't disappear. the intended use is
 * something like:
 *
 * bio_get(bio);
 * submit_bio(rw, bio);
 * if (bio->bi_flags ...)
 *	do_something
 * bio_put(bio);
 *
 * without the bio_get(), it could potentially complete I/O before submit_bio
 * returns. and then bio would be freed memory when if (bio->bi_flags ...)
 * runs
 */
#define bio_get(bio)	atomic_inc(&(bio)->bi_cnt)
/*
 * A bio_pair is used when we need to split a bio.
 * This can only happen for a bio that refers to just one
 * page of data, and in the unusual situation when the
 * page crosses a chunk/device boundary
 *
 * The address of the master bio is stored in bio1.bi_private
 * The address of the pool the pair was allocated from is stored
 *   in bio2.bi_private
 */
struct bio_pair {
	struct bio	bio1, bio2;
	struct bio_vec	bv1, bv2;
	atomic_t	cnt;
	int		error;
};
extern struct bio_pair *bio_split(struct bio *bi, mempool_t *pool,
				  int first_sectors);
extern mempool_t *bio_split_pool;
extern void bio_pair_release(struct bio_pair *dbio);
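
/*
 * Sketch (illustrative): splitting a single-page bio that straddles a
 * chunk boundary, in the style of a striping driver:
 *
 *	struct bio_pair *bp = bio_split(bio, bio_split_pool, first_sectors);
 *	(submit bp->bio1 and bp->bio2 to their respective devices)
 *	bio_pair_release(bp);
 *
 * first_sectors is the sector count that goes to the first half.
 */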
extern struct bio *bio_alloc(int, int);
extern void bio_put(struct bio *);

extern void bio_endio(struct bio *, unsigned int, int);
struct request_queue;
extern int bio_phys_segments(struct request_queue *, struct bio *);
extern int bio_hw_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, int);

extern void bio_init(struct bio *);
extern int bio_add_page(struct bio *, struct page *, unsigned int,
			unsigned int);
extern int bio_get_nr_vecs(struct block_device *);
extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
				unsigned long, unsigned int, int);
extern void bio_unmap_user(struct bio *);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
extern struct bio *bio_copy_user(struct request_queue *, unsigned long,
				 unsigned int, int);
extern int bio_uncopy_user(struct bio *);
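
/*
 * Sketch (illustrative, error handling omitted): building a one-page
 * read with the helpers above; submit_bio() is declared elsewhere.
 * my_end_io, bdev, sector and page are assumed to exist in the caller:
 *
 *	struct bio *bio = bio_alloc(GFP_NOIO, 1);
 *
 *	bio->bi_bdev = bdev;
 *	bio->bi_sector = sector;
 *	bio->bi_end_io = my_end_io;
 *	if (bio_add_page(bio, page, PAGE_SIZE, 0) != PAGE_SIZE)
 *		(the page did not fit; give up)
 *	submit_bio(READ, bio);
 */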
#ifdef CONFIG_HIGHMEM
/*
 * remember to add offset! and never ever reenable interrupts between a
 * bvec_kmap_irq and bvec_kunmap_irq!!
 *
 * This function MUST be inlined - it plays with the CPU interrupt flags.
 * Hence the `extern inline'.
 */
extern inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
{
	unsigned long addr;

	/*
	 * might not be a highmem page, but the preempt/irq count
	 * balancing is a lot nicer this way
	 */
	local_irq_save(*flags);
	addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ);

	BUG_ON(addr & ~PAGE_MASK);

	return (char *) addr + bvec->bv_offset;
}
extern inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
{
	unsigned long ptr = (unsigned long) buffer & PAGE_MASK;

	kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ);
	local_irq_restore(*flags);
}
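
/*
 * Sketch (illustrative): the required pairing, with interrupts kept
 * off for the whole critical section. The returned pointer already
 * includes bv_offset; data is a caller-supplied buffer:
 *
 *	unsigned long flags;
 *	char *buf = bvec_kmap_irq(bvec, &flags);
 *	memcpy(data, buf, bvec->bv_len);
 *	bvec_kunmap_irq(buf, &flags);
 */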
#else
#define bvec_kmap_irq(bvec, flags)	(page_address((bvec)->bv_page) + (bvec)->bv_offset)
#define bvec_kunmap_irq(buf, flags)	do { *(flags) = 0; } while (0)
#endif
extern inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
				   unsigned long *flags)
{
	return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags);
}
#define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)

#define bio_kmap_irq(bio, flags) \
	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
#define bio_kunmap_irq(buf, flags)	__bio_kunmap_irq(buf, flags)
#endif /* __LINUX_BIO_H */