/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
static struct bio_set *_bios;
/* FIXME: can we shrink this ? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	io_notify_fn callback;
	void *context;
};
/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as buffer heads ! (FIXME:
 * must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;
static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct io), gfp_mask);
}
static void free_io(void *element, void *pool_data)
{
	kfree(element);
}
static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}
static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16, 4);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}
int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}
void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}
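
/*
 * Usage sketch (assumption, not part of the original file): a client
 * brackets its lifetime with dm_io_get()/dm_io_put() to reserve io
 * contexts for the largest io it will issue.  The client_start/
 * client_stop names are hypothetical.
 */
#if 0
static int client_start(unsigned int max_pages_per_io)
{
	return dm_io_get(max_pages_per_io);	/* grow _io_pool */
}

static void client_stop(unsigned int max_pages_per_io)
{
	dm_io_put(max_pages_per_io);		/* shrink _io_pool again */
}
#endif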
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len = region;
}
static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len;
}
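
/*
 * Illustrative sketch (not part of the original file): the trick only
 * works because do_region() below sizes each bio with one bvec more
 * than bio_add_page() will ever fill, leaving the last slot's bv_len
 * free to abuse as a region index:
 */
#if 0
static void region_roundtrip_example(void)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 4, _bios);

	bio_set_region(bio, 3);
	BUG_ON(bio_get_region(bio) != 3);	/* stored in the spare bvec */
	bio_free(bio, _bios);
}
#endif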
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, _io_pool);
			fn(r, context);
		}
	}
}
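
/*
 * Sketch (assumption, not part of the original file) of the counting
 * pattern used here: dispatch_io() starts io->count at 1, each
 * submitted bio takes one reference which endio() drops, and the
 * final dec_count() of the holder reference fires completion exactly
 * once.
 */
#if 0
static void counting_pattern_example(struct io *io, struct bio **bios, int n)
{
	int i;

	atomic_set(&io->count, 1);		/* extra "holder" reference */
	for (i = 0; i < n; i++) {
		atomic_inc(&io->count);		/* one reference per bio */
		submit_bio(WRITE, bios[i]);	/* endio() -> dec_count() */
	}
	dec_count(io, 0, 0);			/* drop the holder reference */
}
#endif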
static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io = (struct io *) bio->bi_private;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	dec_count(io, bio_get_region(bio), error);
	bio_put(bio);

	return 0;
}
/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
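
/*
 * Sketch (assumption, not part of the original file): every page
 * source below is consumed through the same two callbacks, roughly
 * like this hypothetical walker:
 */
#if 0
static void dpages_walk_example(struct dpages *dp, unsigned nr_pages)
{
	struct page *page;
	unsigned long len;
	unsigned offset, i;

	for (i = 0; i < nr_pages; i++) {
		dp->get_page(dp, &page, &len, &offset);
		/* ... hand (page, len, offset) to a bio, as do_region() does ... */
		dp->next_page(dp);
	}
}
#endif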
/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}
static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}
static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}
static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}
static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}
static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}
static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
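
/*
 * Sketch (assumption, not part of the original file): a new page
 * source only has to supply the two callbacks plus an init helper, in
 * the same shape as the three sources above.  This hypothetical
 * example serves a single page repeatedly:
 */
#if 0
static void one_page_get_page(struct dpages *dp,
		struct page **p, unsigned long *len, unsigned *offset)
{
	*p = (struct page *) dp->context_ptr;
	*len = PAGE_SIZE;
	*offset = 0;
}

static void one_page_next_page(struct dpages *dp)
{
	/* always the same page, nothing to advance */
}

static void one_page_dp_init(struct dpages *dp, struct page *page)
{
	dp->get_page = one_page_get_page;
	dp->next_page = one_page_next_page;
	dp->context_ptr = page;
}
#endif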
static void dm_bio_destructor(struct bio *bio)
{
	bio_free(bio, _bios);
}
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio, we add an extra
		 * bvec for bio_get/set_region().
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> 9)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio->bi_destructor = dm_bio_destructor;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}
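
/*
 * Worked example (illustrative, not from the original file): with
 * 4 KiB pages, PAGE_SIZE >> 9 is 8 sectors per page, so a 1 MiB
 * region (2048 sectors of 512 bytes) sizes the bio at
 * 2048 / 8 + 2 = 258 bvecs: 256 for whole data pages, one to cover a
 * trailing partial page hidden by the rounding-down division, and the
 * spare slot that bio_set_region() scribbles on.
 */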
static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
static int sync_io(unsigned int num_regions, struct io_region *where,
	    int rw, struct dpages *dp, unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	*error_bits = io.error;
	return io.error ? -EIO : 0;
}
static int async_io(unsigned int num_regions, struct io_region *where, int rw,
	     struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(_io_pool, GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}
int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}
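
/*
 * Usage sketch (assumption, not part of the original file): write one
 * page synchronously to the first page-worth of sectors on a device.
 * write_first_page and its arguments are hypothetical; the caller is
 * assumed to have prepared the page_list.
 */
#if 0
static int write_first_page(struct block_device *bdev, struct page_list *pl)
{
	unsigned long error_bits = 0;
	struct io_region where = {
		.bdev	= bdev,
		.sector	= 0,
		.count	= PAGE_SIZE >> 9,	/* sectors in one page */
	};

	return dm_io_sync(1, &where, WRITE, pl, 0, &error_bits);
}
#endif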
int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}
int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}
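
/*
 * Usage sketch (assumption, not part of the original file): read a
 * region into a vmalloc'd buffer; vm_dp_init() copes with a buffer
 * that is not page aligned.  read_log_area and its arguments are
 * hypothetical.
 */
#if 0
static int read_log_area(struct block_device *bdev, sector_t start,
			 sector_t len, void *vm_buf)
{
	unsigned long error_bits = 0;
	struct io_region where = {
		.bdev	= bdev,
		.sector	= start,
		.count	= len,
	};

	return dm_io_sync_vm(1, &where, READ, vm_buf, &error_bits);
}
#endif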
int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(num_regions, where, rw, &dp, fn, context);
}
int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(num_regions, where, rw, &dp, fn, context);
}
int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(num_regions, where, rw, &dp, fn, context);
}
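
/*
 * Usage sketch (assumption, not part of the original file): a
 * fire-and-forget write with a completion callback.  The io_notify_fn
 * receives the error bitset and the caller's context.  my_write_done
 * and start_async_write are hypothetical names.
 */
#if 0
static void my_write_done(unsigned long error, void *context)
{
	if (error)
		printk(KERN_ERR "example: async write failed\n");
}

static int start_async_write(struct io_region *where, struct page_list *pl)
{
	return dm_io_async(1, where, WRITE, pl, 0, my_write_done, NULL);
}
#endif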
EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);