drivers/md/dm-io.c
/*
 * Copyright (C) 2003 Sistina Software
 *
 * This file is released under the GPL.
 */

#include "dm-io.h"

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>

static struct bio_set *_bios;

/* FIXME: can we shrink this ? */
struct io {
	unsigned long error;
	atomic_t count;
	struct task_struct *sleeper;
	io_notify_fn callback;
	void *context;
};

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as buffer heads ! (FIXME:
 * must reduce this).
 */
static unsigned _num_ios;
static mempool_t *_io_pool;

static void *alloc_io(unsigned int __nocast gfp_mask, void *pool_data)
{
	return kmalloc(sizeof(struct io), gfp_mask);
}

static void free_io(void *element, void *pool_data)
{
	kfree(element);
}

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

static int resize_pool(unsigned int new_ios)
{
	int r = 0;

	if (_io_pool) {
		if (new_ios == 0) {
			/* free off the pool */
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			bioset_free(_bios);

		} else {
			/* resize the pool */
			r = mempool_resize(_io_pool, new_ios, GFP_KERNEL);
		}

	} else {
		/* create new pool */
		_io_pool = mempool_create(new_ios, alloc_io, free_io, NULL);
		if (!_io_pool)
			return -ENOMEM;

		_bios = bioset_create(16, 16, 4);
		if (!_bios) {
			mempool_destroy(_io_pool);
			_io_pool = NULL;
			return -ENOMEM;
		}
	}

	if (!r)
		_num_ios = new_ios;

	return r;
}

int dm_io_get(unsigned int num_pages)
{
	return resize_pool(_num_ios + pages_to_ios(num_pages));
}

void dm_io_put(unsigned int num_pages)
{
	resize_pool(_num_ios - pages_to_ios(num_pages));
}

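/*
 * Illustrative sketch, not part of the original file: a client such as
 * a dm target would typically reserve pages in its constructor and give
 * the reservation back in its destructor.  client_ctr/client_dtr and
 * CLIENT_PAGES are hypothetical names.
 */
#if 0
#define CLIENT_PAGES 64

static int client_ctr(void)
{
	/* grow the shared io mempool to cover CLIENT_PAGES worth of io */
	return dm_io_get(CLIENT_PAGES);
}

static void client_dtr(void)
{
	/* shrink the pool back down */
	dm_io_put(CLIENT_PAGES);
}
#endif
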
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * In order to save a memory allocation we store this in the last
 * bvec which we know is unused (blech).
 * XXX This is ugly and can OOPS with some configs... find another way.
 *---------------------------------------------------------------*/
static inline void bio_set_region(struct bio *bio, unsigned region)
{
	bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len = region;
}

static inline unsigned bio_get_region(struct bio *bio)
{
	return bio->bi_io_vec[bio->bi_max_vecs - 1].bv_len;
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error);

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			int r = io->error;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, _io_pool);
			fn(r, context);
		}
	}
}

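/*
 * Note on the completion protocol: with the 2.6-era bi_end_io
 * signature below, the block layer may call this for partial
 * completions, so we keep returning 1 until bio->bi_size has dropped
 * to zero and the whole bio is finished.
 */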
static int endio(struct bio *bio, unsigned int done, int error)
{
	struct io *io = (struct io *) bio->bi_private;

	/* keep going until we've finished */
	if (bio->bi_size)
		return 1;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	dec_count(io, bio_get_region(bio), error);
	bio_put(bio);

	return 0;
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

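/*
 * Functions for getting the pages from a contiguous vmalloc'd buffer.
 */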
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

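/*
 * Illustrative sketch, not part of the original file: all three
 * *_dp_init() variants above produce the same iteration contract,
 * which do_region() below depends on.  'bytes' is a hypothetical
 * caller-supplied count.
 */
#if 0
static void walk_dpages(struct dpages *dp, unsigned long bytes)
{
	struct page *page;
	unsigned long len;
	unsigned offset;

	while (bytes) {
		dp->get_page(dp, &page, &len, &offset);	/* current chunk */
		len = min(len, bytes);
		/* ... consume (page, offset, len) here ... */
		bytes -= len;
		dp->next_page(dp);			/* advance one page */
	}
}
#endif
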
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned int region, struct io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	while (remaining) {
		/*
		 * Allocate a suitably sized bio; we add an extra
		 * bvec for bio_get/set_region().
		 */
		num_bvecs = (remaining / (PAGE_SIZE >> 9)) + 2;
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, _bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_private = io;
		bio_set_region(bio, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	}
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	if (sync)
		rw |= (1 << BIO_RW_SYNC);

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count)
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(unsigned int num_regions, struct io_region *where,
		   int rw, struct dpages *dp, unsigned long *error_bits)
{
	struct io io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io.error = 0;
	atomic_set(&io.count, 1); /* see dispatch_io() */
	io.sleeper = current;

	dispatch_io(rw, num_regions, where, dp, &io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io.count) || signal_pending(current))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (atomic_read(&io.count))
		return -EINTR;

	*error_bits = io.error;
	return io.error ? -EIO : 0;
}

static int async_io(unsigned int num_regions, struct io_region *where, int rw,
		    struct dpages *dp, io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && rw != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(_io_pool, GFP_NOIO);
	io->error = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

int dm_io_sync(unsigned int num_regions, struct io_region *where, int rw,
	       struct page_list *pl, unsigned int offset,
	       unsigned long *error_bits)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_bvec(unsigned int num_regions, struct io_region *where, int rw,
		    struct bio_vec *bvec, unsigned long *error_bits)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_sync_vm(unsigned int num_regions, struct io_region *where, int rw,
		  void *data, unsigned long *error_bits)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return sync_io(num_regions, where, rw, &dp, error_bits);
}

int dm_io_async(unsigned int num_regions, struct io_region *where, int rw,
		struct page_list *pl, unsigned int offset,
		io_notify_fn fn, void *context)
{
	struct dpages dp;
	list_dp_init(&dp, pl, offset);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_bvec(unsigned int num_regions, struct io_region *where, int rw,
		     struct bio_vec *bvec, io_notify_fn fn, void *context)
{
	struct dpages dp;
	bvec_dp_init(&dp, bvec);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

int dm_io_async_vm(unsigned int num_regions, struct io_region *where, int rw,
		   void *data, io_notify_fn fn, void *context)
{
	struct dpages dp;
	vm_dp_init(&dp, data);
	return async_io(num_regions, where, rw, &dp, fn, context);
}

EXPORT_SYMBOL(dm_io_get);
EXPORT_SYMBOL(dm_io_put);
EXPORT_SYMBOL(dm_io_sync);
EXPORT_SYMBOL(dm_io_async);
EXPORT_SYMBOL(dm_io_sync_bvec);
EXPORT_SYMBOL(dm_io_async_bvec);
EXPORT_SYMBOL(dm_io_sync_vm);
EXPORT_SYMBOL(dm_io_async_vm);
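
/*
 * Illustrative sketch, not part of the original file: the typical
 * dm_io_sync_vm() call pattern, a synchronous single-region write from
 * a vmalloc'd buffer.  write_header(), 'bdev' and 'data' are
 * hypothetical; on failure each set bit in error_bits identifies a
 * failed region.
 */
#if 0
static int write_header(struct block_device *bdev, void *data,
			sector_t sector, sector_t count)
{
	unsigned long error_bits = 0;
	struct io_region where = {
		.bdev	= bdev,
		.sector	= sector,
		.count	= count,
	};

	/* returns 0 on success, -EIO with error_bits set on failure */
	return dm_io_sync_vm(1, &where, WRITE, data, &error_bits);
}
#endif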