drivers/md/raid0.c
/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
	     <zyngier@ufr-info-p7.ibp.fr> or
	     <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/raid/raid0.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER
#define MD_PERSONALITY
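
/*
 * Propagate an unplug of the array's queue to every member device's
 * queue, so pending I/O is flushed on all disks.
 */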
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i = 0; i < mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}
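
/*
 * Report the array as congested as soon as any one member device is
 * congested: a striped request can make no progress past its slowest
 * disk.
 */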
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}
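
/*
 * Build the zone table.  Devices of equal size form one "strip zone";
 * when member sizes differ, the leftover space on the larger devices is
 * collected into further zones that stripe over progressively fewer
 * disks.  A worked example follows the zone-0 setup below.
 */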
static int create_strip_zones(mddev_t *mddev)
{
	int i, c, j;
	sector_t current_offset, curr_zone_offset;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct list_head *tmp1, *tmp2;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;

	rdev_for_each(rdev1, tmp1, mddev) {
		printk("raid0: looking at %s\n",
			bdevname(rdev1->bdev, b));
		c = 0;
		rdev_for_each(rdev2, tmp2, mddev) {
			printk("raid0: comparing %s(%llu)",
			       bdevname(rdev1->bdev, b),
			       (unsigned long long)rdev1->size);
			printk(" with %s(%llu)\n",
			       bdevname(rdev2->bdev, b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk("raid0: END\n");
				break;
			}
			if (rdev2->size == rdev1->size) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk("raid0: EQUAL\n");
				c = 1;
				break;
			}
			printk("raid0: NOT EQUAL\n");
		}
		if (!c) {
			printk("raid0: ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk("raid0: %d zones\n", conf->nr_strip_zones);
		}
	}
	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone) *
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t *) *
				conf->nr_strip_zones * mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	rdev_for_each(rdev1, tmp1, mddev) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk("raid0: bad disk number %d - aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk("raid0: multiple devices for %d - aborting!\n",
				j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE >> 9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE >> 9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk("raid0: too few disks (%d of %d) - aborting!\n",
			cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->size = smallest->size * cnt;
	zone->zone_offset = 0;

	current_offset = smallest->size;
	curr_zone_offset = zone->size;
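
	/* Example with hypothetical sizes: disks of 100, 200 and 300 blocks
	 * give three zones.  Zone 0 stripes the first 100 blocks of all
	 * three disks (size 300), zone 1 stripes the next 100 blocks of the
	 * two larger disks (size 200), and zone 2 is the last 100 blocks of
	 * the largest disk alone.
	 */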
	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++) {
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk("raid0: zone %d\n", i);
		zone->dev_offset = current_offset;
		smallest = NULL;
		c = 0;

		for (j = 0; j < cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk("raid0: checking %s ...", bdevname(rdev->bdev, b));
			if (rdev->size > current_offset) {
				printk(" contained as device %d\n", c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk("  (%llu) is smallest!\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(" nope.\n");
		}

		zone->nb_dev = c;
		zone->size = (smallest->size - current_offset) * c;
		printk("raid0: zone->nb_dev: %d, size: %llu\n",
			zone->nb_dev, (unsigned long long)zone->size);

		zone->zone_offset = curr_zone_offset;
		curr_zone_offset += zone->size;

		current_offset = smallest->size;
		printk("raid0: current zone offset: %llu\n",
			(unsigned long long)current_offset);
	}
	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->hash_spacing = curr_zone_offset;
	min_spacing = curr_zone_offset;
	sector_div(min_spacing, PAGE_SIZE / sizeof(struct strip_zone *));
	for (i = 0; i < conf->nr_strip_zones - 1; i++) {
		sector_t sz = 0;
		for (j = i; j < conf->nr_strip_zones - 1 &&
			     sz < min_spacing; j++)
			sz += conf->strip_zone[j].size;
		if (sz >= min_spacing && sz < conf->hash_spacing)
			conf->hash_spacing = sz;
	}
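
	/* With this spacing, hash_table[x] (built in raid0_run) points at
	 * the zone containing virtual offset x * hash_spacing, so a lookup
	 * costs one table read plus a short forward scan.
	 */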
	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk("raid0: done.\n");
	return 0;
abort:
	return 1;
}
/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;
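
	/* Bytes left in the current chunk once this bio ends there, e.g.
	 * with 64k chunks (128 sectors), a bio ending 120 sectors into its
	 * chunk leaves (128 - 120) << 9 = 4096 bytes.  An empty bio is
	 * always allowed one biovec, even straddling a chunk boundary;
	 * raid0_make_request splits such single-page bios itself.
	 */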
	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
static int raid0_run(mddev_t *mddev)
{
	unsigned cur = 0, i = 0, nb_zone;
	s64 size;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size >> 1) - 1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size >> 1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;

	conf = kmalloc(sizeof(raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones(mddev))
		goto out_free_conf;

	/* calculate array device size */
	mddev->array_sectors = 0;
	rdev_for_each(rdev, tmp, mddev)
		mddev->array_sectors += rdev->size * 2;

	printk("raid0 : md_size is %llu blocks.\n",
		(unsigned long long)mddev->array_sectors / 2);
	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
		(unsigned long long)conf->hash_spacing);
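
	/* Size the hash table: nb_zone = array size / hash_spacing, rounded
	 * up.  preshift scales both values down until the divisor fits in
	 * 32 bits, because sector_div() takes a u32 divisor.
	 */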
	{
		sector_t s = mddev->array_sectors / 2;
		sector_t space = conf->hash_spacing;
		int round;
		conf->preshift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/* shift down space and s so that sector_div will work */
			while (space > (sector_t)(~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->preshift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}

	printk("raid0 : nb_zone is %d.\n", nb_zone);

	printk("raid0 : Allocating %Zd bytes for hash.\n",
		nb_zone * sizeof(struct strip_zone *));
	conf->hash_table = kmalloc(sizeof(struct strip_zone *) * nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
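
	/* Fill the table so that hash_table[i] is the zone containing
	 * virtual offset i * hash_spacing; 'size' tracks how far the
	 * current zone extends past the previous table entry.
	 */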
	size = conf->strip_zone[cur].size;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i = 1; i < nb_zone; i++) {
		while (size <= conf->hash_spacing) {
			cur++;
			size += conf->strip_zone[cur].size;
		}
		size -= conf->hash_spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->preshift) {
		conf->hash_spacing >>= conf->preshift;
		/* round hash_spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->hash_spacing++;
	}
	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * readahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by chunk size, times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
			mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
	}
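
	/* E.g. four disks, 64k chunks and (assuming) 4k pages: stripe is
	 * 64 pages, so read-ahead is raised to at least 128 pages (512k).
	 */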
	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;
out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}
static int raid0_stop(mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}
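
/*
 * Main mapping routine: locate the zone a request falls in via the hash
 * table, then map (zone, chunk) to a member device and a sector on that
 * device.  Bios straddling a chunk boundary are split first.
 */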
static int raid0_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t block, rsect;
	const int rw = bio_data_dir(bio);

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	disk_stat_inc(mddev->gendisk, ios[rw]);
	disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio));

	chunk_size = mddev->chunk_size >> 10;
	chunk_sects = mddev->chunk_size >> 9;
	chunksize_bits = ffz(~chunk_size);
	block = bio->bi_sector >> 1;

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, bio_split_pool, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}
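
	/* Find the zone: one hash lookup, then walk forward while the
	 * block still lies beyond the current zone's extent.
	 */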
	{
		sector_t x = block >> conf->preshift;
		sector_div(x, (u32)conf->hash_spacing);
		zone = conf->hash_table[x];
	}

	while (block >= (zone->zone_offset + zone->size))
		zone++;
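
	/* Map to a member device.  Hypothetical example: two disks with 64k
	 * chunks (128 sectors), bio at array sector 300.  Chunk number
	 * 300 / 128 = 2 lands on disk 0 (2 mod 2) as that disk's second
	 * chunk, at device sector ((1 << 6) << 1) + (300 mod 128) = 172.
	 */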
	sect_in_chunk = bio->bi_sector & ((chunk_size << 1) - 1);

	{
		sector_t x = (block - zone->zone_offset) >> chunksize_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = block >> chunksize_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (((chunk << chunksize_bits) + zone->dev_offset) << 1)
		+ sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;
bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_size,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}
static void raid0_status(struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, " z%d", j);
		if (conf->hash_table[h] == conf->strip_zone + j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev, b));

		seq_printf(seq, "] zo=%d do=%d s=%d\n",
			conf->strip_zone[j].zone_offset,
			conf->strip_zone[j].dev_offset,
			conf->strip_zone[j].size);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size / 1024);
	return;
}
static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};
static int __init raid0_init(void)
{
	return register_md_personality(&raid0_personality);
}

static void raid0_exit(void)
{
	unregister_md_personality(&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");