/*
   raid0.c : Multiple Devices driver for Linux
             Copyright (C) 1994-96 Marc ZYNGIER
             <zyngier@dea.eerie.ext.cnes.fr> or
             <zyngier@ufr-info-p7.ibp.fr> or
             <maz@gloups.fdn.fr>
             Copyright (C) 1999, 2000 Ingo Molnar, Red Hat

   RAID-0 management functions.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/raid/raid0.h>

/* Propagate an unplug request to the queue of every member device. */
static void raid0_unplug(struct request_queue *q)
{
	mddev_t *mddev = q->queuedata;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i;

	for (i=0; i<mddev->raid_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);

		blk_unplug(r_queue);
	}
}

/* The array is congested if any member device is congested. */
static int raid0_congested(void *data, int bits)
{
	mddev_t *mddev = data;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t **devlist = conf->strip_zone[0].dev;
	int i, ret = 0;

	for (i = 0; i < mddev->raid_disks && !ret ; i++) {
		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);

		ret |= bdi_congested(&q->backing_dev_info, bits);
	}
	return ret;
}

static int create_strip_zones (mddev_t *mddev)
{
	int i, c, j;
	sector_t current_offset, curr_zone_offset;
	sector_t min_spacing;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	mdk_rdev_t *smallest, *rdev1, *rdev2, *rdev;
	struct list_head *tmp1, *tmp2;
	struct strip_zone *zone;
	int cnt;
	char b[BDEVNAME_SIZE];

	/*
	 * The number of 'same size groups'
	 */
	conf->nr_strip_zones = 0;

	rdev_for_each(rdev1, tmp1, mddev) {
		printk("raid0: looking at %s\n",
			bdevname(rdev1->bdev,b));
		c = 0;
		rdev_for_each(rdev2, tmp2, mddev) {
			printk("raid0:   comparing %s(%llu)",
			       bdevname(rdev1->bdev,b),
			       (unsigned long long)rdev1->size);
			printk(" with %s(%llu)\n",
			       bdevname(rdev2->bdev,b),
			       (unsigned long long)rdev2->size);
			if (rdev2 == rdev1) {
				printk("raid0:   END\n");
				break;
			}
			if (rdev2->size == rdev1->size) {
				/*
				 * Not unique, don't count it as a new
				 * group
				 */
				printk("raid0:   EQUAL\n");
				c = 1;
				break;
			}
			printk("raid0:   NOT EQUAL\n");
		}
		if (!c) {
			printk("raid0:   ==> UNIQUE\n");
			conf->nr_strip_zones++;
			printk("raid0: %d zones\n", conf->nr_strip_zones);
		}
	}
	printk("raid0: FINAL %d zones\n", conf->nr_strip_zones);

	conf->strip_zone = kzalloc(sizeof(struct strip_zone)*
				conf->nr_strip_zones, GFP_KERNEL);
	if (!conf->strip_zone)
		return 1;
	conf->devlist = kzalloc(sizeof(mdk_rdev_t*)*
				conf->nr_strip_zones*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->devlist)
		return 1;
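	/*
	 * Note: ->devlist is one flat allocation used as a two-dimensional
	 * table; zone i's row of device pointers starts at
	 * devlist + i*raid_disks (see the per-zone setup below).
	 */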

	/* The first zone must contain all devices, so here we check that
	 * there is a proper alignment of slots to devices and find them all
	 */
	zone = &conf->strip_zone[0];
	cnt = 0;
	smallest = NULL;
	zone->dev = conf->devlist;
	rdev_for_each(rdev1, tmp1, mddev) {
		int j = rdev1->raid_disk;

		if (j < 0 || j >= mddev->raid_disks) {
			printk("raid0: bad disk number %d - aborting!\n", j);
			goto abort;
		}
		if (zone->dev[j]) {
			printk("raid0: multiple devices for %d - aborting!\n",
				j);
			goto abort;
		}
		zone->dev[j] = rdev1;

		blk_queue_stack_limits(mddev->queue,
				       rdev1->bdev->bd_disk->queue);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_sectors to one PAGE, as
		 * a one page request is never in violation.
		 */
		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
		    mddev->queue->max_sectors > (PAGE_SIZE>>9))
			blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);

		if (!smallest || (rdev1->size < smallest->size))
			smallest = rdev1;
		cnt++;
	}
	if (cnt != mddev->raid_disks) {
		printk("raid0: too few disks (%d of %d) - aborting!\n",
			cnt, mddev->raid_disks);
		goto abort;
	}
	zone->nb_dev = cnt;
	zone->size = smallest->size * cnt;
	zone->zone_offset = 0;

	current_offset = smallest->size;
	curr_zone_offset = zone->size;

	/* now do the other zones */
	for (i = 1; i < conf->nr_strip_zones; i++)
	{
		zone = conf->strip_zone + i;
		zone->dev = conf->strip_zone[i-1].dev + mddev->raid_disks;

		printk("raid0: zone %d\n", i);
		zone->dev_offset = current_offset;
		smallest = NULL;
		c = 0;

		for (j=0; j<cnt; j++) {
			char b[BDEVNAME_SIZE];
			rdev = conf->strip_zone[0].dev[j];
			printk("raid0: checking %s ...", bdevname(rdev->bdev,b));
			if (rdev->size > current_offset) {
				printk(" contained as device %d\n", c);
				zone->dev[c] = rdev;
				c++;
				if (!smallest || (rdev->size < smallest->size)) {
					smallest = rdev;
					printk("  (%llu) is smallest!.\n",
						(unsigned long long)rdev->size);
				}
			} else
				printk(" nope.\n");
		}

		zone->nb_dev = c;
		zone->size = (smallest->size - current_offset) * c;
		printk("raid0: zone->nb_dev: %d, size: %llu\n",
			zone->nb_dev, (unsigned long long)zone->size);

		zone->zone_offset = curr_zone_offset;
		curr_zone_offset += zone->size;

		current_offset = smallest->size;
		printk("raid0: current zone offset: %llu\n",
			(unsigned long long)current_offset);
	}
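	/*
	 * Illustrative example of the layout built above, with assumed
	 * member sizes of 100, 200 and 200 blocks: two strip zones result.
	 * Zone 0 stripes across all three devices and covers the first 100
	 * blocks of each (size 300, zone_offset 0); zone 1 stripes across
	 * the two larger devices only and covers their remaining 100 blocks
	 * each (size 200, dev_offset 100, zone_offset 300).
	 */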

	/* Now find appropriate hash spacing.
	 * We want a number which causes most hash entries to cover
	 * at most two strips, but the hash table must be at most
	 * 1 PAGE.  We choose the smallest strip, or contiguous collection
	 * of strips, that has big enough size.  We never consider the last
	 * strip though as its size has no bearing on the efficacy of the hash
	 * table.
	 */
	conf->hash_spacing = curr_zone_offset;
	min_spacing = curr_zone_offset;
	sector_div(min_spacing, PAGE_SIZE/sizeof(struct strip_zone*));
	for (i=0; i < conf->nr_strip_zones-1; i++) {
		sector_t sz = 0;
		for (j=i; j<conf->nr_strip_zones-1 &&
			     sz < min_spacing ; j++)
			sz += conf->strip_zone[j].size;
		if (sz >= min_spacing && sz < conf->hash_spacing)
			conf->hash_spacing = sz;
	}
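	/*
	 * For instance, with 4K pages and 8-byte pointers the hash table
	 * can hold 512 entries, so min_spacing is total_size/512.  If the
	 * zones (excluding the last) have sizes 400 and 100 blocks and
	 * min_spacing works out to 300, the run starting at zone 0 sums
	 * to 400 >= 300, so hash_spacing becomes 400.
	 */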

	mddev->queue->unplug_fn = raid0_unplug;

	mddev->queue->backing_dev_info.congested_fn = raid0_congested;
	mddev->queue->backing_dev_info.congested_data = mddev;

	printk("raid0: done.\n");
	return 0;
 abort:
	return 1;
}

/**
 *	raid0_mergeable_bvec -- tell bio layer if two requests can be merged
 *	@q: request queue
 *	@bvm: properties of new bio
 *	@biovec: the request that could be merged to it.
 *
 *	Return amount of bytes we can accept at this offset
 */
static int raid0_mergeable_bvec(struct request_queue *q,
				struct bvec_merge_data *bvm,
				struct bio_vec *biovec)
{
	mddev_t *mddev = q->queuedata;
	sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev);
	int max;
	unsigned int chunk_sectors = mddev->chunk_size >> 9;
	unsigned int bio_sectors = bvm->bi_size >> 9;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9;
	if (max < 0)
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= biovec->bv_len && bio_sectors == 0)
		return biovec->bv_len;
	else
		return max;
}
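
/*
 * Worked example of the limit above, with assumed numbers: for a 64K
 * chunk (chunk_sectors == 128), a bio already holding 20 sectors that
 * starts 100 sectors into a chunk gets
 * max = (128 - (100 + 20)) << 9 = 4096 bytes, i.e. one more 4K page
 * may be merged before the chunk boundary is reached.
 */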

static int raid0_run (mddev_t *mddev)
{
	unsigned  cur=0, i=0, nb_zone;
	s64 size;
	raid0_conf_t *conf;
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	if (mddev->chunk_size == 0) {
		printk(KERN_ERR "md/raid0: non-zero chunk size required.\n");
		return -EINVAL;
	}
	printk(KERN_INFO "%s: setting max_sectors to %d, segment boundary to %d\n",
	       mdname(mddev),
	       mddev->chunk_size >> 9,
	       (mddev->chunk_size>>1)-1);
	blk_queue_max_sectors(mddev->queue, mddev->chunk_size >> 9);
	blk_queue_segment_boundary(mddev->queue, (mddev->chunk_size>>1) - 1);
	mddev->queue->queue_lock = &mddev->queue->__queue_lock;
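	/*
	 * For a 64K chunk, for example, the limits just set are
	 * max_sectors = 65536>>9 = 128 sectors (exactly one chunk) and a
	 * segment boundary mask of (65536>>1)-1 = 32767 (half a chunk).
	 */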

	conf = kmalloc(sizeof (raid0_conf_t), GFP_KERNEL);
	if (!conf)
		goto out;
	mddev->private = (void *)conf;

	conf->strip_zone = NULL;
	conf->devlist = NULL;
	if (create_strip_zones (mddev))
		goto out_free_conf;

	/* calculate array device size */
	mddev->array_sectors = 0;
	rdev_for_each(rdev, tmp, mddev)
		mddev->array_sectors += rdev->size * 2;

	printk("raid0 : md_size is %llu blocks.\n",
		(unsigned long long)mddev->array_sectors / 2);
	printk("raid0 : conf->hash_spacing is %llu blocks.\n",
		(unsigned long long)conf->hash_spacing);
	{
		sector_t s = mddev->array_sectors / 2;
		sector_t space = conf->hash_spacing;
		int round;
		conf->preshift = 0;
		if (sizeof(sector_t) > sizeof(u32)) {
			/*shift down space and s so that sector_div will work */
			while (space > (sector_t) (~(u32)0)) {
				s >>= 1;
				space >>= 1;
				s += 1; /* force round-up */
				conf->preshift++;
			}
		}
		round = sector_div(s, (u32)space) ? 1 : 0;
		nb_zone = s + round;
	}
	printk("raid0 : nb_zone is %d.\n", nb_zone);
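	/*
	 * nb_zone is the number of hash slots: the array size in blocks
	 * divided by hash_spacing, rounded up.  With assumed values of
	 * 500 blocks total and a hash_spacing of 400, sector_div leaves
	 * s == 1 with a remainder, so nb_zone == 2.
	 */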

	printk("raid0 : Allocating %Zd bytes for hash.\n",
				nb_zone*sizeof(struct strip_zone*));
	conf->hash_table = kmalloc (sizeof (struct strip_zone *)*nb_zone, GFP_KERNEL);
	if (!conf->hash_table)
		goto out_free_conf;
	size = conf->strip_zone[cur].size;

	conf->hash_table[0] = conf->strip_zone + cur;
	for (i=1; i< nb_zone; i++) {
		while (size <= conf->hash_spacing) {
			cur++;
			size += conf->strip_zone[cur].size;
		}
		size -= conf->hash_spacing;
		conf->hash_table[i] = conf->strip_zone + cur;
	}
	if (conf->preshift) {
		conf->hash_spacing >>= conf->preshift;
		/* round hash_spacing up so when we divide by it, we
		 * err on the side of too-low, which is safest
		 */
		conf->hash_spacing++;
	}
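	/*
	 * Lookup later works as follows: hash_table[x] points at (or just
	 * before) the zone containing block x*hash_spacing, and because
	 * each spacing covers at most two strips, raid0_make_request only
	 * needs a short forward walk from that entry.
	 */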

	/* calculate the max read-ahead size.
	 * For read-ahead of large files to be effective, we need to
	 * read ahead at least twice a whole stripe, i.e. the number of
	 * devices multiplied by the chunk size, times 2.
	 * If an individual device has an ra_pages greater than the
	 * chunk size, then we will not drive that device as hard as it
	 * wants.  We consider this a configuration error: a larger
	 * chunksize should be used in that case.
	 */
	{
		int stripe = mddev->raid_disks * mddev->chunk_size / PAGE_SIZE;
		if (mddev->queue->backing_dev_info.ra_pages < 2* stripe)
			mddev->queue->backing_dev_info.ra_pages = 2* stripe;
	}

	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
	return 0;

out_free_conf:
	kfree(conf->strip_zone);
	kfree(conf->devlist);
	kfree(conf);
	mddev->private = NULL;
out:
	return -ENOMEM;
}

static int raid0_stop (mddev_t *mddev)
{
	raid0_conf_t *conf = mddev_to_conf(mddev);

	blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
	kfree(conf->hash_table);
	conf->hash_table = NULL;
	kfree(conf->strip_zone);
	conf->strip_zone = NULL;
	kfree(conf);
	mddev->private = NULL;

	return 0;
}

static int raid0_make_request (struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	unsigned int sect_in_chunk, chunksize_bits, chunk_size, chunk_sects;
	raid0_conf_t *conf = mddev_to_conf(mddev);
	struct strip_zone *zone;
	mdk_rdev_t *tmp_dev;
	sector_t chunk;
	sector_t block, rsect;
	const int rw = bio_data_dir(bio);
	int cpu;

	if (unlikely(bio_barrier(bio))) {
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw],
		      bio_sectors(bio));
	part_stat_unlock();

	chunk_size = mddev->chunk_size >> 10;	/* chunk size in 1K units */
	chunk_sects = mddev->chunk_size >> 9;	/* chunk size in sectors */
	chunksize_bits = ffz(~chunk_size);
	block = bio->bi_sector >> 1;		/* bio start in 1K blocks */
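	/*
	 * ffz(~chunk_size) is the log2 of the (power-of-two) chunk size in
	 * 1K units: a 64K chunk gives chunk_size == 64 and
	 * chunksize_bits == 6, so shifting a block number right by 6
	 * yields its chunk number.
	 */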

	if (unlikely(chunk_sects < (bio->bi_sector & (chunk_sects - 1)) + (bio->bi_size >> 9))) {
		struct bio_pair *bp;
		/* Sanity check -- queue functions should prevent this happening */
		if (bio->bi_vcnt != 1 ||
		    bio->bi_idx != 0)
			goto bad_map;
		/* This is a one page bio that upper layers
		 * refuse to split for us, so we need to split it.
		 */
		bp = bio_split(bio, chunk_sects - (bio->bi_sector & (chunk_sects - 1)));
		if (raid0_make_request(q, &bp->bio1))
			generic_make_request(&bp->bio1);
		if (raid0_make_request(q, &bp->bio2))
			generic_make_request(&bp->bio2);

		bio_pair_release(bp);
		return 0;
	}

	{
		sector_t x = block >> conf->preshift;
		sector_div(x, (u32)conf->hash_spacing);
		zone = conf->hash_table[x];
	}

	while (block >= (zone->zone_offset + zone->size))
		zone++;

	sect_in_chunk = bio->bi_sector & ((chunk_size<<1) -1);

	{
		sector_t x = (block - zone->zone_offset) >> chunksize_bits;

		sector_div(x, zone->nb_dev);
		chunk = x;

		x = block >> chunksize_bits;
		tmp_dev = zone->dev[sector_div(x, zone->nb_dev)];
	}
	rsect = (((chunk << chunksize_bits) + zone->dev_offset)<<1)
		+ sect_in_chunk;

	bio->bi_bdev = tmp_dev->bdev;
	bio->bi_sector = rsect + tmp_dev->data_offset;
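	/*
	 * Worked example, assuming a single zone of three devices and a
	 * 64K chunk (chunksize_bits == 6): bi_sector == 1000 gives
	 * block == 500, which lies in stripe-wide chunk 7 (500>>6).
	 * 7 % 3 == 1 selects device 1, and 7 / 3 == 2 is the chunk's
	 * index on that device; sect_in_chunk == 1000 & 127 == 104, so
	 * rsect == ((2<<6)<<1) + 104 == 360 on device 1.
	 */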

	/*
	 * Let the main block layer submit the IO and resolve recursion:
	 */
	return 1;

bad_map:
	printk("raid0_make_request bug: can't convert block across chunks"
		" or bigger than %dk %llu %d\n", chunk_size,
		(unsigned long long)bio->bi_sector, bio->bi_size >> 10);

	bio_io_error(bio);
	return 0;
}

static void raid0_status (struct seq_file *seq, mddev_t *mddev)
{
#undef MD_DEBUG
#ifdef MD_DEBUG
	int j, k, h;
	char b[BDEVNAME_SIZE];
	raid0_conf_t *conf = mddev_to_conf(mddev);

	h = 0;
	for (j = 0; j < conf->nr_strip_zones; j++) {
		seq_printf(seq, "      z%d", j);
		if (conf->hash_table[h] == conf->strip_zone+j)
			seq_printf(seq, "(h%d)", h++);
		seq_printf(seq, "=[");
		for (k = 0; k < conf->strip_zone[j].nb_dev; k++)
			seq_printf(seq, "%s/", bdevname(
				conf->strip_zone[j].dev[k]->bdev,b));

		seq_printf(seq, "] zo=%d do=%d s=%d\n",
				conf->strip_zone[j].zone_offset,
				conf->strip_zone[j].dev_offset,
				conf->strip_zone[j].size);
	}
#endif
	seq_printf(seq, " %dk chunks", mddev->chunk_size/1024);
	return;
}

static struct mdk_personality raid0_personality =
{
	.name		= "raid0",
	.level		= 0,
	.owner		= THIS_MODULE,
	.make_request	= raid0_make_request,
	.run		= raid0_run,
	.stop		= raid0_stop,
	.status		= raid0_status,
};

static int __init raid0_init (void)
{
	return register_md_personality (&raid0_personality);
}

static void raid0_exit (void)
{
	unregister_md_personality (&raid0_personality);
}

module_init(raid0_init);
module_exit(raid0_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md-personality-2"); /* RAID0 */
MODULE_ALIAS("md-raid0");
MODULE_ALIAS("md-level-0");