1 /* $NetBSD: rf_netbsdkintf.c,v 1.270 2009/11/21 21:57:47 christos Exp $ */
3 * Copyright (c) 1996, 1997, 1998, 2008 The NetBSD Foundation, Inc.
6 * This code is derived from software contributed to The NetBSD Foundation
7 * by Greg Oster; Jason R. Thorpe.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGE.
32 * Copyright (c) 1990, 1993
33 * The Regents of the University of California. All rights reserved.
35 * This code is derived from software contributed to Berkeley by
36 * the Systems Programming Group of the University of Utah Computer
39 * Redistribution and use in source and binary forms, with or without
40 * modification, are permitted provided that the following conditions
42 * 1. Redistributions of source code must retain the above copyright
43 * notice, this list of conditions and the following disclaimer.
44 * 2. Redistributions in binary form must reproduce the above copyright
45 * notice, this list of conditions and the following disclaimer in the
46 * documentation and/or other materials provided with the distribution.
47 * 3. Neither the name of the University nor the names of its contributors
48 * may be used to endorse or promote products derived from this software
49 * without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
52 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
54 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
55 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
57 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
58 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
59 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
60 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * from: Utah $Hdr: cd.c 1.6 90/11/28$
65 * @(#)cd.c 8.2 (Berkeley) 11/16/93
69 * Copyright (c) 1988 University of Utah.
71 * This code is derived from software contributed to Berkeley by
72 * the Systems Programming Group of the University of Utah Computer
75 * Redistribution and use in source and binary forms, with or without
76 * modification, are permitted provided that the following conditions
78 * 1. Redistributions of source code must retain the above copyright
79 * notice, this list of conditions and the following disclaimer.
80 * 2. Redistributions in binary form must reproduce the above copyright
81 * notice, this list of conditions and the following disclaimer in the
82 * documentation and/or other materials provided with the distribution.
83 * 3. All advertising materials mentioning features or use of this software
84 * must display the following acknowledgement:
85 * This product includes software developed by the University of
86 * California, Berkeley and its contributors.
87 * 4. Neither the name of the University nor the names of its contributors
88 * may be used to endorse or promote products derived from this software
89 * without specific prior written permission.
91 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
92 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
93 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
94 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
95 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
96 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
97 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
98 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
99 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
100 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
103 * from: Utah $Hdr: cd.c 1.6 90/11/28$
105 * @(#)cd.c 8.2 (Berkeley) 11/16/93
109 * Copyright (c) 1995 Carnegie-Mellon University.
110 * All rights reserved.
112 * Authors: Mark Holland, Jim Zelenka
114 * Permission to use, copy, modify and distribute this software and
115 * its documentation is hereby granted, provided that both the copyright
116 * notice and this permission notice appear in all copies of the
117 * software, derivative works or modified versions, and any portions
118 * thereof, and that both notices appear in supporting documentation.
120 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
121 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
122 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
124 * Carnegie Mellon requests users of this software to return to
126 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
127 * School of Computer Science
128 * Carnegie Mellon University
129 * Pittsburgh PA 15213-3890
131 * any improvements or extensions that they make and grant Carnegie the
132 * rights to redistribute these changes.
135 /***********************************************************
137 * rf_kintf.c -- the kernel interface routines for RAIDframe
139 ***********************************************************/
141 #include <sys/cdefs.h>
142 __KERNEL_RCSID(0, "$NetBSD: rf_netbsdkintf.c,v 1.270 2009/11/21 21:57:47 christos Exp $");
145 #include "opt_compat_netbsd.h"
146 #include "opt_raid_autoconfig.h"
150 #include <sys/param.h>
151 #include <sys/errno.h>
152 #include <sys/pool.h>
153 #include <sys/proc.h>
154 #include <sys/queue.h>
155 #include <sys/disk.h>
156 #include <sys/device.h>
157 #include <sys/stat.h>
158 #include <sys/ioctl.h>
159 #include <sys/fcntl.h>
160 #include <sys/systm.h>
161 #include <sys/vnode.h>
162 #include <sys/disklabel.h>
163 #include <sys/conf.h>
165 #include <sys/bufq.h>
166 #include <sys/reboot.h>
167 #include <sys/kauth.h>
169 #include <prop/proplib.h>
171 #include <dev/raidframe/raidframevar.h>
172 #include <dev/raidframe/raidframeio.h>
173 #include <dev/raidframe/rf_paritymap.h>
176 #include "rf_copyback.h"
178 #include "rf_dagflags.h"
180 #include "rf_diskqueue.h"
181 #include "rf_etimer.h"
182 #include "rf_general.h"
183 #include "rf_kintf.h"
184 #include "rf_options.h"
185 #include "rf_driver.h"
186 #include "rf_parityscan.h"
187 #include "rf_threadstuff.h"
190 #include "rf_compat50.h"
194 int rf_kdebug_level
= 0;
195 #define db1_printf(a) if (rf_kdebug_level > 0) printf a
197 #define db1_printf(a) { }
200 static RF_Raid_t
**raidPtrs
; /* global raid device descriptors */
202 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
203 RF_DECLARE_STATIC_MUTEX(rf_sparet_wait_mutex
)
205 static RF_SparetWait_t
*rf_sparet_wait_queue
; /* requests to install a
207 static RF_SparetWait_t
*rf_sparet_resp_queue
; /* responses from
208 * installation process */
211 MALLOC_DEFINE(M_RAIDFRAME
, "RAIDframe", "RAIDframe structures");
214 static void KernelWakeupFunc(struct buf
*);
215 static void InitBP(struct buf
*, struct vnode
*, unsigned,
216 dev_t
, RF_SectorNum_t
, RF_SectorCount_t
, void *, void (*) (struct buf
*),
217 void *, int, struct proc
*);
218 static void raidinit(RF_Raid_t
*);
220 void raidattach(int);
221 static int raid_match(device_t
, cfdata_t
, void *);
222 static void raid_attach(device_t
, device_t
, void *);
223 static int raid_detach(device_t
, int);
225 static int raidread_component_area(dev_t
, struct vnode
*, void *, size_t,
227 static int raidwrite_component_area(dev_t
, struct vnode
*, void *, size_t,
228 daddr_t
, daddr_t
, int);
230 static int raidwrite_component_label(dev_t
, struct vnode
*,
231 RF_ComponentLabel_t
*);
232 static int raidread_component_label(dev_t
, struct vnode
*,
233 RF_ComponentLabel_t
*);
236 dev_type_open(raidopen
);
237 dev_type_close(raidclose
);
238 dev_type_read(raidread
);
239 dev_type_write(raidwrite
);
240 dev_type_ioctl(raidioctl
);
241 dev_type_strategy(raidstrategy
);
242 dev_type_dump(raiddump
);
243 dev_type_size(raidsize
);
245 const struct bdevsw raid_bdevsw
= {
246 raidopen
, raidclose
, raidstrategy
, raidioctl
,
247 raiddump
, raidsize
, D_DISK
250 const struct cdevsw raid_cdevsw
= {
251 raidopen
, raidclose
, raidread
, raidwrite
, raidioctl
,
252 nostop
, notty
, nopoll
, nommap
, nokqfilter
, D_DISK
255 static struct dkdriver rf_dkdriver
= { raidstrategy
, minphys
};
257 /* XXX Not sure if the following should be replacing the raidPtrs above,
258 or if it should be used in conjunction with that...
263 int sc_flags
; /* flags */
264 int sc_cflags
; /* configuration flags */
265 uint64_t sc_size
; /* size of the raid device */
266 char sc_xname
[20]; /* XXX external name */
267 struct disk sc_dkdev
; /* generic disk device info */
268 struct bufq_state
*buf_queue
; /* used for the device queue */
271 #define RAIDF_INITED 0x01 /* unit has been initialized */
272 #define RAIDF_WLABEL 0x02 /* label area is writable */
273 #define RAIDF_LABELLING 0x04 /* unit is currently being labelled */
274 #define RAIDF_SHUTDOWN 0x08 /* unit is being shutdown */
275 #define RAIDF_WANTED 0x40 /* someone is waiting to obtain a lock */
276 #define RAIDF_LOCKED 0x80 /* unit is locked */
278 #define raidunit(x) DISKUNIT(x)
281 extern struct cfdriver raid_cd
;
282 CFATTACH_DECL3_NEW(raid
, sizeof(struct raid_softc
),
283 raid_match
, raid_attach
, raid_detach
, NULL
, NULL
, NULL
,
284 DVF_DETACH_SHUTDOWN
);
287 * Allow RAIDOUTSTANDING number of simultaneous IO's to this RAID device.
288 * Be aware that large numbers can allow the driver to consume a lot of
289 * kernel memory, especially on writes, and in degraded mode reads.
291 * For example: with a stripe width of 64 blocks (32k) and 5 disks,
292 * a single 64K write will typically require 64K for the old data,
293 * 64K for the old parity, and 64K for the new parity, for a total
294 * of 192K (if the parity buffer is not re-used immediately).
295 * Even it if is used immediately, that's still 128K, which when multiplied
296 * by say 10 requests, is 1280K, *on top* of the 640K of incoming data.
298 * Now in degraded mode, for example, a 64K read on the above setup may
299 * require data reconstruction, which will require *all* of the 4 remaining
300 * disks to participate -- 4 * 32K/disk == 128K again.
303 #ifndef RAIDOUTSTANDING
304 #define RAIDOUTSTANDING 6
307 #define RAIDLABELDEV(dev) \
308 (MAKEDISKDEV(major((dev)), raidunit((dev)), RAW_PART))
310 /* declared here, and made public, for the benefit of KVM stuff.. */
311 struct raid_softc
*raid_softc
;
313 static void raidgetdefaultlabel(RF_Raid_t
*, struct raid_softc
*,
315 static void raidgetdisklabel(dev_t
);
316 static void raidmakedisklabel(struct raid_softc
*);
318 static int raidlock(struct raid_softc
*);
319 static void raidunlock(struct raid_softc
*);
321 static int raid_detach_unlocked(struct raid_softc
*);
323 static void rf_markalldirty(RF_Raid_t
*);
324 static void rf_set_properties(struct raid_softc
*, RF_Raid_t
*);
326 void rf_ReconThread(struct rf_recon_req
*);
327 void rf_RewriteParityThread(RF_Raid_t
*raidPtr
);
328 void rf_CopybackThread(RF_Raid_t
*raidPtr
);
329 void rf_ReconstructInPlaceThread(struct rf_recon_req
*);
330 int rf_autoconfig(device_t
);
331 void rf_buildroothack(RF_ConfigSet_t
*);
333 RF_AutoConfig_t
*rf_find_raid_components(void);
334 RF_ConfigSet_t
*rf_create_auto_sets(RF_AutoConfig_t
*);
335 static int rf_does_it_fit(RF_ConfigSet_t
*,RF_AutoConfig_t
*);
336 static int rf_reasonable_label(RF_ComponentLabel_t
*);
337 void rf_create_configuration(RF_AutoConfig_t
*,RF_Config_t
*, RF_Raid_t
*);
338 int rf_set_autoconfig(RF_Raid_t
*, int);
339 int rf_set_rootpartition(RF_Raid_t
*, int);
340 void rf_release_all_vps(RF_ConfigSet_t
*);
341 void rf_cleanup_config_set(RF_ConfigSet_t
*);
342 int rf_have_enough_components(RF_ConfigSet_t
*);
343 int rf_auto_config_set(RF_ConfigSet_t
*, int *);
345 static int raidautoconfig
= 0; /* Debugging, mostly. Set to 0 to not
346 allow autoconfig to take place.
347 Note that this is overridden by having
348 RAID_AUTOCONFIG as an option in the
349 kernel config file. */
351 struct RF_Pools_s rf_pools
;
359 aprint_debug("raidattach: Asked for %d units\n", num
);
363 panic("raidattach: count <= 0");
367 /* This is where all the initialization stuff gets done. */
371 /* Make some space for requested number of units... */
373 RF_Malloc(raidPtrs
, num
* sizeof(RF_Raid_t
*), (RF_Raid_t
**));
374 if (raidPtrs
== NULL
) {
375 panic("raidPtrs is NULL!!");
378 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
379 rf_mutex_init(&rf_sparet_wait_mutex
);
381 rf_sparet_wait_queue
= rf_sparet_resp_queue
= NULL
;
384 for (i
= 0; i
< num
; i
++)
386 rc
= rf_BootRaidframe();
388 aprint_normal("Kernelized RAIDframe activated\n");
390 panic("Serious error booting RAID!!");
392 /* put together some datastructures like the CCD device does.. This
393 * lets us lock the device and what-not when it gets opened. */
395 raid_softc
= (struct raid_softc
*)
396 malloc(num
* sizeof(struct raid_softc
),
397 M_RAIDFRAME
, M_NOWAIT
);
398 if (raid_softc
== NULL
) {
399 aprint_error("WARNING: no memory for RAIDframe driver\n");
403 memset(raid_softc
, 0, num
* sizeof(struct raid_softc
));
405 for (raidID
= 0; raidID
< num
; raidID
++) {
406 bufq_alloc(&raid_softc
[raidID
].buf_queue
, "fcfs", 0);
408 RF_Malloc(raidPtrs
[raidID
], sizeof(RF_Raid_t
),
410 if (raidPtrs
[raidID
] == NULL
) {
411 aprint_error("WARNING: raidPtrs[%d] is NULL\n", raidID
);
417 if (config_cfattach_attach(raid_cd
.cd_name
, &raid_ca
)) {
418 aprint_error("raidattach: config_cfattach_attach failed?\n");
421 #ifdef RAID_AUTOCONFIG
426 * Register a finalizer which will be used to auto-config RAID
427 * sets once all real hardware devices have been found.
429 if (config_finalize_register(NULL
, rf_autoconfig
) != 0)
430 aprint_error("WARNING: unable to register RAIDframe finalizer\n");
434 rf_autoconfig(device_t self
)
436 RF_AutoConfig_t
*ac_list
;
437 RF_ConfigSet_t
*config_sets
;
439 if (raidautoconfig
== 0)
442 /* XXX This code can only be run once. */
445 /* 1. locate all RAID components on the system */
446 aprint_debug("Searching for RAID components...\n");
447 ac_list
= rf_find_raid_components();
449 /* 2. Sort them into their respective sets. */
450 config_sets
= rf_create_auto_sets(ac_list
);
453 * 3. Evaluate each set andconfigure the valid ones.
454 * This gets done in rf_buildroothack().
456 rf_buildroothack(config_sets
);
462 rf_buildroothack(RF_ConfigSet_t
*config_sets
)
464 RF_ConfigSet_t
*cset
;
465 RF_ConfigSet_t
*next_cset
;
476 while (cset
!= NULL
) {
477 next_cset
= cset
->next
;
478 if (rf_have_enough_components(cset
) &&
479 cset
->ac
->clabel
->autoconfigure
==1) {
480 retcode
= rf_auto_config_set(cset
,&raidID
);
482 aprint_debug("raid%d: configured ok\n", raidID
);
483 if (cset
->rootable
) {
488 /* The autoconfig didn't work :( */
489 aprint_debug("Autoconfig failed with code %d for raid%d\n", retcode
, raidID
);
490 rf_release_all_vps(cset
);
493 /* we're not autoconfiguring this set...
494 release the associated resources */
495 rf_release_all_vps(cset
);
498 rf_cleanup_config_set(cset
);
502 /* if the user has specified what the root device should be
503 then we don't touch booted_device or boothowto... */
505 if (rootspec
!= NULL
)
508 /* we found something bootable... */
511 booted_device
= raid_softc
[rootID
].sc_dev
;
512 } else if (num_root
> 1) {
515 * Maybe the MD code can help. If it cannot, then
516 * setroot() will discover that we have no
517 * booted_device and will ask the user if nothing was
518 * hardwired in the kernel config file
521 if (booted_device
== NULL
)
523 if (booted_device
== NULL
)
527 for (raidID
= 0; raidID
< numraid
; raidID
++) {
528 if (raidPtrs
[raidID
]->valid
== 0)
531 if (raidPtrs
[raidID
]->root_partition
== 0)
534 for (col
= 0; col
< raidPtrs
[raidID
]->numCol
; col
++) {
535 devname
= raidPtrs
[raidID
]->Disks
[col
].devname
;
536 devname
+= sizeof("/dev/") - 1;
537 if (strncmp(devname
, device_xname(booted_device
),
538 strlen(device_xname(booted_device
))) != 0)
540 aprint_debug("raid%d includes boot device %s\n",
548 booted_device
= raid_softc
[rootID
].sc_dev
;
550 /* we can't guess.. require the user to answer... */
551 boothowto
|= RB_ASKNAME
;
560 struct raid_softc
*rs
;
561 struct disklabel
*lp
;
562 int part
, unit
, omask
, size
;
564 unit
= raidunit(dev
);
567 rs
= &raid_softc
[unit
];
569 if ((rs
->sc_flags
& RAIDF_INITED
) == 0)
572 part
= DISKPART(dev
);
573 omask
= rs
->sc_dkdev
.dk_openmask
& (1 << part
);
574 lp
= rs
->sc_dkdev
.dk_label
;
576 if (omask
== 0 && raidopen(dev
, 0, S_IFBLK
, curlwp
))
579 if (lp
->d_partitions
[part
].p_fstype
!= FS_SWAP
)
582 size
= lp
->d_partitions
[part
].p_size
*
583 (lp
->d_secsize
/ DEV_BSIZE
);
585 if (omask
== 0 && raidclose(dev
, 0, S_IFBLK
, curlwp
))
593 raiddump(dev_t dev
, daddr_t blkno
, void *va
, size_t size
)
595 int unit
= raidunit(dev
);
596 struct raid_softc
*rs
;
597 const struct bdevsw
*bdev
;
598 struct disklabel
*lp
;
601 int part
, c
, sparecol
, j
, scol
, dumpto
;
607 rs
= &raid_softc
[unit
];
608 raidPtr
= raidPtrs
[unit
];
610 if ((rs
->sc_flags
& RAIDF_INITED
) == 0)
613 /* we only support dumping to RAID 1 sets */
614 if (raidPtr
->Layout
.numDataCol
!= 1 ||
615 raidPtr
->Layout
.numParityCol
!= 1)
619 if ((error
= raidlock(rs
)) != 0)
622 if (size
% DEV_BSIZE
!= 0) {
627 if (blkno
+ size
/ DEV_BSIZE
> rs
->sc_size
) {
628 printf("%s: blkno (%" PRIu64
") + size / DEV_BSIZE (%zu) > "
629 "sc->sc_size (%" PRIu64
")\n", __func__
, blkno
,
630 size
/ DEV_BSIZE
, rs
->sc_size
);
635 part
= DISKPART(dev
);
636 lp
= rs
->sc_dkdev
.dk_label
;
637 offset
= lp
->d_partitions
[part
].p_offset
+ RF_PROTECTED_SECTORS
;
639 /* figure out what device is alive.. */
642 Look for a component to dump to. The preference for the
643 component to dump to is as follows:
645 2) a used_spare of the master
647 4) a used_spare of the slave
651 for (c
= 0; c
< raidPtr
->numCol
; c
++) {
652 if (raidPtr
->Disks
[c
].status
== rf_ds_optimal
) {
653 /* this might be the one */
660 At this point we have possibly selected a live master or a
661 live slave. We now check to see if there is a spared
662 master (or a spared slave), if we didn't find a live master
666 for (c
= 0; c
< raidPtr
->numSpare
; c
++) {
667 sparecol
= raidPtr
->numCol
+ c
;
668 if (raidPtr
->Disks
[sparecol
].status
== rf_ds_used_spare
) {
669 /* How about this one? */
671 for(j
=0;j
<raidPtr
->numCol
;j
++) {
672 if (raidPtr
->Disks
[j
].spareCol
== sparecol
) {
679 We must have found a spared master!
680 We'll take that over anything else
681 found so far. (We couldn't have
682 found a real master before, since
683 this is a used spare, and it's
684 saying that it's replacing the
685 master.) On reboot (with
686 autoconfiguration turned on)
687 sparecol will become the 1st
688 component (component0) of this set.
692 } else if (scol
!= -1) {
694 Must be a spared slave. We'll dump
695 to that if we havn't found anything
705 /* we couldn't find any live components to dump to!?!?
711 bdev
= bdevsw_lookup(raidPtr
->Disks
[dumpto
].dev
);
714 Note that blkno is relative to this particular partition.
715 By adding the offset of this partition in the RAID
716 set, and also adding RF_PROTECTED_SECTORS, we get a
717 value that is relative to the partition used for the
718 underlying component.
721 error
= (*bdev
->d_dump
)(raidPtr
->Disks
[dumpto
].dev
,
722 blkno
+ offset
, va
, size
);
731 raidopen(dev_t dev
, int flags
, int fmt
,
734 int unit
= raidunit(dev
);
735 struct raid_softc
*rs
;
736 struct disklabel
*lp
;
742 rs
= &raid_softc
[unit
];
744 if ((error
= raidlock(rs
)) != 0)
747 if ((rs
->sc_flags
& RAIDF_SHUTDOWN
) != 0) {
752 lp
= rs
->sc_dkdev
.dk_label
;
754 part
= DISKPART(dev
);
757 * If there are wedges, and this is not RAW_PART, then we
760 if (rs
->sc_dkdev
.dk_nwedges
!= 0 && part
!= RAW_PART
) {
766 if ((rs
->sc_flags
& RAIDF_INITED
) &&
767 (rs
->sc_dkdev
.dk_openmask
== 0))
768 raidgetdisklabel(dev
);
770 /* make sure that this partition exists */
772 if (part
!= RAW_PART
) {
773 if (((rs
->sc_flags
& RAIDF_INITED
) == 0) ||
774 ((part
>= lp
->d_npartitions
) ||
775 (lp
->d_partitions
[part
].p_fstype
== FS_UNUSED
))) {
780 /* Prevent this unit from being unconfigured while open. */
783 rs
->sc_dkdev
.dk_copenmask
|= pmask
;
787 rs
->sc_dkdev
.dk_bopenmask
|= pmask
;
791 if ((rs
->sc_dkdev
.dk_openmask
== 0) &&
792 ((rs
->sc_flags
& RAIDF_INITED
) != 0)) {
793 /* First one... mark things as dirty... Note that we *MUST*
794 have done a configure before this. I DO NOT WANT TO BE
795 SCRIBBLING TO RANDOM COMPONENTS UNTIL IT'S BEEN DETERMINED
796 THAT THEY BELONG TOGETHER!!!!! */
797 /* XXX should check to see if we're only open for reading
798 here... If so, we needn't do this, but then need some
799 other way of keeping track of what's happened.. */
801 rf_markalldirty(raidPtrs
[unit
]);
805 rs
->sc_dkdev
.dk_openmask
=
806 rs
->sc_dkdev
.dk_copenmask
| rs
->sc_dkdev
.dk_bopenmask
;
817 raidclose(dev_t dev
, int flags
, int fmt
, struct lwp
*l
)
819 int unit
= raidunit(dev
);
820 struct raid_softc
*rs
;
826 rs
= &raid_softc
[unit
];
828 if ((error
= raidlock(rs
)) != 0)
831 part
= DISKPART(dev
);
833 /* ...that much closer to allowing unconfiguration... */
836 rs
->sc_dkdev
.dk_copenmask
&= ~(1 << part
);
840 rs
->sc_dkdev
.dk_bopenmask
&= ~(1 << part
);
843 rs
->sc_dkdev
.dk_openmask
=
844 rs
->sc_dkdev
.dk_copenmask
| rs
->sc_dkdev
.dk_bopenmask
;
846 if ((rs
->sc_dkdev
.dk_openmask
== 0) &&
847 ((rs
->sc_flags
& RAIDF_INITED
) != 0)) {
848 /* Last one... device is not unconfigured yet.
849 Device shutdown has taken care of setting the
850 clean bits if RAIDF_INITED is not set
851 mark things as clean... */
853 rf_update_component_labels(raidPtrs
[unit
],
854 RF_FINAL_COMPONENT_UPDATE
);
856 /* If the kernel is shutting down, it will detach
857 * this RAID set soon enough.
867 raidstrategy(struct buf
*bp
)
871 unsigned int raidID
= raidunit(bp
->b_dev
);
873 struct raid_softc
*rs
= &raid_softc
[raidID
];
876 if ((rs
->sc_flags
& RAIDF_INITED
) ==0) {
880 if (raidID
>= numraid
|| !raidPtrs
[raidID
]) {
881 bp
->b_error
= ENODEV
;
884 raidPtr
= raidPtrs
[raidID
];
885 if (!raidPtr
->valid
) {
886 bp
->b_error
= ENODEV
;
889 if (bp
->b_bcount
== 0) {
890 db1_printf(("b_bcount is zero..\n"));
895 * Do bounds checking and adjust transfer. If there's an
896 * error, the bounds check will flag that for us.
899 wlabel
= rs
->sc_flags
& (RAIDF_WLABEL
| RAIDF_LABELLING
);
900 if (DISKPART(bp
->b_dev
) == RAW_PART
) {
901 uint64_t size
; /* device size in DEV_BSIZE unit */
903 if (raidPtr
->logBytesPerSector
> DEV_BSHIFT
) {
904 size
= raidPtr
->totalSectors
<<
905 (raidPtr
->logBytesPerSector
- DEV_BSHIFT
);
907 size
= raidPtr
->totalSectors
>>
908 (DEV_BSHIFT
- raidPtr
->logBytesPerSector
);
910 if (bounds_check_with_mediasize(bp
, DEV_BSIZE
, size
) <= 0) {
914 if (bounds_check_with_label(&rs
->sc_dkdev
, bp
, wlabel
) <= 0) {
915 db1_printf(("Bounds check failed!!:%d %d\n",
916 (int) bp
->b_blkno
, (int) wlabel
));
924 /* stuff it onto our queue */
925 bufq_put(rs
->buf_queue
, bp
);
927 /* scheduled the IO to happen at the next convenient time */
928 wakeup(&(raidPtrs
[raidID
]->iodone
));
934 bp
->b_resid
= bp
->b_bcount
;
939 raidread(dev_t dev
, struct uio
*uio
, int flags
)
941 int unit
= raidunit(dev
);
942 struct raid_softc
*rs
;
946 rs
= &raid_softc
[unit
];
948 if ((rs
->sc_flags
& RAIDF_INITED
) == 0)
951 return (physio(raidstrategy
, NULL
, dev
, B_READ
, minphys
, uio
));
956 raidwrite(dev_t dev
, struct uio
*uio
, int flags
)
958 int unit
= raidunit(dev
);
959 struct raid_softc
*rs
;
963 rs
= &raid_softc
[unit
];
965 if ((rs
->sc_flags
& RAIDF_INITED
) == 0)
968 return (physio(raidstrategy
, NULL
, dev
, B_WRITE
, minphys
, uio
));
973 raid_detach_unlocked(struct raid_softc
*rs
)
978 raidPtr
= raidPtrs
[device_unit(rs
->sc_dev
)];
981 * If somebody has a partition mounted, we shouldn't
984 if (rs
->sc_dkdev
.dk_openmask
!= 0)
987 if ((rs
->sc_flags
& RAIDF_INITED
) == 0)
988 ; /* not initialized: nothing to do */
989 else if ((error
= rf_Shutdown(raidPtr
)) != 0)
992 rs
->sc_flags
&= ~(RAIDF_INITED
|RAIDF_SHUTDOWN
);
994 /* Detach the disk. */
995 disk_detach(&rs
->sc_dkdev
);
996 disk_destroy(&rs
->sc_dkdev
);
1002 raidioctl(dev_t dev
, u_long cmd
, void *data
, int flag
, struct lwp
*l
)
1004 int unit
= raidunit(dev
);
1008 struct raid_softc
*rs
;
1009 RF_Config_t
*k_cfg
, *u_cfg
;
1011 RF_RaidDisk_t
*diskPtr
;
1012 RF_AccTotals_t
*totals
;
1013 RF_DeviceConfig_t
*d_cfg
, **ucfgp
;
1014 u_char
*specific_buf
;
1018 struct rf_recon_req
*rrcopy
, *rr
;
1019 RF_ComponentLabel_t
*clabel
;
1020 RF_ComponentLabel_t
*ci_label
;
1021 RF_ComponentLabel_t
**clabel_ptr
;
1022 RF_SingleComponent_t
*sparePtr
,*componentPtr
;
1023 RF_SingleComponent_t component
;
1024 RF_ProgressInfo_t progressInfo
, **progressInfoPtr
;
1026 #ifdef __HAVE_OLD_DISKLABEL
1027 struct disklabel newlabel
;
1029 struct dkwedge_info
*dkw
;
1031 if (unit
>= numraid
)
1033 rs
= &raid_softc
[unit
];
1034 raidPtr
= raidPtrs
[unit
];
1036 db1_printf(("raidioctl: %d %d %d %d\n", (int) dev
,
1037 (int) DISKPART(dev
), (int) unit
, (int) cmd
));
1039 /* Must be open for writes for these commands... */
1041 #ifdef DIOCGSECTORSIZE
1042 case DIOCGSECTORSIZE
:
1043 *(u_int
*)data
= raidPtr
->bytesPerSector
;
1045 case DIOCGMEDIASIZE
:
1047 (off_t
)raidPtr
->totalSectors
* raidPtr
->bytesPerSector
;
1052 #ifdef __HAVE_OLD_DISKLABEL
1059 if ((flag
& FWRITE
) == 0)
1063 /* Must be initialized for these... */
1068 #ifdef __HAVE_OLD_DISKLABEL
1072 case ODIOCGDEFLABEL
:
1081 case RAIDFRAME_SHUTDOWN
:
1082 case RAIDFRAME_REWRITEPARITY
:
1083 case RAIDFRAME_GET_INFO
:
1084 case RAIDFRAME_RESET_ACCTOTALS
:
1085 case RAIDFRAME_GET_ACCTOTALS
:
1086 case RAIDFRAME_KEEP_ACCTOTALS
:
1087 case RAIDFRAME_GET_SIZE
:
1088 case RAIDFRAME_FAIL_DISK
:
1089 case RAIDFRAME_COPYBACK
:
1090 case RAIDFRAME_CHECK_RECON_STATUS
:
1091 case RAIDFRAME_CHECK_RECON_STATUS_EXT
:
1092 case RAIDFRAME_GET_COMPONENT_LABEL
:
1093 case RAIDFRAME_SET_COMPONENT_LABEL
:
1094 case RAIDFRAME_ADD_HOT_SPARE
:
1095 case RAIDFRAME_REMOVE_HOT_SPARE
:
1096 case RAIDFRAME_INIT_LABELS
:
1097 case RAIDFRAME_REBUILD_IN_PLACE
:
1098 case RAIDFRAME_CHECK_PARITY
:
1099 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS
:
1100 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT
:
1101 case RAIDFRAME_CHECK_COPYBACK_STATUS
:
1102 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT
:
1103 case RAIDFRAME_SET_AUTOCONFIG
:
1104 case RAIDFRAME_SET_ROOT
:
1105 case RAIDFRAME_DELETE_COMPONENT
:
1106 case RAIDFRAME_INCORPORATE_HOT_SPARE
:
1107 case RAIDFRAME_PARITYMAP_STATUS
:
1108 case RAIDFRAME_PARITYMAP_GET_DISABLE
:
1109 case RAIDFRAME_PARITYMAP_SET_DISABLE
:
1110 case RAIDFRAME_PARITYMAP_SET_PARAMS
:
1111 if ((rs
->sc_flags
& RAIDF_INITED
) == 0)
1117 case RAIDFRAME_GET_INFO50
:
1118 return rf_get_info50(raidPtr
, data
);
1120 case RAIDFRAME_CONFIGURE50
:
1121 if ((retcode
= rf_config50(raidPtr
, unit
, data
, &k_cfg
)) != 0)
1125 /* configure the system */
1126 case RAIDFRAME_CONFIGURE
:
1128 if (raidPtr
->valid
) {
1129 /* There is a valid RAID set running on this unit! */
1130 printf("raid%d: Device already configured!\n",unit
);
1134 /* copy-in the configuration information */
1135 /* data points to a pointer to the configuration structure */
1137 u_cfg
= *((RF_Config_t
**) data
);
1138 RF_Malloc(k_cfg
, sizeof(RF_Config_t
), (RF_Config_t
*));
1139 if (k_cfg
== NULL
) {
1142 retcode
= copyin(u_cfg
, k_cfg
, sizeof(RF_Config_t
));
1144 RF_Free(k_cfg
, sizeof(RF_Config_t
));
1145 db1_printf(("rf_ioctl: retcode=%d copyin.1\n",
1151 /* allocate a buffer for the layout-specific data, and copy it
1153 if (k_cfg
->layoutSpecificSize
) {
1154 if (k_cfg
->layoutSpecificSize
> 10000) {
1156 RF_Free(k_cfg
, sizeof(RF_Config_t
));
1159 RF_Malloc(specific_buf
, k_cfg
->layoutSpecificSize
,
1161 if (specific_buf
== NULL
) {
1162 RF_Free(k_cfg
, sizeof(RF_Config_t
));
1165 retcode
= copyin(k_cfg
->layoutSpecific
, specific_buf
,
1166 k_cfg
->layoutSpecificSize
);
1168 RF_Free(k_cfg
, sizeof(RF_Config_t
));
1169 RF_Free(specific_buf
,
1170 k_cfg
->layoutSpecificSize
);
1171 db1_printf(("rf_ioctl: retcode=%d copyin.2\n",
1176 specific_buf
= NULL
;
1177 k_cfg
->layoutSpecific
= specific_buf
;
1179 /* should do some kind of sanity check on the configuration.
1180 * Store the sum of all the bytes in the last byte? */
1182 /* configure the system */
1185 * Clear the entire RAID descriptor, just to make sure
1186 * there is no stale data left in the case of a
1189 memset((char *) raidPtr
, 0, sizeof(RF_Raid_t
));
1190 raidPtr
->raidid
= unit
;
1192 retcode
= rf_Configure(raidPtr
, k_cfg
, NULL
);
1196 /* allow this many simultaneous IO's to
1198 raidPtr
->openings
= RAIDOUTSTANDING
;
1201 rf_markalldirty(raidPtr
);
1203 /* free the buffers. No return code here. */
1204 if (k_cfg
->layoutSpecificSize
) {
1205 RF_Free(specific_buf
, k_cfg
->layoutSpecificSize
);
1207 RF_Free(k_cfg
, sizeof(RF_Config_t
));
1211 /* shutdown the system */
1212 case RAIDFRAME_SHUTDOWN
:
1214 part
= DISKPART(dev
);
1215 pmask
= (1 << part
);
1217 if ((error
= raidlock(rs
)) != 0)
1220 if ((rs
->sc_dkdev
.dk_openmask
& ~pmask
) ||
1221 ((rs
->sc_dkdev
.dk_bopenmask
& pmask
) &&
1222 (rs
->sc_dkdev
.dk_copenmask
& pmask
)))
1225 rs
->sc_flags
|= RAIDF_SHUTDOWN
;
1226 rs
->sc_dkdev
.dk_copenmask
&= ~pmask
;
1227 rs
->sc_dkdev
.dk_bopenmask
&= ~pmask
;
1228 rs
->sc_dkdev
.dk_openmask
&= ~pmask
;
1237 /* free the pseudo device attach bits */
1239 cf
= device_cfdata(rs
->sc_dev
);
1240 if ((retcode
= config_detach(rs
->sc_dev
, DETACH_QUIET
)) == 0)
1241 free(cf
, M_RAIDFRAME
);
1244 case RAIDFRAME_GET_COMPONENT_LABEL
:
1245 clabel_ptr
= (RF_ComponentLabel_t
**) data
;
1246 /* need to read the component label for the disk indicated
1247 by row,column in clabel */
1250 * Perhaps there should be an option to skip the in-core
1251 * copy and hit the disk, as with disklabel(8).
1253 RF_Malloc(clabel
, sizeof(*clabel
), (RF_ComponentLabel_t
*));
1255 retcode
= copyin( *clabel_ptr
, clabel
,
1256 sizeof(RF_ComponentLabel_t
));
1262 clabel
->row
= 0; /* Don't allow looking at anything else.*/
1264 column
= clabel
->column
;
1266 if ((column
< 0) || (column
>= raidPtr
->numCol
+
1267 raidPtr
->numSpare
)) {
1271 RF_Free(clabel
, sizeof(*clabel
));
1273 clabel
= raidget_component_label(raidPtr
, column
);
1276 retcode
= copyout(clabel
, *clabel_ptr
,
1277 sizeof(RF_ComponentLabel_t
));
1282 case RAIDFRAME_SET_COMPONENT_LABEL
:
1283 clabel
= (RF_ComponentLabel_t
*) data
;
1285 /* XXX check the label for valid stuff... */
1286 /* Note that some things *should not* get modified --
1287 the user should be re-initing the labels instead of
1288 trying to patch things.
1291 raidid
= raidPtr
->raidid
;
1293 printf("raid%d: Got component label:\n", raidid
);
1294 printf("raid%d: Version: %d\n", raidid
, clabel
->version
);
1295 printf("raid%d: Serial Number: %d\n", raidid
, clabel
->serial_number
);
1296 printf("raid%d: Mod counter: %d\n", raidid
, clabel
->mod_counter
);
1297 printf("raid%d: Column: %d\n", raidid
, clabel
->column
);
1298 printf("raid%d: Num Columns: %d\n", raidid
, clabel
->num_columns
);
1299 printf("raid%d: Clean: %d\n", raidid
, clabel
->clean
);
1300 printf("raid%d: Status: %d\n", raidid
, clabel
->status
);
1303 column
= clabel
->column
;
1305 if ((column
< 0) || (column
>= raidPtr
->numCol
)) {
1309 /* XXX this isn't allowed to do anything for now :-) */
1311 /* XXX and before it is, we need to fill in the rest
1312 of the fields!?!?!?! */
1313 memcpy(raidget_component_label(raidPtr
, column
),
1314 clabel
, sizeof(*clabel
));
1315 raidflush_component_label(raidPtr
, column
);
1319 case RAIDFRAME_INIT_LABELS
:
1320 clabel
= (RF_ComponentLabel_t
*) data
;
1322 we only want the serial number from
1323 the above. We get all the rest of the information
1324 from the config that was used to create this RAID
1328 raidPtr
->serial_number
= clabel
->serial_number
;
1330 for(column
=0;column
<raidPtr
->numCol
;column
++) {
1331 diskPtr
= &raidPtr
->Disks
[column
];
1332 if (!RF_DEAD_DISK(diskPtr
->status
)) {
1333 ci_label
= raidget_component_label(raidPtr
,
1335 /* Zeroing this is important. */
1336 memset(ci_label
, 0, sizeof(*ci_label
));
1337 raid_init_component_label(raidPtr
, ci_label
);
1338 ci_label
->serial_number
=
1339 raidPtr
->serial_number
;
1340 ci_label
->row
= 0; /* we dont' pretend to support more */
1341 ci_label
->partitionSize
=
1342 diskPtr
->partitionSize
;
1343 ci_label
->column
= column
;
1344 raidflush_component_label(raidPtr
, column
);
1346 /* XXXjld what about the spares? */
1350 case RAIDFRAME_SET_AUTOCONFIG
:
1351 d
= rf_set_autoconfig(raidPtr
, *(int *) data
);
1352 printf("raid%d: New autoconfig value is: %d\n",
1353 raidPtr
->raidid
, d
);
1357 case RAIDFRAME_SET_ROOT
:
1358 d
= rf_set_rootpartition(raidPtr
, *(int *) data
);
1359 printf("raid%d: New rootpartition value is: %d\n",
1360 raidPtr
->raidid
, d
);
1364 /* initialize all parity */
1365 case RAIDFRAME_REWRITEPARITY
:
1367 if (raidPtr
->Layout
.map
->faultsTolerated
== 0) {
1368 /* Parity for RAID 0 is trivially correct */
1369 raidPtr
->parity_good
= RF_RAID_CLEAN
;
1373 if (raidPtr
->parity_rewrite_in_progress
== 1) {
1374 /* Re-write is already in progress! */
1378 retcode
= RF_CREATE_THREAD(raidPtr
->parity_rewrite_thread
,
1379 rf_RewriteParityThread
,
1380 raidPtr
,"raid_parity");
1384 case RAIDFRAME_ADD_HOT_SPARE
:
1385 sparePtr
= (RF_SingleComponent_t
*) data
;
1386 memcpy( &component
, sparePtr
, sizeof(RF_SingleComponent_t
));
1387 retcode
= rf_add_hot_spare(raidPtr
, &component
);
1390 case RAIDFRAME_REMOVE_HOT_SPARE
:
1393 case RAIDFRAME_DELETE_COMPONENT
:
1394 componentPtr
= (RF_SingleComponent_t
*)data
;
1395 memcpy( &component
, componentPtr
,
1396 sizeof(RF_SingleComponent_t
));
1397 retcode
= rf_delete_component(raidPtr
, &component
);
1400 case RAIDFRAME_INCORPORATE_HOT_SPARE
:
1401 componentPtr
= (RF_SingleComponent_t
*)data
;
1402 memcpy( &component
, componentPtr
,
1403 sizeof(RF_SingleComponent_t
));
1404 retcode
= rf_incorporate_hot_spare(raidPtr
, &component
);
1407 case RAIDFRAME_REBUILD_IN_PLACE
:
1409 if (raidPtr
->Layout
.map
->faultsTolerated
== 0) {
1410 /* Can't do this on a RAID 0!! */
1414 if (raidPtr
->recon_in_progress
== 1) {
1415 /* a reconstruct is already in progress! */
1419 componentPtr
= (RF_SingleComponent_t
*) data
;
1420 memcpy( &component
, componentPtr
,
1421 sizeof(RF_SingleComponent_t
));
1422 component
.row
= 0; /* we don't support any more */
1423 column
= component
.column
;
1425 if ((column
< 0) || (column
>= raidPtr
->numCol
)) {
1429 RF_LOCK_MUTEX(raidPtr
->mutex
);
1430 if ((raidPtr
->Disks
[column
].status
== rf_ds_optimal
) &&
1431 (raidPtr
->numFailures
> 0)) {
1432 /* XXX 0 above shouldn't be constant!!! */
1433 /* some component other than this has failed.
1434 Let's not make things worse than they already
1436 printf("raid%d: Unable to reconstruct to disk at:\n",
1438 printf("raid%d: Col: %d Too many failures.\n",
1439 raidPtr
->raidid
, column
);
1440 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1443 if (raidPtr
->Disks
[column
].status
==
1444 rf_ds_reconstructing
) {
1445 printf("raid%d: Unable to reconstruct to disk at:\n",
1447 printf("raid%d: Col: %d Reconstruction already occuring!\n", raidPtr
->raidid
, column
);
1449 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1452 if (raidPtr
->Disks
[column
].status
== rf_ds_spared
) {
1453 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1456 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1458 RF_Malloc(rrcopy
, sizeof(*rrcopy
), (struct rf_recon_req
*));
1462 rrcopy
->raidPtr
= (void *) raidPtr
;
1463 rrcopy
->col
= column
;
1465 retcode
= RF_CREATE_THREAD(raidPtr
->recon_thread
,
1466 rf_ReconstructInPlaceThread
,
1467 rrcopy
,"raid_reconip");
1470 case RAIDFRAME_GET_INFO
:
1471 if (!raidPtr
->valid
)
1473 ucfgp
= (RF_DeviceConfig_t
**) data
;
1474 RF_Malloc(d_cfg
, sizeof(RF_DeviceConfig_t
),
1475 (RF_DeviceConfig_t
*));
1478 d_cfg
->rows
= 1; /* there is only 1 row now */
1479 d_cfg
->cols
= raidPtr
->numCol
;
1480 d_cfg
->ndevs
= raidPtr
->numCol
;
1481 if (d_cfg
->ndevs
>= RF_MAX_DISKS
) {
1482 RF_Free(d_cfg
, sizeof(RF_DeviceConfig_t
));
1485 d_cfg
->nspares
= raidPtr
->numSpare
;
1486 if (d_cfg
->nspares
>= RF_MAX_DISKS
) {
1487 RF_Free(d_cfg
, sizeof(RF_DeviceConfig_t
));
1490 d_cfg
->maxqdepth
= raidPtr
->maxQueueDepth
;
1492 for (j
= 0; j
< d_cfg
->cols
; j
++) {
1493 d_cfg
->devs
[d
] = raidPtr
->Disks
[j
];
1496 for (j
= d_cfg
->cols
, i
= 0; i
< d_cfg
->nspares
; i
++, j
++) {
1497 d_cfg
->spares
[i
] = raidPtr
->Disks
[j
];
1499 retcode
= copyout(d_cfg
, *ucfgp
, sizeof(RF_DeviceConfig_t
));
1500 RF_Free(d_cfg
, sizeof(RF_DeviceConfig_t
));
1504 case RAIDFRAME_CHECK_PARITY
:
1505 *(int *) data
= raidPtr
->parity_good
;
1508 case RAIDFRAME_PARITYMAP_STATUS
:
1509 rf_paritymap_status(raidPtr
->parity_map
,
1510 (struct rf_pmstat
*)data
);
1513 case RAIDFRAME_PARITYMAP_SET_PARAMS
:
1514 if (raidPtr
->parity_map
== NULL
)
1515 return ENOENT
; /* ??? */
1516 if (0 != rf_paritymap_set_params(raidPtr
->parity_map
,
1517 (struct rf_pmparams
*)data
, 1))
1521 case RAIDFRAME_PARITYMAP_GET_DISABLE
:
1522 *(int *) data
= rf_paritymap_get_disable(raidPtr
);
1525 case RAIDFRAME_PARITYMAP_SET_DISABLE
:
1526 rf_paritymap_set_disable(raidPtr
, *(int *)data
);
1527 /* XXX should errors be passed up? */
1530 case RAIDFRAME_RESET_ACCTOTALS
:
1531 memset(&raidPtr
->acc_totals
, 0, sizeof(raidPtr
->acc_totals
));
1534 case RAIDFRAME_GET_ACCTOTALS
:
1535 totals
= (RF_AccTotals_t
*) data
;
1536 *totals
= raidPtr
->acc_totals
;
1539 case RAIDFRAME_KEEP_ACCTOTALS
:
1540 raidPtr
->keep_acc_totals
= *(int *)data
;
1543 case RAIDFRAME_GET_SIZE
:
1544 *(int *) data
= raidPtr
->totalSectors
;
1547 /* fail a disk & optionally start reconstruction */
1548 case RAIDFRAME_FAIL_DISK
:
1550 if (raidPtr
->Layout
.map
->faultsTolerated
== 0) {
1551 /* Can't do this on a RAID 0!! */
1555 rr
= (struct rf_recon_req
*) data
;
1557 if (rr
->col
< 0 || rr
->col
>= raidPtr
->numCol
)
1561 RF_LOCK_MUTEX(raidPtr
->mutex
);
1562 if (raidPtr
->status
== rf_rs_reconstructing
) {
1563 /* you can't fail a disk while we're reconstructing! */
1564 /* XXX wrong for RAID6 */
1565 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1568 if ((raidPtr
->Disks
[rr
->col
].status
==
1569 rf_ds_optimal
) && (raidPtr
->numFailures
> 0)) {
1570 /* some other component has failed. Let's not make
1571 things worse. XXX wrong for RAID6 */
1572 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1575 if (raidPtr
->Disks
[rr
->col
].status
== rf_ds_spared
) {
1576 /* Can't fail a spared disk! */
1577 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1580 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
1582 /* make a copy of the recon request so that we don't rely on
1583 * the user's buffer */
1584 RF_Malloc(rrcopy
, sizeof(*rrcopy
), (struct rf_recon_req
*));
1587 memcpy(rrcopy
, rr
, sizeof(*rr
));
1588 rrcopy
->raidPtr
= (void *) raidPtr
;
1590 retcode
= RF_CREATE_THREAD(raidPtr
->recon_thread
,
1592 rrcopy
,"raid_recon");
1595 /* invoke a copyback operation after recon on whatever disk
1596 * needs it, if any */
1597 case RAIDFRAME_COPYBACK
:
1599 if (raidPtr
->Layout
.map
->faultsTolerated
== 0) {
1600 /* This makes no sense on a RAID 0!! */
1604 if (raidPtr
->copyback_in_progress
== 1) {
1605 /* Copyback is already in progress! */
1609 retcode
= RF_CREATE_THREAD(raidPtr
->copyback_thread
,
1611 raidPtr
,"raid_copyback");
1614 /* return the percentage completion of reconstruction */
1615 case RAIDFRAME_CHECK_RECON_STATUS
:
1616 if (raidPtr
->Layout
.map
->faultsTolerated
== 0) {
1617 /* This makes no sense on a RAID 0, so tell the
1619 *(int *) data
= 100;
1622 if (raidPtr
->status
!= rf_rs_reconstructing
)
1623 *(int *) data
= 100;
1625 if (raidPtr
->reconControl
->numRUsTotal
> 0) {
1626 *(int *) data
= (raidPtr
->reconControl
->numRUsComplete
* 100 / raidPtr
->reconControl
->numRUsTotal
);
1632 case RAIDFRAME_CHECK_RECON_STATUS_EXT
:
1633 progressInfoPtr
= (RF_ProgressInfo_t
**) data
;
1634 if (raidPtr
->status
!= rf_rs_reconstructing
) {
1635 progressInfo
.remaining
= 0;
1636 progressInfo
.completed
= 100;
1637 progressInfo
.total
= 100;
1639 progressInfo
.total
=
1640 raidPtr
->reconControl
->numRUsTotal
;
1641 progressInfo
.completed
=
1642 raidPtr
->reconControl
->numRUsComplete
;
1643 progressInfo
.remaining
= progressInfo
.total
-
1644 progressInfo
.completed
;
1646 retcode
= copyout(&progressInfo
, *progressInfoPtr
,
1647 sizeof(RF_ProgressInfo_t
));
1650 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS
:
1651 if (raidPtr
->Layout
.map
->faultsTolerated
== 0) {
1652 /* This makes no sense on a RAID 0, so tell the
1654 *(int *) data
= 100;
1657 if (raidPtr
->parity_rewrite_in_progress
== 1) {
1658 *(int *) data
= 100 *
1659 raidPtr
->parity_rewrite_stripes_done
/
1660 raidPtr
->Layout
.numStripe
;
1662 *(int *) data
= 100;
1666 case RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT
:
1667 progressInfoPtr
= (RF_ProgressInfo_t
**) data
;
1668 if (raidPtr
->parity_rewrite_in_progress
== 1) {
1669 progressInfo
.total
= raidPtr
->Layout
.numStripe
;
1670 progressInfo
.completed
=
1671 raidPtr
->parity_rewrite_stripes_done
;
1672 progressInfo
.remaining
= progressInfo
.total
-
1673 progressInfo
.completed
;
1675 progressInfo
.remaining
= 0;
1676 progressInfo
.completed
= 100;
1677 progressInfo
.total
= 100;
1679 retcode
= copyout(&progressInfo
, *progressInfoPtr
,
1680 sizeof(RF_ProgressInfo_t
));
1683 case RAIDFRAME_CHECK_COPYBACK_STATUS
:
1684 if (raidPtr
->Layout
.map
->faultsTolerated
== 0) {
1685 /* This makes no sense on a RAID 0 */
1686 *(int *) data
= 100;
1689 if (raidPtr
->copyback_in_progress
== 1) {
1690 *(int *) data
= 100 * raidPtr
->copyback_stripes_done
/
1691 raidPtr
->Layout
.numStripe
;
1693 *(int *) data
= 100;
1697 case RAIDFRAME_CHECK_COPYBACK_STATUS_EXT
:
1698 progressInfoPtr
= (RF_ProgressInfo_t
**) data
;
1699 if (raidPtr
->copyback_in_progress
== 1) {
1700 progressInfo
.total
= raidPtr
->Layout
.numStripe
;
1701 progressInfo
.completed
=
1702 raidPtr
->copyback_stripes_done
;
1703 progressInfo
.remaining
= progressInfo
.total
-
1704 progressInfo
.completed
;
1706 progressInfo
.remaining
= 0;
1707 progressInfo
.completed
= 100;
1708 progressInfo
.total
= 100;
1710 retcode
= copyout(&progressInfo
, *progressInfoPtr
,
1711 sizeof(RF_ProgressInfo_t
));
1714 /* the sparetable daemon calls this to wait for the kernel to
1715 * need a spare table. this ioctl does not return until a
1716 * spare table is needed. XXX -- calling mpsleep here in the
1717 * ioctl code is almost certainly wrong and evil. -- XXX XXX
1718 * -- I should either compute the spare table in the kernel,
1719 * or have a different -- XXX XXX -- interface (a different
1720 * character device) for delivering the table -- XXX */
1722 case RAIDFRAME_SPARET_WAIT
:
1723 RF_LOCK_MUTEX(rf_sparet_wait_mutex
);
1724 while (!rf_sparet_wait_queue
)
1725 mpsleep(&rf_sparet_wait_queue
, (PZERO
+ 1) | PCATCH
, "sparet wait", 0, (void *) simple_lock_addr(rf_sparet_wait_mutex
), MS_LOCK_SIMPLE
);
1726 waitreq
= rf_sparet_wait_queue
;
1727 rf_sparet_wait_queue
= rf_sparet_wait_queue
->next
;
1728 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex
);
1730 /* structure assignment */
1731 *((RF_SparetWait_t
*) data
) = *waitreq
;
1733 RF_Free(waitreq
, sizeof(*waitreq
));
1736 /* wakes up a process waiting on SPARET_WAIT and puts an error
1737 * code in it that will cause the dameon to exit */
1738 case RAIDFRAME_ABORT_SPARET_WAIT
:
1739 RF_Malloc(waitreq
, sizeof(*waitreq
), (RF_SparetWait_t
*));
1741 RF_LOCK_MUTEX(rf_sparet_wait_mutex
);
1742 waitreq
->next
= rf_sparet_wait_queue
;
1743 rf_sparet_wait_queue
= waitreq
;
1744 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex
);
1745 wakeup(&rf_sparet_wait_queue
);
1748 /* used by the spare table daemon to deliver a spare table
1749 * into the kernel */
1750 case RAIDFRAME_SEND_SPARET
:
1752 /* install the spare table */
1753 retcode
= rf_SetSpareTable(raidPtr
, *(void **) data
);
1755 /* respond to the requestor. the return status of the spare
1756 * table installation is passed in the "fcol" field */
1757 RF_Malloc(waitreq
, sizeof(*waitreq
), (RF_SparetWait_t
*));
1758 waitreq
->fcol
= retcode
;
1759 RF_LOCK_MUTEX(rf_sparet_wait_mutex
);
1760 waitreq
->next
= rf_sparet_resp_queue
;
1761 rf_sparet_resp_queue
= waitreq
;
1762 wakeup(&rf_sparet_resp_queue
);
1763 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex
);
1769 break; /* fall through to the os-specific code below */
1773 if (!raidPtr
->valid
)
1777 * Add support for "regular" device ioctls here.
1780 error
= disk_ioctl(&rs
->sc_dkdev
, cmd
, data
, flag
, l
);
1781 if (error
!= EPASSTHROUGH
)
1786 *(struct disklabel
*) data
= *(rs
->sc_dkdev
.dk_label
);
1788 #ifdef __HAVE_OLD_DISKLABEL
1790 newlabel
= *(rs
->sc_dkdev
.dk_label
);
1791 if (newlabel
.d_npartitions
> OLDMAXPARTITIONS
)
1793 memcpy(data
, &newlabel
, sizeof (struct olddisklabel
));
1798 ((struct partinfo
*) data
)->disklab
= rs
->sc_dkdev
.dk_label
;
1799 ((struct partinfo
*) data
)->part
=
1800 &rs
->sc_dkdev
.dk_label
->d_partitions
[DISKPART(dev
)];
1805 #ifdef __HAVE_OLD_DISKLABEL
1810 struct disklabel
*lp
;
1811 #ifdef __HAVE_OLD_DISKLABEL
1812 if (cmd
== ODIOCSDINFO
|| cmd
== ODIOCWDINFO
) {
1813 memset(&newlabel
, 0, sizeof newlabel
);
1814 memcpy(&newlabel
, data
, sizeof (struct olddisklabel
));
1818 lp
= (struct disklabel
*)data
;
1820 if ((error
= raidlock(rs
)) != 0)
1823 rs
->sc_flags
|= RAIDF_LABELLING
;
1825 error
= setdisklabel(rs
->sc_dkdev
.dk_label
,
1826 lp
, 0, rs
->sc_dkdev
.dk_cpulabel
);
1828 if (cmd
== DIOCWDINFO
1829 #ifdef __HAVE_OLD_DISKLABEL
1830 || cmd
== ODIOCWDINFO
1833 error
= writedisklabel(RAIDLABELDEV(dev
),
1834 raidstrategy
, rs
->sc_dkdev
.dk_label
,
1835 rs
->sc_dkdev
.dk_cpulabel
);
1837 rs
->sc_flags
&= ~RAIDF_LABELLING
;
1847 if (*(int *) data
!= 0)
1848 rs
->sc_flags
|= RAIDF_WLABEL
;
1850 rs
->sc_flags
&= ~RAIDF_WLABEL
;
1854 raidgetdefaultlabel(raidPtr
, rs
, (struct disklabel
*) data
);
1857 #ifdef __HAVE_OLD_DISKLABEL
1858 case ODIOCGDEFLABEL
:
1859 raidgetdefaultlabel(raidPtr
, rs
, &newlabel
);
1860 if (newlabel
.d_npartitions
> OLDMAXPARTITIONS
)
1862 memcpy(data
, &newlabel
, sizeof (struct olddisklabel
));
1870 /* If the ioctl happens here, the parent is us. */
1871 (void)strcpy(dkw
->dkw_parent
, rs
->sc_xname
);
1872 return cmd
== DIOCAWEDGE
? dkwedge_add(dkw
) : dkwedge_del(dkw
);
1875 return dkwedge_list(&rs
->sc_dkdev
,
1876 (struct dkwedge_list
*)data
, l
);
1878 return rf_sync_component_caches(raidPtr
);
1887 /* raidinit -- complete the rest of the initialization for the
1888 RAIDframe device. */
1892 raidinit(RF_Raid_t
*raidPtr
)
1895 struct raid_softc
*rs
;
1898 unit
= raidPtr
->raidid
;
1900 rs
= &raid_softc
[unit
];
1902 /* XXX should check return code first... */
1903 rs
->sc_flags
|= RAIDF_INITED
;
1905 /* XXX doesn't check bounds. */
1906 snprintf(rs
->sc_xname
, sizeof(rs
->sc_xname
), "raid%d", unit
);
1908 /* attach the pseudo device */
1909 cf
= malloc(sizeof(*cf
), M_RAIDFRAME
, M_WAITOK
);
1910 cf
->cf_name
= raid_cd
.cd_name
;
1911 cf
->cf_atname
= raid_cd
.cd_name
;
1913 cf
->cf_fstate
= FSTATE_STAR
;
1915 rs
->sc_dev
= config_attach_pseudo(cf
);
1917 if (rs
->sc_dev
== NULL
) {
1918 printf("raid%d: config_attach_pseudo failed\n",
1920 rs
->sc_flags
&= ~RAIDF_INITED
;
1921 free(cf
, M_RAIDFRAME
);
1925 /* disk_attach actually creates space for the CPU disklabel, among
1926 * other things, so it's critical to call this *BEFORE* we try putzing
1927 * with disklabels. */
1929 disk_init(&rs
->sc_dkdev
, rs
->sc_xname
, &rf_dkdriver
);
1930 disk_attach(&rs
->sc_dkdev
);
1932 /* XXX There may be a weird interaction here between this, and
1933 * protectedSectors, as used in RAIDframe. */
1935 rs
->sc_size
= raidPtr
->totalSectors
;
1937 dkwedge_discover(&rs
->sc_dkdev
);
1939 rf_set_properties(rs
, raidPtr
);
1942 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
1943 /* wake up the daemon & tell it to get us a spare table
1945 * the entries in the queues should be tagged with the raidPtr
1946 * so that in the extremely rare case that two recons happen at once,
1947 * we know for which device were requesting a spare table
1950 * XXX This code is not currently used. GO
1953 rf_GetSpareTableFromDaemon(RF_SparetWait_t
*req
)
1957 RF_LOCK_MUTEX(rf_sparet_wait_mutex
);
1958 req
->next
= rf_sparet_wait_queue
;
1959 rf_sparet_wait_queue
= req
;
1960 wakeup(&rf_sparet_wait_queue
);
1962 /* mpsleep unlocks the mutex */
1963 while (!rf_sparet_resp_queue
) {
1964 tsleep(&rf_sparet_resp_queue
, PRIBIO
,
1965 "raidframe getsparetable", 0);
1967 req
= rf_sparet_resp_queue
;
1968 rf_sparet_resp_queue
= req
->next
;
1969 RF_UNLOCK_MUTEX(rf_sparet_wait_mutex
);
1971 retcode
= req
->fcol
;
1972 RF_Free(req
, sizeof(*req
)); /* this is not the same req as we
1978 /* a wrapper around rf_DoAccess that extracts appropriate info from the
1979 * bp & passes it down.
1980 * any calls originating in the kernel must use non-blocking I/O
1981 * do some extra sanity checking to return "appropriate" error values for
1982 * certain conditions (to make some standard utilities work)
1984 * Formerly known as: rf_DoAccessKernel
1987 raidstart(RF_Raid_t
*raidPtr
)
1989 RF_SectorCount_t num_blocks
, pb
, sum
;
1990 RF_RaidAddr_t raid_addr
;
1991 struct partition
*pp
;
1994 struct raid_softc
*rs
;
1999 unit
= raidPtr
->raidid
;
2000 rs
= &raid_softc
[unit
];
2002 /* quick check to see if anything has died recently */
2003 RF_LOCK_MUTEX(raidPtr
->mutex
);
2004 if (raidPtr
->numNewFailures
> 0) {
2005 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
2006 rf_update_component_labels(raidPtr
,
2007 RF_NORMAL_COMPONENT_UPDATE
);
2008 RF_LOCK_MUTEX(raidPtr
->mutex
);
2009 raidPtr
->numNewFailures
--;
2012 /* Check to see if we're at the limit... */
2013 while (raidPtr
->openings
> 0) {
2014 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
2016 /* get the next item, if any, from the queue */
2017 if ((bp
= bufq_get(rs
->buf_queue
)) == NULL
) {
2018 /* nothing more to do */
2022 /* Ok, for the bp we have here, bp->b_blkno is relative to the
2023 * partition.. Need to make it absolute to the underlying
2026 blocknum
= bp
->b_blkno
;
2027 if (DISKPART(bp
->b_dev
) != RAW_PART
) {
2028 pp
= &rs
->sc_dkdev
.dk_label
->d_partitions
[DISKPART(bp
->b_dev
)];
2029 blocknum
+= pp
->p_offset
;
2032 db1_printf(("Blocks: %d, %d\n", (int) bp
->b_blkno
,
2035 db1_printf(("bp->b_bcount = %d\n", (int) bp
->b_bcount
));
2036 db1_printf(("bp->b_resid = %d\n", (int) bp
->b_resid
));
2038 /* *THIS* is where we adjust what block we're going to...
2039 * but DO NOT TOUCH bp->b_blkno!!! */
2040 raid_addr
= blocknum
;
2042 num_blocks
= bp
->b_bcount
>> raidPtr
->logBytesPerSector
;
2043 pb
= (bp
->b_bcount
& raidPtr
->sectorMask
) ? 1 : 0;
2044 sum
= raid_addr
+ num_blocks
+ pb
;
2045 if (1 || rf_debugKernelAccess
) {
2046 db1_printf(("raid_addr=%d sum=%d num_blocks=%d(+%d) (%d)\n",
2047 (int) raid_addr
, (int) sum
, (int) num_blocks
,
2048 (int) pb
, (int) bp
->b_resid
));
2050 if ((sum
> raidPtr
->totalSectors
) || (sum
< raid_addr
)
2051 || (sum
< num_blocks
) || (sum
< pb
)) {
2052 bp
->b_error
= ENOSPC
;
2053 bp
->b_resid
= bp
->b_bcount
;
2055 RF_LOCK_MUTEX(raidPtr
->mutex
);
2059 * XXX rf_DoAccess() should do this, not just DoAccessKernel()
2062 if (bp
->b_bcount
& raidPtr
->sectorMask
) {
2063 bp
->b_error
= EINVAL
;
2064 bp
->b_resid
= bp
->b_bcount
;
2066 RF_LOCK_MUTEX(raidPtr
->mutex
);
2070 db1_printf(("Calling DoAccess..\n"));
2073 RF_LOCK_MUTEX(raidPtr
->mutex
);
2074 raidPtr
->openings
--;
2075 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
2078 * Everything is async.
2082 disk_busy(&rs
->sc_dkdev
);
2084 /* XXX we're still at splbio() here... do we *really*
2087 /* don't ever condition on bp->b_flags & B_WRITE.
2088 * always condition on B_READ instead */
2090 rc
= rf_DoAccess(raidPtr
, (bp
->b_flags
& B_READ
) ?
2091 RF_IO_TYPE_READ
: RF_IO_TYPE_WRITE
,
2092 do_async
, raid_addr
, num_blocks
,
2093 bp
->b_data
, bp
, RF_DAG_NONBLOCKING_IO
);
2097 bp
->b_resid
= bp
->b_bcount
;
2102 RF_LOCK_MUTEX(raidPtr
->mutex
);
2104 RF_UNLOCK_MUTEX(raidPtr
->mutex
);
2110 /* invoke an I/O from kernel mode. Disk queue should be locked upon entry */
2113 rf_DispatchKernelIO(RF_DiskQueue_t
*queue
, RF_DiskQueueData_t
*req
)
2115 int op
= (req
->type
== RF_IO_TYPE_READ
) ? B_READ
: B_WRITE
;
2121 switch (req
->type
) {
2122 case RF_IO_TYPE_NOP
: /* used primarily to unlock a locked queue */
2123 /* XXX need to do something extra here.. */
2124 /* I'm leaving this in, as I've never actually seen it used,
2125 * and I'd like folks to report it... GO */
2126 printf(("WAKEUP CALLED\n"));
2127 queue
->numOutstanding
++;
2130 bp
->b_private
= req
;
2132 KernelWakeupFunc(bp
);
2135 case RF_IO_TYPE_READ
:
2136 case RF_IO_TYPE_WRITE
:
2137 #if RF_ACC_TRACE > 0
2138 if (req
->tracerec
) {
2139 RF_ETIMER_START(req
->tracerec
->timer
);
2142 InitBP(bp
, queue
->rf_cinfo
->ci_vp
,
2143 op
, queue
->rf_cinfo
->ci_dev
,
2144 req
->sectorOffset
, req
->numSector
,
2145 req
->buf
, KernelWakeupFunc
, (void *) req
,
2146 queue
->raidPtr
->logBytesPerSector
, req
->b_proc
);
2148 if (rf_debugKernelAccess
) {
2149 db1_printf(("dispatch: bp->b_blkno = %ld\n",
2150 (long) bp
->b_blkno
));
2152 queue
->numOutstanding
++;
2153 queue
->last_deq_sector
= req
->sectorOffset
;
2154 /* acc wouldn't have been let in if there were any pending
2155 * reqs at any other priority */
2156 queue
->curPriority
= req
->priority
;
2158 db1_printf(("Going for %c to unit %d col %d\n",
2159 req
->type
, queue
->raidPtr
->raidid
,
2161 db1_printf(("sector %d count %d (%d bytes) %d\n",
2162 (int) req
->sectorOffset
, (int) req
->numSector
,
2163 (int) (req
->numSector
<<
2164 queue
->raidPtr
->logBytesPerSector
),
2165 (int) queue
->raidPtr
->logBytesPerSector
));
2168 * XXX: drop lock here since this can block at
2169 * least with backing SCSI devices. Retake it
2170 * to minimize fuss with calling interfaces.
2173 RF_UNLOCK_QUEUE_MUTEX(queue
, "unusedparam");
2175 RF_LOCK_QUEUE_MUTEX(queue
, "unusedparam");
2179 panic("bad req->type in rf_DispatchKernelIO");
2181 db1_printf(("Exiting from DispatchKernelIO\n"));
/* this is the callback function associated with an I/O invoked from
   kernel code.
 */

KernelWakeupFunc(struct buf *bp)
{
	RF_DiskQueueData_t *req = NULL;
	RF_DiskQueue_t *queue;

	db1_printf(("recovering the request queue:\n"));

	req = bp->b_private;

	queue = (RF_DiskQueue_t *) req->queue;

#if RF_ACC_TRACE > 0
	if (req->tracerec) {
		RF_ETIMER_STOP(req->tracerec->timer);
		RF_ETIMER_EVAL(req->tracerec->timer);
		RF_LOCK_MUTEX(rf_tracing_mutex);
		req->tracerec->diskwait_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->phys_io_us += RF_ETIMER_VAL_US(req->tracerec->timer);
		req->tracerec->num_phys_ios++;
		RF_UNLOCK_MUTEX(rf_tracing_mutex);
	}
#endif

	/* XXX Ok, let's get aggressive... If b_error is set, let's go
	 * ballistic, and mark the component as hosed... */

	if (bp->b_error != 0) {
		/* Mark the disk as dead */
		/* but only mark it once... */
		/* and only if it wouldn't leave this RAID set
		   completely broken */
		if (((queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_optimal) ||
		     (queue->raidPtr->Disks[queue->col].status ==
		      rf_ds_used_spare)) &&
		     (queue->raidPtr->numFailures <
		      queue->raidPtr->Layout.map->faultsTolerated)) {
			printf("raid%d: IO Error.  Marking %s as failed.\n",
			       queue->raidPtr->raidid,
			       queue->raidPtr->Disks[queue->col].devname);
			queue->raidPtr->Disks[queue->col].status =
			    rf_ds_failed;
			queue->raidPtr->status = rf_rs_degraded;
			queue->raidPtr->numFailures++;
			queue->raidPtr->numNewFailures++;
		} else {	/* Disk is already dead... */
			/* printf("Disk already marked as dead!\n"); */
		}
	}

	/* Fill in the error value */
	req->error = bp->b_error;

	simple_lock(&queue->raidPtr->iodone_lock);

	/* Drop this one on the "finished" queue... */
	TAILQ_INSERT_TAIL(&(queue->raidPtr->iodone), req, iodone_entries);

	/* Let the raidio thread know there is work to be done. */
	wakeup(&(queue->raidPtr->iodone));

	simple_unlock(&queue->raidPtr->iodone_lock);
}
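/*
 * Note: KernelWakeupFunc() does not finish the RAIDframe request itself.
 * It records bp->b_error (failing the component if the error budget in
 * faultsTolerated allows), appends the request to the array's iodone list
 * under iodone_lock, and wakes the raidio thread, which picks the request
 * up from there.
 */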
/*
 * initialize a buf structure for doing an I/O in the kernel.
 */

InitBP(struct buf *bp, struct vnode *b_vp, unsigned rw_flag, dev_t dev,
       RF_SectorNum_t startSect, RF_SectorCount_t numSect, void *bf,
       void (*cbFunc) (struct buf *), void *cbArg, int logBytesPerSector,
       struct proc *b_proc)
{
	/* bp->b_flags = B_PHYS | rw_flag; */
	bp->b_flags = rw_flag;	/* XXX need B_PHYS here too??? */
	bp->b_bcount = numSect << logBytesPerSector;
	bp->b_bufsize = bp->b_bcount;
	bp->b_blkno = startSect;
	bp->b_resid = bp->b_bcount;	/* XXX is this right!??!?!! */
	if (bp->b_bcount == 0) {
		panic("bp->b_bcount is zero in InitBP!!");
	}
	bp->b_proc = b_proc;
	bp->b_iodone = cbFunc;
	bp->b_private = cbArg;
}
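/*
 * Note: b_bcount is derived by shifting numSect left by logBytesPerSector
 * (i.e. numSect * sector size), and a zero byte count is treated as a
 * caller bug, hence the panic.  The callback cbFunc is hung off b_iodone,
 * which is how KernelWakeupFunc() above ends up running at I/O completion
 * time for requests dispatched by rf_DispatchKernelIO().
 */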
raidgetdefaultlabel(RF_Raid_t *raidPtr, struct raid_softc *rs,
    struct disklabel *lp)
{
	memset(lp, 0, sizeof(*lp));

	/* fabricate a label... */
	lp->d_secperunit = raidPtr->totalSectors;
	lp->d_secsize = raidPtr->bytesPerSector;
	lp->d_nsectors = raidPtr->Layout.dataSectorsPerStripe;
	lp->d_ntracks = 4 * raidPtr->numCol;
	lp->d_ncylinders = raidPtr->totalSectors /
	    (lp->d_nsectors * lp->d_ntracks);
	lp->d_secpercyl = lp->d_ntracks * lp->d_nsectors;

	strncpy(lp->d_typename, "raid", sizeof(lp->d_typename));
	lp->d_type = DTYPE_RAID;
	strncpy(lp->d_packname, "fictitious", sizeof(lp->d_packname));

	lp->d_interleave = 1;

	lp->d_partitions[RAW_PART].p_offset = 0;
	lp->d_partitions[RAW_PART].p_size = raidPtr->totalSectors;
	lp->d_partitions[RAW_PART].p_fstype = FS_UNUSED;
	lp->d_npartitions = RAW_PART + 1;

	lp->d_magic = DISKMAGIC;
	lp->d_magic2 = DISKMAGIC;
	lp->d_checksum = dkcksum(rs->sc_dkdev.dk_label);
}
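/*
 * Note: the geometry fabricated above is purely synthetic: "sectors per
 * track" is the number of data sectors in one stripe, "tracks" is fixed at
 * 4 * numCol, and the cylinder count just divides the total size by that
 * product.  For a hypothetical 5-column set with 128 data sectors per
 * stripe and 1000000 total sectors this would give nsectors = 128,
 * ntracks = 20, secpercyl = 2560 and ncylinders = 1000000 / 2560 = 390
 * (integer division); the numbers only need to be self-consistent, not
 * physically meaningful.
 */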
/*
 * Read the disklabel from the raid device.  If one is not present, fake
 * one up.
 */

raidgetdisklabel(dev_t dev)
{
	int unit = raidunit(dev);
	struct raid_softc *rs = &raid_softc[unit];
	const char *errstring;
	struct disklabel *lp = rs->sc_dkdev.dk_label;
	struct cpu_disklabel *clp = rs->sc_dkdev.dk_cpulabel;
	RF_Raid_t *raidPtr;

	db1_printf(("Getting the disklabel...\n"));

	memset(clp, 0, sizeof(*clp));

	raidPtr = raidPtrs[unit];

	raidgetdefaultlabel(raidPtr, rs, lp);

	/*
	 * Call the generic disklabel extraction routine.
	 */
	errstring = readdisklabel(RAIDLABELDEV(dev), raidstrategy,
	    rs->sc_dkdev.dk_label, rs->sc_dkdev.dk_cpulabel);
	if (errstring)
		raidmakedisklabel(rs);
	else {
		int i;
		struct partition *pp;

		/*
		 * Sanity check whether the found disklabel is valid.
		 *
		 * This is necessary since the total size of the raid device
		 * may vary when an interleave is changed even though exactly
		 * the same components are used, and an old disklabel may be
		 * used if one is found.
		 */
		if (lp->d_secperunit != rs->sc_size)
			printf("raid%d: WARNING: %s: "
			    "total sector size in disklabel (%" PRIu32 ") != "
			    "the size of raid (%" PRIu64 ")\n", unit, rs->sc_xname,
			    lp->d_secperunit, rs->sc_size);
		for (i = 0; i < lp->d_npartitions; i++) {
			pp = &lp->d_partitions[i];
			if (pp->p_offset + pp->p_size > rs->sc_size)
				printf("raid%d: WARNING: %s: end of partition `%c' "
				    "exceeds the size of raid (%" PRIu64 ")\n",
				    unit, rs->sc_xname, 'a' + i, rs->sc_size);
		}
	}
}
/*
 * Take care of things one might want to take care of in the event
 * that a disklabel isn't present.
 */

raidmakedisklabel(struct raid_softc *rs)
{
	struct disklabel *lp = rs->sc_dkdev.dk_label;

	db1_printf(("Making a label..\n"));

	/*
	 * For historical reasons, if there's no disklabel present
	 * the raw partition must be marked FS_BSDFFS.
	 */
	lp->d_partitions[RAW_PART].p_fstype = FS_BSDFFS;

	strncpy(lp->d_packname, "default label", sizeof(lp->d_packname));

	lp->d_checksum = dkcksum(lp);
}
/*
 * Wait interruptibly for an exclusive lock.
 *
 * Several drivers do this; it should be abstracted and made MP-safe.
 * (Hmm... where have we seen this warning before :->  GO )
 */

raidlock(struct raid_softc *rs)
{
	int error;

	while ((rs->sc_flags & RAIDF_LOCKED) != 0) {
		rs->sc_flags |= RAIDF_WANTED;
		if ((error =
			tsleep(rs, PRIBIO | PCATCH, "raidlck", 0)) != 0)
			return (error);
	}
	rs->sc_flags |= RAIDF_LOCKED;
	return (0);
}

/*
 * Unlock and wake up any waiters.
 */

raidunlock(struct raid_softc *rs)
{

	rs->sc_flags &= ~RAIDF_LOCKED;
	if ((rs->sc_flags & RAIDF_WANTED) != 0) {
		rs->sc_flags &= ~RAIDF_WANTED;
		wakeup(rs);
	}
}
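/*
 * Note: the expected usage is the usual sleep-lock pairing, e.g. as
 * raid_detach() later in this file does:
 *
 *	if ((error = raidlock(rs)) != 0)
 *		return (error);
 *	... work on rs ...
 *	raidunlock(rs);
 *
 * Since raidlock() sleeps with PCATCH, callers must be prepared for it to
 * fail (e.g. EINTR/ERESTART on a signal) rather than assuming they always
 * obtain the lock.
 */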
#define RF_COMPONENT_INFO_OFFSET  16384 /* bytes */
#define RF_COMPONENT_INFO_SIZE     1024 /* bytes */
#define RF_PARITY_MAP_OFFSET \
	(RF_COMPONENT_INFO_OFFSET + RF_COMPONENT_INFO_SIZE)
#define RF_PARITY_MAP_SIZE   RF_PARITYMAP_NBYTE
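/*
 * Note: with the values above, the reserved metadata region on each
 * component looks like this (byte offsets):
 *
 *	[16384 .. 17407]                           component label (1024 bytes)
 *	[17408 .. 17408 + RF_PARITYMAP_NBYTE - 1]  parity map
 *
 * RF_PARITYMAP_NBYTE is defined elsewhere in RAIDframe, so the exact
 * parity-map size is not restated here.
 */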
raidmarkclean(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_ComponentLabel_t *clabel;

	clabel = raidget_component_label(raidPtr, col);
	clabel->clean = RF_RAID_CLEAN;
	raidflush_component_label(raidPtr, col);
}


raidmarkdirty(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_ComponentLabel_t *clabel;

	clabel = raidget_component_label(raidPtr, col);
	clabel->clean = RF_RAID_DIRTY;
	raidflush_component_label(raidPtr, col);
}


raidfetch_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	return raidread_component_label(raidPtr->Disks[col].dev,
	    raidPtr->raid_cinfo[col].ci_vp,
	    &raidPtr->raid_cinfo[col].ci_label);
}

RF_ComponentLabel_t *
raidget_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	return &raidPtr->raid_cinfo[col].ci_label;
}
raidflush_component_label(RF_Raid_t *raidPtr, RF_RowCol_t col)
{
	RF_ComponentLabel_t *label;

	label = &raidPtr->raid_cinfo[col].ci_label;
	label->mod_counter = raidPtr->mod_counter;
#ifndef RF_NO_PARITY_MAP
	label->parity_map_modcount = label->mod_counter;
#endif
	return raidwrite_component_label(raidPtr->Disks[col].dev,
	    raidPtr->raid_cinfo[col].ci_vp, label);
}


raidread_component_label(dev_t dev, struct vnode *b_vp,
    RF_ComponentLabel_t *clabel)
{
	return raidread_component_area(dev, b_vp, clabel,
	    sizeof(RF_ComponentLabel_t),
	    RF_COMPONENT_INFO_OFFSET, RF_COMPONENT_INFO_SIZE);
}
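/*
 * Note: together with raidfetch_component_label() above, these form a
 * small accessor set: "fetch" reads the on-disk label into the in-core
 * copy in raid_cinfo[col].ci_label, "get" simply returns a pointer to
 * that in-core copy, and "flush" stamps the current mod_counter into it
 * and writes it back out through raidwrite_component_label().
 */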
raidread_component_area(dev_t dev, struct vnode *b_vp, void *data,
    size_t msize, daddr_t offset, daddr_t dsize)
{
	struct buf *bp;
	const struct bdevsw *bdev;
	int error;

	/* XXX should probably ensure that we don't try to do this if
	   someone has changed rf_protected_sectors. */

	if (b_vp == NULL) {
		/* For whatever reason, this component is not valid.
		   Don't try to read a component label from it. */
		return(EINVAL);
	}

	/* get a block of the appropriate size... */
	bp = geteblk((int)dsize);

	/* get our ducks in a row for the read */
	bp->b_blkno = offset / DEV_BSIZE;
	bp->b_bcount = dsize;
	bp->b_flags |= B_READ;
	bp->b_resid = dsize;

	bdev = bdevsw_lookup(bp->b_dev);
	(*bdev->d_strategy)(bp);

	error = biowait(bp);

	memcpy(data, bp->b_data, msize);
}
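/*
 * Note: this follows the usual synchronous kernel buffer pattern: geteblk()
 * borrows a buffer of the requested size, b_blkno is expressed in DEV_BSIZE
 * units (hence the offset / DEV_BSIZE conversion), the component's
 * block-device strategy routine is invoked directly, and biowait() blocks
 * until the transfer finishes before the payload is copied out.
 * raidwrite_component_area() below mirrors this for writes.
 */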
raidwrite_component_label(dev_t dev, struct vnode *b_vp,
    RF_ComponentLabel_t *clabel)
{
	return raidwrite_component_area(dev, b_vp, clabel,
	    sizeof(RF_ComponentLabel_t),
	    RF_COMPONENT_INFO_OFFSET, RF_COMPONENT_INFO_SIZE, 0);
}


raidwrite_component_area(dev_t dev, struct vnode *b_vp, void *data,
    size_t msize, daddr_t offset, daddr_t dsize, int asyncp)
{
	struct buf *bp;
	const struct bdevsw *bdev;
	int error;

	/* get a block of the appropriate size... */
	bp = geteblk((int)dsize);

	/* get our ducks in a row for the write */
	bp->b_blkno = offset / DEV_BSIZE;
	bp->b_bcount = dsize;
	bp->b_flags |= B_WRITE | (asyncp ? B_ASYNC : 0);
	bp->b_resid = dsize;
	memset(bp->b_data, 0, dsize);
	memcpy(bp->b_data, data, msize);

	bdev = bdevsw_lookup(bp->b_dev);
	(*bdev->d_strategy)(bp);

	error = biowait(bp);

	if (error)
		printf("Failed to write RAID component info!\n");
}
rf_paritymap_kern_write(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
{
	int c;

	for (c = 0; c < raidPtr->numCol; c++) {
		/* Skip dead disks. */
		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
			continue;
		/* XXXjld: what if an error occurs here? */
		raidwrite_component_area(raidPtr->Disks[c].dev,
		    raidPtr->raid_cinfo[c].ci_vp, map,
		    sizeof(struct rf_paritymap_ondisk),
		    RF_PARITY_MAP_OFFSET, RF_PARITY_MAP_SIZE, 0);
	}
}


rf_paritymap_kern_read(RF_Raid_t *raidPtr, struct rf_paritymap_ondisk *map)
{
	struct rf_paritymap_ondisk tmp;
	int c, first = 1;

	for (c = 0; c < raidPtr->numCol; c++) {
		/* Skip dead disks. */
		if (RF_DEAD_DISK(raidPtr->Disks[c].status))
			continue;
		raidread_component_area(raidPtr->Disks[c].dev,
		    raidPtr->raid_cinfo[c].ci_vp, &tmp,
		    sizeof(struct rf_paritymap_ondisk),
		    RF_PARITY_MAP_OFFSET, RF_PARITY_MAP_SIZE);
		if (first) {
			memcpy(map, &tmp, sizeof(*map));
			first = 0;
		} else
			rf_paritymap_merge(map, &tmp);
	}
}
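/*
 * Note: rf_paritymap_kern_read() folds the per-component maps together:
 * the first readable component's map is copied and each later one is
 * combined via rf_paritymap_merge().  (Presumably the merge ORs the dirty
 * regions together; rf_paritymap_merge() lives elsewhere in RAIDframe, so
 * its behaviour is not shown here.)
 */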
rf_markalldirty(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol;

	raidPtr->mod_counter++;
	for (c = 0; c < raidPtr->numCol; c++) {
		/* we don't want to touch (at all) a disk that has
		   failed */
		if (!RF_DEAD_DISK(raidPtr->Disks[c].status)) {
			clabel = raidget_component_label(raidPtr, c);
			if (clabel->status == rf_ds_spared) {
				/* XXX do something special...
				   but whatever you do, don't
				   try to access it!! */
			} else {
				raidmarkdirty(raidPtr, c);
			}
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*
			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			 */
			scol = -1;
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->column = scol;
			/* Note: we *don't* change status from rf_ds_used_spare
			   to rf_ds_optimal */
			/* clabel.status = rf_ds_optimal; */

			raidmarkdirty(raidPtr, sparecol);
		}
	}
}
rf_update_component_labels(RF_Raid_t *raidPtr, int final)
{
	RF_ComponentLabel_t *clabel;
	int sparecol;
	int c;
	int j;
	int scol;

	/* XXX should do extra checks to make sure things really are clean,
	   rather than blindly setting the clean bit... */

	raidPtr->mod_counter++;

	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			clabel = raidget_component_label(raidPtr, c);
			/* make sure status is noted */
			clabel->status = rf_ds_optimal;

			/* note what unit we are configured as */
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, c);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, c);
				}
			}
		}
		/* else we don't touch it.. */
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			/*
			   we claim this disk is "optimal" if it's
			   rf_ds_used_spare, as that means it should be
			   directly substitutable for the disk it replaced.
			 */
			scol = -1;
			for(j=0;j<raidPtr->numCol;j++) {
				if (raidPtr->Disks[j].spareCol == sparecol) {
					scol = j;
					break;
				}
			}

			/* XXX shouldn't *really* need this... */
			clabel = raidget_component_label(raidPtr, sparecol);
			/* make sure status is noted */

			raid_init_component_label(raidPtr, clabel);

			clabel->column = scol;
			clabel->status = rf_ds_optimal;
			clabel->last_unit = raidPtr->raidid;

			raidflush_component_label(raidPtr, sparecol);
			if (final == RF_FINAL_COMPONENT_UPDATE) {
				if (raidPtr->parity_good == RF_RAID_CLEAN) {
					raidmarkclean(raidPtr, sparecol);
				}
			}
		}
	}
}
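/*
 * Note: the clean bit is only set from here when the caller passes
 * RF_FINAL_COMPONENT_UPDATE and parity_good == RF_RAID_CLEAN; every other
 * call just refreshes mod_counter, status and last_unit, so an unclean
 * shutdown leaves the component labels marked dirty.
 */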
rf_close_component(RF_Raid_t *raidPtr, struct vnode *vp, int auto_configured)
{
	if (auto_configured == 1) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
	} else {
		(void) vn_close(vp, FREAD | FWRITE, curlwp->l_cred);
	}
}


rf_UnconfigureVnodes(RF_Raid_t *raidPtr)
{
	int r, c;
	struct vnode *vp;
	int acd;

	/* We take this opportunity to close the vnodes like we should.. */

	for (c = 0; c < raidPtr->numCol; c++) {
		vp = raidPtr->raid_cinfo[c].ci_vp;
		acd = raidPtr->Disks[c].auto_configured;
		rf_close_component(raidPtr, vp, acd);
		raidPtr->raid_cinfo[c].ci_vp = NULL;
		raidPtr->Disks[c].auto_configured = 0;
	}
	for (r = 0; r < raidPtr->numSpare; r++) {
		vp = raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp;
		acd = raidPtr->Disks[raidPtr->numCol + r].auto_configured;
		rf_close_component(raidPtr, vp, acd);
		raidPtr->raid_cinfo[raidPtr->numCol + r].ci_vp = NULL;
		raidPtr->Disks[raidPtr->numCol + r].auto_configured = 0;
	}
}
rf_ReconThread(struct rf_recon_req *req)
{
	RF_Raid_t *raidPtr;

	raidPtr = (RF_Raid_t *) req->raidPtr;
	raidPtr->recon_in_progress = 1;

	rf_FailDisk((RF_Raid_t *) req->raidPtr, req->col,
		    ((req->flags & RF_FDFLAGS_RECON) ? 1 : 0));

	RF_Free(req, sizeof(*req));

	raidPtr->recon_in_progress = 0;

	kthread_exit(0);	/* does not return */
}


rf_RewriteParityThread(RF_Raid_t *raidPtr)
{
	int retcode;

	raidPtr->parity_rewrite_stripes_done = 0;
	raidPtr->parity_rewrite_in_progress = 1;

	retcode = rf_RewriteParity(raidPtr);

	if (retcode) {
		printf("raid%d: Error re-writing parity!\n",raidPtr->raidid);
	} else {
		/* set the clean bit!  If we shutdown correctly,
		   the clean bit on each component label will get
		   set */
		raidPtr->parity_good = RF_RAID_CLEAN;
	}

	raidPtr->parity_rewrite_in_progress = 0;

	/* Anyone waiting for us to stop?  If so, inform them... */
	if (raidPtr->waitShutdown) {
		wakeup(&raidPtr->parity_rewrite_in_progress);
	}

	kthread_exit(0);	/* does not return */
}


rf_CopybackThread(RF_Raid_t *raidPtr)
{
	raidPtr->copyback_in_progress = 1;

	rf_CopybackReconstructedData(raidPtr);

	raidPtr->copyback_in_progress = 0;

	kthread_exit(0);	/* does not return */
}


rf_ReconstructInPlaceThread(struct rf_recon_req *req)
{
	RF_Raid_t *raidPtr;

	raidPtr = req->raidPtr;
	raidPtr->recon_in_progress = 1;
	rf_ReconstructInPlace(raidPtr, req->col);
	RF_Free(req, sizeof(*req));
	raidPtr->recon_in_progress = 0;

	kthread_exit(0);	/* does not return */
}
static RF_AutoConfig_t *
rf_get_component(RF_AutoConfig_t *ac_list, dev_t dev, struct vnode *vp,
    const char *cname, RF_SectorCount_t size)
{
	int good_one = 0;
	RF_ComponentLabel_t *clabel;
	RF_AutoConfig_t *ac;

	clabel = malloc(sizeof(RF_ComponentLabel_t), M_RAIDFRAME, M_NOWAIT);
	if (clabel == NULL) {
oomem:
		while (ac_list) {
			ac = ac_list;
			if (ac->clabel)
				free(ac->clabel, M_RAIDFRAME);
			ac_list = ac_list->next;
			free(ac, M_RAIDFRAME);
		}
		printf("RAID auto config: out of memory!\n");
		return NULL; /* XXX probably should panic? */
	}

	if (!raidread_component_label(dev, vp, clabel)) {
		/* Got the label.  Does it look reasonable? */
		if (rf_reasonable_label(clabel) &&
		    (clabel->partitionSize <= size)) {
			printf("Component on: %s: %llu\n",
			    cname, (unsigned long long)size);
			rf_print_component_label(clabel);
			/* if it's reasonable, add it, else ignore it. */
			ac = malloc(sizeof(RF_AutoConfig_t), M_RAIDFRAME,
			    M_NOWAIT);
			if (ac == NULL) {
				free(clabel, M_RAIDFRAME);
				goto oomem;
			}
			strlcpy(ac->devname, cname, sizeof(ac->devname));
			ac->dev = dev;
			ac->vp = vp;
			ac->clabel = clabel;
			ac->next = ac_list;
			ac_list = ac;
			good_one = 1;
		}
	}
	if (!good_one) {
		/* cleanup */
		free(clabel, M_RAIDFRAME);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
	}
	return ac_list;
}
rf_find_raid_components(void)
{
	struct vnode *vp;
	struct disklabel label;
	device_t dv;
	deviter_t di;
	dev_t dev;
	int bmajor, bminor, wedge;
	int error;
	int i;
	RF_AutoConfig_t *ac_list;

	/* initialize the AutoConfig list */
	ac_list = NULL;

	/* we begin by trolling through *all* the devices on the system */

	for (dv = deviter_first(&di, DEVITER_F_ROOT_FIRST); dv != NULL;
	     dv = deviter_next(&di)) {

		/* we are only interested in disks... */
		if (device_class(dv) != DV_DISK)
			continue;

		/* we don't care about floppies... */
		if (device_is_a(dv, "fd")) {
			continue;
		}

		/* we don't care about CD's... */
		if (device_is_a(dv, "cd")) {
			continue;
		}

		/* we don't care about md's... */
		if (device_is_a(dv, "md")) {
			continue;
		}

		/* hdfd is the Atari/Hades floppy driver */
		if (device_is_a(dv, "hdfd")) {
			continue;
		}

		/* fdisa is the Atari/Milan floppy driver */
		if (device_is_a(dv, "fdisa")) {
			continue;
		}

		/* need to find the device_name_to_block_device_major stuff */
		bmajor = devsw_name2blk(device_xname(dv), NULL, 0);

		/* get a vnode for the raw partition of this disk */

		wedge = device_is_a(dv, "dk");
		bminor = minor(device_unit(dv));
		dev = wedge ? makedev(bmajor, bminor) :
		    MAKEDISKDEV(bmajor, bminor, RAW_PART);
		if (bdevvp(dev, &vp))
			panic("RAID can't alloc vnode");

		error = VOP_OPEN(vp, FREAD, NOCRED);

		if (error) {
			/* "Who cares."  Continue looking
			   for something that exists*/
			vput(vp);
			continue;
		}

		if (wedge) {
			struct dkwedge_info dkw;
			error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD,
			    NOCRED);
			if (error) {
				printf("RAIDframe: can't get wedge info for "
				    "dev %s (%d)\n", device_xname(dv), error);
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
				vput(vp);
				continue;
			}

			if (strcmp(dkw.dkw_ptype, DKW_PTYPE_RAIDFRAME) != 0) {
				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
				VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
				vput(vp);
				continue;
			}

			ac_list = rf_get_component(ac_list, dev, vp,
			    device_xname(dv), dkw.dkw_size);
			continue;
		}

		/* Ok, the disk exists.  Go get the disklabel. */
		error = VOP_IOCTL(vp, DIOCGDINFO, &label, FREAD, NOCRED);
		if (error) {
			/*
			 * XXX can't happen - open() would
			 * have errored out (or faked up one)
			 */
			if (error != ENOTTY)
				printf("RAIDframe: can't get label for dev "
				    "%s (%d)\n", device_xname(dv), error);
		}

		/* don't need this any more.  We'll allocate it again
		   a little later if we really do... */
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(vp, FREAD | FWRITE, NOCRED);
		vput(vp);

		for (i = 0; i < label.d_npartitions; i++) {
			char cname[sizeof(ac_list->devname)];

			/* We only support partitions marked as RAID */
			if (label.d_partitions[i].p_fstype != FS_RAID)
				continue;

			dev = MAKEDISKDEV(bmajor, device_unit(dv), i);
			if (bdevvp(dev, &vp))
				panic("RAID can't alloc vnode");

			error = VOP_OPEN(vp, FREAD, NOCRED);
			if (error) {
				vput(vp);
				continue;
			}
			snprintf(cname, sizeof(cname), "%s%c",
			    device_xname(dv), 'a' + i);
			ac_list = rf_get_component(ac_list, dev, vp, cname,
			    label.d_partitions[i].p_size);
		}
	}
	deviter_release(&di);
	return ac_list;
}
rf_reasonable_label(RF_ComponentLabel_t *clabel)
{

	if (((clabel->version==RF_COMPONENT_LABEL_VERSION_1) ||
	     (clabel->version==RF_COMPONENT_LABEL_VERSION)) &&
	    ((clabel->clean == RF_RAID_CLEAN) ||
	     (clabel->clean == RF_RAID_DIRTY)) &&
	    clabel->row >= 0 &&
	    clabel->column >= 0 &&
	    clabel->num_rows > 0 &&
	    clabel->num_columns > 0 &&
	    clabel->row < clabel->num_rows &&
	    clabel->column < clabel->num_columns &&
	    clabel->blockSize > 0 &&
	    clabel->numBlocks > 0) {
		/* label looks reasonable enough... */
		return(1);
	}
	return(0);
}


rf_print_component_label(RF_ComponentLabel_t *clabel)
{
	printf("   Row: %d Column: %d Num Rows: %d Num Columns: %d\n",
	       clabel->row, clabel->column,
	       clabel->num_rows, clabel->num_columns);
	printf("   Version: %d Serial Number: %d Mod Counter: %d\n",
	       clabel->version, clabel->serial_number,
	       clabel->mod_counter);
	printf("   Clean: %s Status: %d\n",
	       clabel->clean ? "Yes" : "No", clabel->status);
	printf("   sectPerSU: %d SUsPerPU: %d SUsPerRU: %d\n",
	       clabel->sectPerSU, clabel->SUsPerPU, clabel->SUsPerRU);
	printf("   RAID Level: %c  blocksize: %d numBlocks: %d\n",
	       (char) clabel->parityConfig, clabel->blockSize,
	       clabel->numBlocks);
	printf("   Autoconfig: %s\n", clabel->autoconfigure ? "Yes" : "No");
	printf("   Contains root partition: %s\n",
	       clabel->root_partition ? "Yes" : "No");
	printf("   Last configured as: raid%d\n", clabel->last_unit);
	printf("   Config order: %d\n", clabel->config_order);
}
rf_create_auto_sets(RF_AutoConfig_t *ac_list)
{
	RF_AutoConfig_t *ac;
	RF_ConfigSet_t *config_sets;
	RF_ConfigSet_t *cset;
	RF_AutoConfig_t *ac_next;

	config_sets = NULL;

	/* Go through the AutoConfig list, and figure out which components
	   belong to what sets.  */
	ac = ac_list;
	while (ac != NULL) {
		/* we're going to putz with ac->next, so save it here
		   for use at the end of the loop */
		ac_next = ac->next;

		if (config_sets == NULL) {
			/* will need at least this one... */
			config_sets = (RF_ConfigSet_t *)
				malloc(sizeof(RF_ConfigSet_t),
				       M_RAIDFRAME, M_NOWAIT);
			if (config_sets == NULL) {
				panic("rf_create_auto_sets: No memory!");
			}
			/* this one is easy :) */
			config_sets->ac = ac;
			config_sets->next = NULL;
			config_sets->rootable = 0;
			ac->next = NULL;
		} else {
			/* which set does this component fit into? */
			cset = config_sets;
			while (cset != NULL) {
				if (rf_does_it_fit(cset, ac)) {
					/* looks like it matches... */
					ac->next = cset->ac;
					cset->ac = ac;
					break;
				}
				cset = cset->next;
			}
			if (cset == NULL) {
				/* didn't find a match above... new set..*/
				cset = (RF_ConfigSet_t *)
					malloc(sizeof(RF_ConfigSet_t),
					       M_RAIDFRAME, M_NOWAIT);
				if (cset == NULL) {
					panic("rf_create_auto_sets: No memory!");
				}
				cset->ac = ac;
				ac->next = NULL;
				cset->next = config_sets;
				cset->rootable = 0;
				config_sets = cset;
			}
		}
		ac = ac_next;
	}
	return(config_sets);
}
rf_does_it_fit(RF_ConfigSet_t *cset, RF_AutoConfig_t *ac)
{
	RF_ComponentLabel_t *clabel1, *clabel2;

	/* If this one matches the *first* one in the set, that's good
	   enough, since the other members of the set would have been
	   through here too... */
	/* note that we are not checking partitionSize here..

	   Note that we are also not checking the mod_counters here.
	   If everything else matches except the mod_counter, that's
	   good enough for this test.  We will deal with the mod_counters
	   a little later in the autoconfiguration process.

	    (clabel1->mod_counter == clabel2->mod_counter) &&

	   The reason we don't check for this is that failed disks
	   will have lower modification counts.  If those disks are
	   not added to the set they used to belong to, then they will
	   form their own set, which may result in 2 different sets,
	   for example, competing to be configured at raid0, and
	   perhaps competing to be the root filesystem set.  If the
	   wrong ones get configured, or both attempt to become /,
	   weird behaviour and or serious lossage will occur.  Thus we
	   need to bring them into the fold here, and kick them out at
	   a later point.
	 */

	clabel1 = cset->ac->clabel;
	clabel2 = ac->clabel;
	if ((clabel1->version == clabel2->version) &&
	    (clabel1->serial_number == clabel2->serial_number) &&
	    (clabel1->num_rows == clabel2->num_rows) &&
	    (clabel1->num_columns == clabel2->num_columns) &&
	    (clabel1->sectPerSU == clabel2->sectPerSU) &&
	    (clabel1->SUsPerPU == clabel2->SUsPerPU) &&
	    (clabel1->SUsPerRU == clabel2->SUsPerRU) &&
	    (clabel1->parityConfig == clabel2->parityConfig) &&
	    (clabel1->maxOutstanding == clabel2->maxOutstanding) &&
	    (clabel1->blockSize == clabel2->blockSize) &&
	    (clabel1->numBlocks == clabel2->numBlocks) &&
	    (clabel1->autoconfigure == clabel2->autoconfigure) &&
	    (clabel1->root_partition == clabel2->root_partition) &&
	    (clabel1->last_unit == clabel2->last_unit) &&
	    (clabel1->config_order == clabel2->config_order)) {
		/* if it gets here, it almost *has* to be a match */
	} else {
		/* it's not consistent with somebody in the set..
		   punt */
		return(0);
	}
	/* all was fine.. it must fit... */
	return(1);
}
rf_have_enough_components(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *auto_config;
	RF_ComponentLabel_t *clabel;
	int c;
	int num_cols;
	int num_missing;
	int mod_counter;
	int mod_counter_found;
	int even_pair_failed;
	char parity_type;

	/* check to see that we have enough 'live' components
	   of this set.  If so, we can configure it if necessary */

	num_cols = cset->ac->clabel->num_columns;
	parity_type = cset->ac->clabel->parityConfig;

	/* XXX Check for duplicate components!?!?!? */

	/* Determine what the mod_counter is supposed to be for this set. */

	mod_counter_found = 0;
	mod_counter = 0;
	ac = cset->ac;
	while (ac != NULL) {
		if (mod_counter_found == 0) {
			mod_counter = ac->clabel->mod_counter;
			mod_counter_found = 1;
		} else {
			if (ac->clabel->mod_counter > mod_counter) {
				mod_counter = ac->clabel->mod_counter;
			}
		}
		ac = ac->next;
	}

	num_missing = 0;
	auto_config = cset->ac;

	even_pair_failed = 0;
	for(c=0; c<num_cols; c++) {
		ac = auto_config;
		while (ac != NULL) {
			if ((ac->clabel->column == c) &&
			    (ac->clabel->mod_counter == mod_counter)) {
				/* it's this one... */
				printf("Found: %s at %d\n",
				       ac->devname, c);
				break;
			}
			ac = ac->next;
		}
		if (ac == NULL) {
			/* Didn't find one here! */
			/* special case for RAID 1, especially
			   where there are more than 2
			   components (where RAIDframe treats
			   things a little differently :( ) */
			if (parity_type == '1') {
				if (c%2 == 0) { /* even component */
					even_pair_failed = 1;
				} else { /* odd component.  If the
					    matching even component
					    failed as well, then
					    "Good Night, Charlie" */
					if (even_pair_failed == 1) {
						return(0);
					}
				}
			} else {
				/* normal accounting */
				num_missing++;
			}
		}
		if ((parity_type == '1') && (c%2 == 1)) {
			/* Just did an even component, and we didn't
			   bail.. reset the even_pair_failed flag,
			   and go on to the next component.... */
			even_pair_failed = 0;
		}
	}

	clabel = cset->ac->clabel;

	if (((clabel->parityConfig == '0') && (num_missing > 0)) ||
	    ((clabel->parityConfig == '4') && (num_missing > 1)) ||
	    ((clabel->parityConfig == '5') && (num_missing > 1))) {
		/* XXX this needs to be made *much* more general */
		/* Too many failures */
		return(0);
	}
	/* otherwise, all is well, and we've got enough to take a kick
	   at autoconfiguring this set */
	return(1);
}
rf_create_configuration(RF_AutoConfig_t *ac, RF_Config_t *config,
    RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t *clabel;
	int i;

	clabel = ac->clabel;

	/* 1. Fill in the common stuff */
	config->numRow = clabel->num_rows = 1;
	config->numCol = clabel->num_columns;
	config->numSpare = 0; /* XXX should this be set here? */
	config->sectPerSU = clabel->sectPerSU;
	config->SUsPerPU = clabel->SUsPerPU;
	config->SUsPerRU = clabel->SUsPerRU;
	config->parityConfig = clabel->parityConfig;
	strcpy(config->diskQueueType,"fifo");
	config->maxOutstandingDiskReqs = clabel->maxOutstanding;
	config->layoutSpecificSize = 0; /* XXX ?? */

	while (ac != NULL) {
		/* row/col values will be in range due to the checks
		   in reasonable_label() */
		strcpy(config->devnames[0][ac->clabel->column],
		    ac->devname);
		ac = ac->next;
	}

	for(i=0;i<RF_MAXDBGV;i++) {
		config->debugVars[i][0] = 0;
	}
}
rf_set_autoconfig(RF_Raid_t *raidPtr, int new_value)
{
	RF_ComponentLabel_t *clabel;
	int column;
	int sparecol;

	raidPtr->autoconfigure = new_value;

	for(column=0; column<raidPtr->numCol; column++) {
		if (raidPtr->Disks[column].status == rf_ds_optimal) {
			clabel = raidget_component_label(raidPtr, column);
			clabel->autoconfigure = new_value;
			raidflush_component_label(raidPtr, column);
		}
	}
	for(column = 0; column < raidPtr->numSpare ; column++) {
		sparecol = raidPtr->numCol + column;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			clabel = raidget_component_label(raidPtr, sparecol);
			clabel->autoconfigure = new_value;
			raidflush_component_label(raidPtr, sparecol);
		}
	}
	return(new_value);
}


rf_set_rootpartition(RF_Raid_t *raidPtr, int new_value)
{
	RF_ComponentLabel_t *clabel;
	int column;
	int sparecol;

	raidPtr->root_partition = new_value;
	for(column=0; column<raidPtr->numCol; column++) {
		if (raidPtr->Disks[column].status == rf_ds_optimal) {
			clabel = raidget_component_label(raidPtr, column);
			clabel->root_partition = new_value;
			raidflush_component_label(raidPtr, column);
		}
	}
	for(column = 0; column < raidPtr->numSpare ; column++) {
		sparecol = raidPtr->numCol + column;
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			clabel = raidget_component_label(raidPtr, sparecol);
			clabel->root_partition = new_value;
			raidflush_component_label(raidPtr, sparecol);
		}
	}
	return(new_value);
}
rf_release_all_vps(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;

	ac = cset->ac;
	while (ac != NULL) {
		/* Close the vp, and give it back */
		vn_lock(ac->vp, LK_EXCLUSIVE | LK_RETRY);
		VOP_CLOSE(ac->vp, FREAD, NOCRED);
		ac = ac->next;
	}
}


rf_cleanup_config_set(RF_ConfigSet_t *cset)
{
	RF_AutoConfig_t *ac;
	RF_AutoConfig_t *next_ac;

	ac = cset->ac;
	while (ac != NULL) {
		next_ac = ac->next;
		/* nuke the label */
		free(ac->clabel, M_RAIDFRAME);
		/* cleanup the config structure */
		free(ac, M_RAIDFRAME);
		ac = next_ac;
	}
	/* and, finally, nuke the config set */
	free(cset, M_RAIDFRAME);
}
raid_init_component_label(RF_Raid_t *raidPtr, RF_ComponentLabel_t *clabel)
{
	/* current version number */
	clabel->version = RF_COMPONENT_LABEL_VERSION;
	clabel->serial_number = raidPtr->serial_number;
	clabel->mod_counter = raidPtr->mod_counter;

	clabel->num_rows = 1;
	clabel->num_columns = raidPtr->numCol;
	clabel->clean = RF_RAID_DIRTY; /* not clean */
	clabel->status = rf_ds_optimal; /* "It's good!" */

	clabel->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	clabel->SUsPerPU = raidPtr->Layout.SUsPerPU;
	clabel->SUsPerRU = raidPtr->Layout.SUsPerRU;

	clabel->blockSize = raidPtr->bytesPerSector;
	clabel->numBlocks = raidPtr->sectorsPerDisk;

	/* XXX not portable */
	clabel->parityConfig = raidPtr->Layout.map->parityConfig;
	clabel->maxOutstanding = raidPtr->maxOutstanding;
	clabel->autoconfigure = raidPtr->autoconfigure;
	clabel->root_partition = raidPtr->root_partition;
	clabel->last_unit = raidPtr->raidid;
	clabel->config_order = raidPtr->config_order;

#ifndef RF_NO_PARITY_MAP
	rf_paritymap_init_label(raidPtr->parity_map, clabel);
#endif
}
rf_auto_config_set(RF_ConfigSet_t *cset, int *unit)
{
	RF_Raid_t *raidPtr;
	RF_Config_t *config;
	int raidID;
	int retcode;

	printf("RAID autoconfigure\n");

	/* 1. Create a config structure */

	config = (RF_Config_t *)malloc(sizeof(RF_Config_t),
	    M_RAIDFRAME, M_NOWAIT);
	if (config == NULL) {
		printf("Out of mem!?!?\n");
		/* XXX do something more intelligent here. */
		return(1);
	}

	memset(config, 0, sizeof(RF_Config_t));

	/*
	   2. Figure out what RAID ID this one is supposed to live at
	   See if we can get the same RAID dev that it was configured
	   on before.
	 */

	raidID = cset->ac->clabel->last_unit;
	if ((raidID < 0) || (raidID >= numraid)) {
		/* let's not wander off into lala land. */
		raidID = numraid - 1;
	}
	if (raidPtrs[raidID]->valid != 0) {

		/*
		   Nope... Go looking for an alternative...
		   Start high so we don't immediately use raid0 if that's
		   not wanted.
		 */

		for(raidID = numraid - 1; raidID >= 0; raidID--) {
			if (raidPtrs[raidID]->valid == 0) {
				/* can use this one! */
				break;
			}
		}
	}

	if (raidID < 0) {
		printf("Unable to auto configure this set!\n");
		printf("(Out of RAID devs!)\n");
		free(config, M_RAIDFRAME);
		return(1);
	}

	printf("Configuring raid%d:\n",raidID);

	raidPtr = raidPtrs[raidID];

	/* XXX all this stuff should be done SOMEWHERE ELSE! */
	raidPtr->raidid = raidID;
	raidPtr->openings = RAIDOUTSTANDING;

	/* 3. Build the configuration structure */
	rf_create_configuration(cset->ac, config, raidPtr);

	/* 4. Do the configuration */
	retcode = rf_Configure(raidPtr, config, cset->ac);

	if (retcode == 0) {

		raidinit(raidPtrs[raidID]);

		rf_markalldirty(raidPtrs[raidID]);
		raidPtrs[raidID]->autoconfigure = 1; /* XXX do this here? */
		if (cset->ac->clabel->root_partition == 1) {
			/* everything configured just fine.  Make a note
			   that this set is eligible to be root. */
			cset->rootable = 1;
			/* XXX do this here? */
			raidPtrs[raidID]->root_partition = 1;
		}
	}

	free(config, M_RAIDFRAME);

	*unit = raidID;

	return(retcode);
}
rf_disk_unbusy(RF_RaidAccessDesc_t *desc)
{
	struct buf *bp;

	bp = (struct buf *)desc->bp;
	disk_unbusy(&raid_softc[desc->raidPtr->raidid].sc_dkdev,
	    (bp->b_bcount - bp->b_resid), (bp->b_flags & B_READ));
}


rf_pool_init(struct pool *p, size_t size, const char *w_chan,
    size_t xmin, size_t xmax)
{
	pool_init(p, size, 0, 0, 0, w_chan, NULL, IPL_BIO);
	pool_sethiwat(p, xmax);
	pool_prime(p, xmin);
	pool_setlowat(p, xmin);
}

/*
 * rf_buf_queue_check(int raidid) -- looks into the buf_queue to see
 * if there is IO pending and if that IO could possibly be done for a
 * given RAID set.  Returns 0 if IO is waiting and can be done, 1
 * otherwise.
 */

rf_buf_queue_check(int raidid)
{
	if ((bufq_peek(raid_softc[raidid].buf_queue) != NULL) &&
	    raidPtrs[raidid]->openings > 0) {
		/* there is work to do */
		return 0;
	}
	/* default is nothing to do */
	return 1;
}
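/*
 * Note: rf_buf_queue_check() is effectively a "can we start more work?"
 * predicate: it returns 0 only when at least one buf is queued and the
 * array still has a free opening, and 1 when the caller should wait.
 */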
rf_getdisksize(struct vnode *vp, struct lwp *l, RF_RaidDisk_t *diskPtr)
{
	struct partinfo dpart;
	struct dkwedge_info dkw;
	int error;

	error = VOP_IOCTL(vp, DIOCGPART, &dpart, FREAD, l->l_cred);
	if (error == 0) {
		diskPtr->blockSize = dpart.disklab->d_secsize;
		diskPtr->numBlocks = dpart.part->p_size - rf_protectedSectors;
		diskPtr->partitionSize = dpart.part->p_size;
		return 0;
	}

	error = VOP_IOCTL(vp, DIOCGWEDGEINFO, &dkw, FREAD, l->l_cred);
	if (error == 0) {
		diskPtr->blockSize = 512;	/* XXX */
		diskPtr->numBlocks = dkw.dkw_size - rf_protectedSectors;
		diskPtr->partitionSize = dkw.dkw_size;
		return 0;
	}
	return error;
}
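/*
 * Note: component sizing tries DIOCGPART (a traditional disklabel
 * partition) first and falls back to DIOCGWEDGEINFO for wedge (dk)
 * components, where the sector size is simply assumed to be 512 (see the
 * XXX above).  In both cases rf_protectedSectors is subtracted from the
 * usable size, presumably reserving the area that holds the component
 * label and parity map described earlier.
 */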
raid_match(device_t self, cfdata_t cfdata, void *aux)
{
	return 1;
}


raid_attach(device_t parent, device_t self, void *aux)
{
}


raid_detach(device_t self, int flags)
{
	int error;
	struct raid_softc *rs = &raid_softc[device_unit(self)];

	if ((error = raidlock(rs)) != 0)
		return (error);

	error = raid_detach_unlocked(rs);

	raidunlock(rs);

	return error;
}
rf_set_properties(struct raid_softc *rs, RF_Raid_t *raidPtr)
{
	prop_dictionary_t disk_info, odisk_info, geom;

	disk_info = prop_dictionary_create();
	geom = prop_dictionary_create();
	prop_dictionary_set_uint64(geom, "sectors-per-unit",
	    raidPtr->totalSectors);
	prop_dictionary_set_uint32(geom, "sector-size",
	    raidPtr->bytesPerSector);

	prop_dictionary_set_uint16(geom, "sectors-per-track",
	    raidPtr->Layout.dataSectorsPerStripe);
	prop_dictionary_set_uint16(geom, "tracks-per-cylinder",
	    4 * raidPtr->numCol);

	prop_dictionary_set_uint64(geom, "cylinders-per-unit",
	    raidPtr->totalSectors / (raidPtr->Layout.dataSectorsPerStripe *
	    (4 * raidPtr->numCol)));

	prop_dictionary_set(disk_info, "geometry", geom);
	prop_object_release(geom);
	prop_dictionary_set(device_properties(rs->sc_dev),
	    "disk-info", disk_info);
	odisk_info = rs->sc_dkdev.dk_info;
	rs->sc_dkdev.dk_info = disk_info;
	if (odisk_info)
		prop_object_release(odisk_info);
}
/*
 * Implement forwarding of the DIOCCACHESYNC ioctl to each of the components.
 * We end up returning whatever error was returned by the first cache flush
 * that fails.
 */

rf_sync_component_caches(RF_Raid_t *raidPtr)
{
	int c, sparecol;
	int e, error;
	int force = 1;

	error = 0;
	for (c = 0; c < raidPtr->numCol; c++) {
		if (raidPtr->Disks[c].status == rf_ds_optimal) {
			e = VOP_IOCTL(raidPtr->raid_cinfo[c].ci_vp,
			    DIOCCACHESYNC, &force, FWRITE, NOCRED);
			if (e) {
				printf("raid%d: cache flush to component %s failed.\n",
				    raidPtr->raidid, raidPtr->Disks[c].devname);
				if (error == 0)
					error = e;
			}
		}
	}

	for( c = 0; c < raidPtr->numSpare ; c++) {
		sparecol = raidPtr->numCol + c;
		/* Need to ensure that the reconstruct actually completed! */
		if (raidPtr->Disks[sparecol].status == rf_ds_used_spare) {
			e = VOP_IOCTL(raidPtr->raid_cinfo[sparecol].ci_vp,
			    DIOCCACHESYNC, &force, FWRITE, NOCRED);
			if (e) {
				printf("raid%d: cache flush to component %s failed.\n",
				    raidPtr->raidid, raidPtr->Disks[sparecol].devname);