1 /* $NetBSD: rf_reconutil.c,v 1.27 2006/04/26 17:08:48 oster Exp $ */
3 * Copyright (c) 1995 Carnegie-Mellon University.
8 * Permission to use, copy, modify and distribute this software and
9 * its documentation is hereby granted, provided that both the copyright
10 * notice and this permission notice appear in all copies of the
11 * software, derivative works or modified versions, and any portions
12 * thereof, and that both notices appear in supporting documentation.
14 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
15 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
16 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
18 * Carnegie Mellon requests users of this software to return to
20 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
21 * School of Computer Science
22 * Carnegie Mellon University
23 * Pittsburgh PA 15213-3890
25 * any improvements or extensions that they make and grant Carnegie the
26 * rights to redistribute these changes.
29 /********************************************
30 * rf_reconutil.c -- reconstruction utilities
31 ********************************************/
33 #include <sys/cdefs.h>
34 __KERNEL_RCSID(0, "$NetBSD: rf_reconutil.c,v 1.27 2006/04/26 17:08:48 oster Exp $");
36 #include <dev/raidframe/raidframevar.h>
40 #include "rf_reconutil.h"
41 #include "rf_reconbuffer.h"
42 #include "rf_general.h"
43 #include "rf_decluster.h"
44 #include "rf_raid5_rotatedspare.h"
45 #include "rf_interdecluster.h"
46 #include "rf_chaindecluster.h"
48 /*******************************************************************
49 * allocates/frees the reconstruction control information structures
50 *******************************************************************/
52 /* fcol - failed column
53 * scol - identifies which spare we are using
57 rf_MakeReconControl(RF_RaidReconDesc_t
*reconDesc
,
58 RF_RowCol_t fcol
, RF_RowCol_t scol
)
60 RF_Raid_t
*raidPtr
= reconDesc
->raidPtr
;
61 RF_RaidLayout_t
*layoutPtr
= &raidPtr
->Layout
;
62 RF_ReconUnitCount_t RUsPerPU
= layoutPtr
->SUsPerPU
/ layoutPtr
->SUsPerRU
;
63 RF_ReconUnitCount_t numSpareRUs
;
64 RF_ReconCtrl_t
*reconCtrlPtr
;
65 RF_ReconBuffer_t
*rbuf
;
66 const RF_LayoutSW_t
*lp
;
67 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
72 lp
= raidPtr
->Layout
.map
;
74 /* make and zero the global reconstruction structure and the per-disk
76 RF_Malloc(reconCtrlPtr
, sizeof(RF_ReconCtrl_t
), (RF_ReconCtrl_t
*));
78 /* note: this zeros the perDiskInfo */
79 RF_Malloc(reconCtrlPtr
->perDiskInfo
, raidPtr
->numCol
*
80 sizeof(RF_PerDiskReconCtrl_t
), (RF_PerDiskReconCtrl_t
*));
81 reconCtrlPtr
->reconDesc
= reconDesc
;
82 reconCtrlPtr
->fcol
= fcol
;
83 reconCtrlPtr
->spareCol
= scol
;
84 reconCtrlPtr
->lastPSID
= layoutPtr
->numStripe
/ layoutPtr
->SUsPerPU
;
85 reconCtrlPtr
->percentComplete
= 0;
86 reconCtrlPtr
->error
= 0;
87 reconCtrlPtr
->pending_writes
= 0;
89 /* initialize each per-disk recon information structure */
90 for (i
= 0; i
< raidPtr
->numCol
; i
++) {
91 reconCtrlPtr
->perDiskInfo
[i
].reconCtrl
= reconCtrlPtr
;
92 reconCtrlPtr
->perDiskInfo
[i
].col
= i
;
93 /* make it appear as if we just finished an RU */
94 reconCtrlPtr
->perDiskInfo
[i
].curPSID
= -1;
95 reconCtrlPtr
->perDiskInfo
[i
].ru_count
= RUsPerPU
- 1;
98 /* Get the number of spare units per disk and the sparemap in case
99 * spare is distributed */
101 if (lp
->GetNumSpareRUs
) {
102 numSpareRUs
= lp
->GetNumSpareRUs(raidPtr
);
107 #if (RF_INCLUDE_PARITY_DECLUSTERING_DS > 0)
109 * Not all distributed sparing archs need dynamic mappings
111 if (lp
->InstallSpareTable
) {
112 retcode
= rf_InstallSpareTable(raidPtr
, 0, fcol
);
114 RF_PANIC(); /* XXX fix this */
118 /* make the reconstruction map */
119 reconCtrlPtr
->reconMap
= rf_MakeReconMap(raidPtr
, (int) (layoutPtr
->SUsPerRU
* layoutPtr
->sectorsPerStripeUnit
),
120 raidPtr
->sectorsPerDisk
, numSpareRUs
);
122 /* make the per-disk reconstruction buffers */
123 for (i
= 0; i
< raidPtr
->numCol
; i
++) {
124 reconCtrlPtr
->perDiskInfo
[i
].rbuf
= (i
== fcol
) ? NULL
: rf_MakeReconBuffer(raidPtr
, i
, RF_RBUF_TYPE_EXCLUSIVE
);
127 /* initialize the event queue */
128 simple_lock_init(&reconCtrlPtr
->eq_mutex
);
130 reconCtrlPtr
->eventQueue
= NULL
;
131 reconCtrlPtr
->eq_count
= 0;
133 /* make the floating recon buffers and append them to the free list */
134 simple_lock_init(&reconCtrlPtr
->rb_mutex
);
136 reconCtrlPtr
->fullBufferList
= NULL
;
137 reconCtrlPtr
->floatingRbufs
= NULL
;
138 reconCtrlPtr
->committedRbufs
= NULL
;
139 for (i
= 0; i
< raidPtr
->numFloatingReconBufs
; i
++) {
140 rbuf
= rf_MakeReconBuffer(raidPtr
, fcol
,
141 RF_RBUF_TYPE_FLOATING
);
142 rbuf
->next
= reconCtrlPtr
->floatingRbufs
;
143 reconCtrlPtr
->floatingRbufs
= rbuf
;
146 /* create the parity stripe status table */
147 reconCtrlPtr
->pssTable
= rf_MakeParityStripeStatusTable(raidPtr
);
149 /* set the initial min head sep counter val */
150 reconCtrlPtr
->minHeadSepCounter
= 0;
152 return (reconCtrlPtr
);
156 rf_FreeReconControl(RF_Raid_t
*raidPtr
)
158 RF_ReconCtrl_t
*reconCtrlPtr
= raidPtr
->reconControl
;
162 RF_ASSERT(reconCtrlPtr
);
163 for (i
= 0; i
< raidPtr
->numCol
; i
++)
164 if (reconCtrlPtr
->perDiskInfo
[i
].rbuf
)
165 rf_FreeReconBuffer(reconCtrlPtr
->perDiskInfo
[i
].rbuf
);
167 t
= reconCtrlPtr
->floatingRbufs
;
169 reconCtrlPtr
->floatingRbufs
= t
->next
;
170 rf_FreeReconBuffer(t
);
171 t
= reconCtrlPtr
->floatingRbufs
;
174 rf_FreeReconMap(reconCtrlPtr
->reconMap
);
175 rf_FreeParityStripeStatusTable(raidPtr
, reconCtrlPtr
->pssTable
);
176 RF_Free(reconCtrlPtr
->perDiskInfo
,
177 raidPtr
->numCol
* sizeof(RF_PerDiskReconCtrl_t
));
178 RF_Free(reconCtrlPtr
, sizeof(*reconCtrlPtr
));
182 /******************************************************************************
183 * computes the default head separation limit
184 *****************************************************************************/
186 rf_GetDefaultHeadSepLimit(RF_Raid_t
*raidPtr
)
188 RF_HeadSepLimit_t hsl
;
189 const RF_LayoutSW_t
*lp
;
191 lp
= raidPtr
->Layout
.map
;
192 if (lp
->GetDefaultHeadSepLimit
== NULL
)
194 hsl
= lp
->GetDefaultHeadSepLimit(raidPtr
);
199 /******************************************************************************
200 * computes the default number of floating recon buffers
201 *****************************************************************************/
203 rf_GetDefaultNumFloatingReconBuffers(RF_Raid_t
*raidPtr
)
205 const RF_LayoutSW_t
*lp
;
208 lp
= raidPtr
->Layout
.map
;
209 if (lp
->GetDefaultNumFloatingReconBuffers
== NULL
)
210 return (3 * raidPtr
->numCol
);
211 nrb
= lp
->GetDefaultNumFloatingReconBuffers(raidPtr
);
216 /******************************************************************************
217 * creates and initializes a reconstruction buffer
218 *****************************************************************************/
220 rf_MakeReconBuffer(RF_Raid_t
*raidPtr
, RF_RowCol_t col
, RF_RbufType_t type
)
222 RF_RaidLayout_t
*layoutPtr
= &raidPtr
->Layout
;
224 u_int recon_buffer_size
= rf_RaidAddressToByte(raidPtr
, layoutPtr
->SUsPerRU
* layoutPtr
->sectorsPerStripeUnit
);
226 t
= pool_get(&rf_pools
.reconbuffer
, PR_WAITOK
);
227 RF_Malloc(t
->buffer
, recon_buffer_size
, (void *));
228 t
->raidPtr
= raidPtr
;
230 t
->priority
= RF_IO_RECON_PRIORITY
;
236 /******************************************************************************
237 * frees a reconstruction buffer
238 *****************************************************************************/
240 rf_FreeReconBuffer(RF_ReconBuffer_t
*rbuf
)
242 RF_Raid_t
*raidPtr
= rbuf
->raidPtr
;
243 u_int recon_buffer_size
;
245 recon_buffer_size
= rf_RaidAddressToByte(raidPtr
, raidPtr
->Layout
.SUsPerRU
* raidPtr
->Layout
.sectorsPerStripeUnit
);
247 RF_Free(rbuf
->buffer
, recon_buffer_size
);
248 pool_put(&rf_pools
.reconbuffer
, rbuf
);
/* XXXX IF you use this, you really want to fix the locking in here. */
253 /******************************************************************************
254 * debug only: sanity check the number of floating recon bufs in use
255 *****************************************************************************/
257 rf_CheckFloatingRbufCount(RF_Raid_t
*raidPtr
, int dolock
)
259 RF_ReconParityStripeStatus_t
*p
;
260 RF_PSStatusHeader_t
*pssTable
;
261 RF_ReconBuffer_t
*rbuf
;
265 RF_LOCK_MUTEX(raidPtr
->reconControl
->rb_mutex
);
266 pssTable
= raidPtr
->reconControl
->pssTable
;
268 for (i
= 0; i
< raidPtr
->pssTableSize
; i
++) {
269 RF_LOCK_MUTEX(pssTable
[i
].mutex
);
270 for (p
= pssTable
[i
].chain
; p
; p
= p
->next
) {
271 rbuf
= (RF_ReconBuffer_t
*) p
->rbuf
;
272 if (rbuf
&& rbuf
->type
== RF_RBUF_TYPE_FLOATING
)
275 rbuf
= (RF_ReconBuffer_t
*) p
->writeRbuf
;
276 if (rbuf
&& rbuf
->type
== RF_RBUF_TYPE_FLOATING
)
279 for (j
= 0; j
< p
->xorBufCount
; j
++) {
280 rbuf
= (RF_ReconBuffer_t
*) p
->rbufsForXor
[j
];
282 if (rbuf
->type
== RF_RBUF_TYPE_FLOATING
)
286 RF_UNLOCK_MUTEX(pssTable
[i
].mutex
);
289 for (rbuf
= raidPtr
->reconControl
->floatingRbufs
; rbuf
;
291 if (rbuf
->type
== RF_RBUF_TYPE_FLOATING
)
294 for (rbuf
= raidPtr
->reconControl
->committedRbufs
; rbuf
;
296 if (rbuf
->type
== RF_RBUF_TYPE_FLOATING
)
299 for (rbuf
= raidPtr
->reconControl
->fullBufferList
; rbuf
;
301 if (rbuf
->type
== RF_RBUF_TYPE_FLOATING
)
304 RF_ASSERT(sum
== raidPtr
->numFloatingReconBufs
);
307 RF_UNLOCK_MUTEX(raidPtr
->reconControl
->rb_mutex
);