/*	$NetBSD: rf_dagfuncs.c,v 1.29 2007/03/04 06:02:36 christos Exp $	*/
/*
 * Copyright (c) 1995 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Author: Mark Holland, William V. Courtright II
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
/*****************************************************************************
 *
 * dagfuncs.c -- DAG node execution routines
 *
 * Rules:
 * 1. Every DAG execution function must eventually cause node->status to
 *    get set to "good" or "bad", and "FinishNode" to be called. In the
 *    case of nodes that complete immediately (xor, NullNodeFunc, etc.),
 *    the node execution function can do these two things directly. In
 *    the case of nodes that have to wait for some event (a disk read to
 *    complete, a lock to be released, etc.) to occur before they can
 *    complete, this is typically achieved by having whatever module
 *    is doing the operation call GenericWakeupFunc upon completion.
 *
 * 2. DAG execution functions should check the status in the DAG header
 *    and NOP out their operations if the status is not "enable". However,
 *    execution functions that release resources must be sure to release
 *    them even when they NOP out the function that would use them.
 *    Functions that acquire resources should go ahead and acquire them
 *    even when they NOP, so that a downstream release node will not have
 *    to check to find out whether or not the acquire was suppressed.
 *
 ****************************************************************************/
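/*
 * Illustrative sketch (not part of the driver): the minimal shape of a node
 * execution function that follows rules 1 and 2 above.  A node that completes
 * immediately sets node->status itself and calls rf_FinishNode(); a node that
 * starts I/O instead hands its wakeup callback (typically
 * rf_GenericWakeupFunc via node->wakeFunc) to the lower layer and lets the
 * completion path finish the node.  The function name below is hypothetical.
 *
 *	int
 *	example_immediate_node_func(RF_DagNode_t *node)
 *	{
 *		if (node->dagHdr->status == rf_enable) {
 *			// do the node's real work here (rule 2: skip the
 *			// work, but not resource handling, when disabled)
 *		}
 *		// rule 1: the node must still report a status and finish
 *		node->status = rf_good;
 *		return (rf_FinishNode(node, RF_THREAD_CONTEXT));
 *	}
 */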
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rf_dagfuncs.c,v 1.29 2007/03/04 06:02:36 christos Exp $");

#include <sys/param.h>
#include <sys/ioctl.h>

#include "rf_archs.h"
#include "rf_raid.h"
#include "rf_dag.h"
#include "rf_layout.h"
#include "rf_etimer.h"
#include "rf_acctrace.h"
#include "rf_diskqueue.h"
#include "rf_dagfuncs.h"
#include "rf_general.h"
#include "rf_engine.h"
#include "rf_dagutils.h"

#include "rf_kintf.h"

#if RF_INCLUDE_PARITYLOGGING > 0
#include "rf_paritylog.h"
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
int     (*rf_DiskReadFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteFunc) (RF_DagNode_t *);
int     (*rf_DiskReadUndoFunc) (RF_DagNode_t *);
int     (*rf_DiskWriteUndoFunc) (RF_DagNode_t *);
int     (*rf_RegularXorUndoFunc) (RF_DagNode_t *);
int     (*rf_SimpleXorUndoFunc) (RF_DagNode_t *);
int     (*rf_RecoveryXorUndoFunc) (RF_DagNode_t *);
/*****************************************************************************
 * main (only) configuration routine for this module
 ****************************************************************************/
int
rf_ConfigureDAGFuncs(RF_ShutdownList_t **listp)
{
        RF_ASSERT(((sizeof(long) == 8) && RF_LONGSHIFT == 3) ||
            ((sizeof(long) == 4) && RF_LONGSHIFT == 2));
        rf_DiskReadFunc = rf_DiskReadFuncForThreads;
        rf_DiskReadUndoFunc = rf_DiskUndoFunc;
        rf_DiskWriteFunc = rf_DiskWriteFuncForThreads;
        rf_DiskWriteUndoFunc = rf_DiskUndoFunc;
        rf_RegularXorUndoFunc = rf_NullNodeUndoFunc;
        rf_SimpleXorUndoFunc = rf_NullNodeUndoFunc;
        rf_RecoveryXorUndoFunc = rf_NullNodeUndoFunc;
        return (0);
}
/*****************************************************************************
 * the execution function associated with a terminate node
 ****************************************************************************/
int
rf_TerminateFunc(RF_DagNode_t *node)
{
        RF_ASSERT(node->dagHdr->numCommits == node->dagHdr->numCommitNodes);
        node->status = rf_good;
        return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}

int
rf_TerminateUndoFunc(RF_DagNode_t *node)
{
        return (0);
}
/*****************************************************************************
 * execution functions associated with a mirror node
 *
 * parameters:
 *
 *      0 - physical disk address of data
 *      1 - buffer for holding read data
 *      2 - parity stripe ID
 *      3 - flags
 *      4 - physical disk address of mirror (parity)
 *
 ****************************************************************************/
int
rf_DiskReadMirrorIdleFunc(RF_DagNode_t *node)
{
        /* select the mirror copy with the shortest queue and fill in node
         * parameters with physical disk address */

        rf_SelectMirrorDiskIdle(node);
        return (rf_DiskReadFunc(node));
}

#if (RF_INCLUDE_CHAINDECLUSTER > 0) || (RF_INCLUDE_INTERDECLUSTER > 0) || (RF_DEBUG_VALIDATE_DAG > 0)
int
rf_DiskReadMirrorPartitionFunc(RF_DagNode_t *node)
{
        /* select the mirror copy with the shortest queue and fill in node
         * parameters with physical disk address */

        rf_SelectMirrorDiskPartition(node);
        return (rf_DiskReadFunc(node));
}
#endif

int
rf_DiskReadMirrorUndoFunc(RF_DagNode_t *node)
{
        return (0);
}
#if RF_INCLUDE_PARITYLOGGING > 0
/*****************************************************************************
 * the execution function associated with a parity log update node
 ****************************************************************************/
int
rf_ParityLogUpdateFunc(RF_DagNode_t *node)
{
        RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
        void   *bf = (void *) node->params[1].p;
        RF_ParityLogData_t *logData;
        RF_Etimer_t timer;
        RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;

        if (node->dagHdr->status == rf_enable) {
                RF_ETIMER_START(timer);
                logData = rf_CreateParityLogData(RF_UPDATE, pda, bf,
                    (RF_Raid_t *) (node->dagHdr->raidPtr),
                    node->wakeFunc, (void *) node,
                    node->dagHdr->tracerec, timer);
                if (logData)
                        rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
                else {
                        RF_ETIMER_STOP(timer);
                        RF_ETIMER_EVAL(timer);
                        tracerec->plog_us += RF_ETIMER_VAL_US(timer);
                        (node->wakeFunc) (node, ENOMEM);
                }
        }
        return (0);
}
/*****************************************************************************
 * the execution function associated with a parity log overwrite node
 ****************************************************************************/
int
rf_ParityLogOverwriteFunc(RF_DagNode_t *node)
{
        RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
        void   *bf = (void *) node->params[1].p;
        RF_ParityLogData_t *logData;
        RF_Etimer_t timer;
        RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;

        if (node->dagHdr->status == rf_enable) {
                RF_ETIMER_START(timer);
                logData = rf_CreateParityLogData(RF_OVERWRITE, pda, bf,
                    (RF_Raid_t *) (node->dagHdr->raidPtr),
                    node->wakeFunc, (void *) node,
                    node->dagHdr->tracerec, timer);
                if (logData)
                        rf_ParityLogAppend(logData, RF_FALSE, NULL, RF_FALSE);
                else {
                        RF_ETIMER_STOP(timer);
                        RF_ETIMER_EVAL(timer);
                        tracerec->plog_us += RF_ETIMER_VAL_US(timer);
                        (node->wakeFunc) (node, ENOMEM);
                }
        }
        return (0);
}

int
rf_ParityLogUpdateUndoFunc(RF_DagNode_t *node)
{
        return (0);
}

int
rf_ParityLogOverwriteUndoFunc(RF_DagNode_t *node)
{
        return (0);
}
#endif				/* RF_INCLUDE_PARITYLOGGING > 0 */
/*****************************************************************************
 * the execution function associated with a NOP node
 ****************************************************************************/
int
rf_NullNodeFunc(RF_DagNode_t *node)
{
        node->status = rf_good;
        return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}

int
rf_NullNodeUndoFunc(RF_DagNode_t *node)
{
        node->status = rf_undone;
        return (rf_FinishNode(node, RF_THREAD_CONTEXT));
}
/*****************************************************************************
 * the execution function associated with a disk-read node
 ****************************************************************************/
int
rf_DiskReadFuncForThreads(RF_DagNode_t *node)
{
        RF_DiskQueueData_t *req;
        RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
        void   *bf = (void *) node->params[1].p;
        RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
        unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
        unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
        RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ?
            RF_IO_TYPE_READ : RF_IO_TYPE_NOP;
        RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
        void   *b_proc = NULL;

        if (node->dagHdr->bp)
                b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

        req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
            bf, parityStripeID, which_ru,
            (int (*) (void *, int)) node->wakeFunc, (void *) node,
            node->dagHdr->tracerec,
            (void *) (node->dagHdr->raidPtr), 0, b_proc, PR_NOWAIT);
        if (!req) {
                (node->wakeFunc) (node, ENOMEM);
        } else {
                node->dagFuncData = (void *) req;
                rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
        }

        return (0);
}
/*****************************************************************************
 * the execution function associated with a disk-write node
 ****************************************************************************/
int
rf_DiskWriteFuncForThreads(RF_DagNode_t *node)
{
        RF_DiskQueueData_t *req;
        RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
        void   *bf = (void *) node->params[1].p;
        RF_StripeNum_t parityStripeID = (RF_StripeNum_t) node->params[2].v;
        unsigned priority = RF_EXTRACT_PRIORITY(node->params[3].v);
        unsigned which_ru = RF_EXTRACT_RU(node->params[3].v);
        RF_IoType_t iotype = (node->dagHdr->status == rf_enable) ?
            RF_IO_TYPE_WRITE : RF_IO_TYPE_NOP;
        RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;
        void   *b_proc = NULL;

        if (node->dagHdr->bp)
                b_proc = (void *) ((struct buf *) node->dagHdr->bp)->b_proc;

        /* normal processing (rollaway or forward recovery) begins here */
        req = rf_CreateDiskQueueData(iotype, pda->startSector, pda->numSector,
            bf, parityStripeID, which_ru,
            (int (*) (void *, int)) node->wakeFunc, (void *) node,
            node->dagHdr->tracerec,
            (void *) (node->dagHdr->raidPtr),
            0, b_proc, PR_NOWAIT);
        if (!req) {
                (node->wakeFunc) (node, ENOMEM);
        } else {
                node->dagFuncData = (void *) req;
                rf_DiskIOEnqueue(&(dqs[pda->col]), req, priority);
        }

        return (0);
}
/*****************************************************************************
 * the undo function for disk nodes
 * Note: this is not a proper undo of a write node, only locks are released.
 *       old data is not restored to disk!
 ****************************************************************************/
int
rf_DiskUndoFunc(RF_DagNode_t *node)
{
        RF_DiskQueueData_t *req;
        RF_PhysDiskAddr_t *pda = (RF_PhysDiskAddr_t *) node->params[0].p;
        RF_DiskQueue_t *dqs = ((RF_Raid_t *) (node->dagHdr->raidPtr))->Queues;

        req = rf_CreateDiskQueueData(RF_IO_TYPE_NOP,
            0L, 0, NULL, 0L, 0,
            (int (*) (void *, int)) node->wakeFunc, (void *) node,
            node->dagHdr->tracerec,
            (void *) (node->dagHdr->raidPtr),
            0, NULL, PR_NOWAIT);
        if (!req)
                (node->wakeFunc) (node, ENOMEM);
        else {
                node->dagFuncData = (void *) req;
                rf_DiskIOEnqueue(&(dqs[pda->col]), req, RF_IO_NORMAL_PRIORITY);
        }

        return (0);
}
/*****************************************************************************
 * Callback routine for DiskRead and DiskWrite nodes.  When the disk
 * op completes, the routine is called to set the node status and
 * inform the execution engine that the node has fired.
 ****************************************************************************/
int
rf_GenericWakeupFunc(RF_DagNode_t *node, int status)
{

        switch (node->status) {
        case rf_fired:
                if (status)
                        node->status = rf_bad;
                else
                        node->status = rf_good;
                break;
        case rf_recover:
                /* probably should never reach this case */
                if (status)
                        node->status = rf_panic;
                else
                        node->status = rf_undone;
                break;
        default:
                printf("rf_GenericWakeupFunc:");
                printf("node->status is %d,", node->status);
                printf("status is %d \n", status);
                RF_PANIC();
                break;
        }
        if (node->dagFuncData)
                rf_FreeDiskQueueData((RF_DiskQueueData_t *) node->dagFuncData);
        return (rf_FinishNode(node, RF_INTR_CONTEXT));
}
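/*
 * Illustrative sketch (not part of the driver): how a completing disk
 * operation is expected to re-enter the DAG engine through the wakeup
 * callback installed by the disk-read/write nodes above.  The
 * RF_DiskQueueData_t field names (CompleteFunc, argument) are assumptions
 * here; the real completion path lives in the kernel disk interface code.
 *
 *	static void
 *	example_disk_io_done(RF_DiskQueueData_t *req, int error)
 *	{
 *		// wakeFunc was set to rf_GenericWakeupFunc and argument to
 *		// the DAG node when the request was created, so this call
 *		// marks the node good/bad and calls rf_FinishNode().
 *		(req->CompleteFunc) (req->argument, error);
 *	}
 */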
/*****************************************************************************
 * there are three distinct types of xor nodes:
 *
 * A "regular xor" is used in the fault-free case where the access
 * spans a complete stripe unit.  It assumes that the result buffer is
 * one full stripe unit in size, and uses the stripe-unit-offset
 * values that it computes from the PDAs to determine where within the
 * stripe unit to XOR each argument buffer.
 *
 * A "simple xor" is used in the fault-free case where the access
 * touches only a portion of one (or two, in some cases) stripe
 * unit(s).  It assumes that all the argument buffers are of the same
 * size and have the same stripe unit offset.
 *
 * A "recovery xor" is used in the degraded-mode case.  It's similar
 * to the regular xor function except that it takes the failed PDA as
 * an additional parameter, and uses it to determine what portions of
 * the argument buffers need to be xor'd into the result buffer, and
 * where in the result buffer they should go.
 ****************************************************************************/
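/*
 * Worked example (illustrative, with assumed geometry): with 512-byte
 * sectors and 64 sectors per stripe unit, a PDA whose startSector falls 16
 * sectors into its stripe unit has rf_StripeUnitOffset() == 16, so a
 * regular xor folds that buffer in at byte offset 16 * 512 = 8192 of the
 * one-SU result buffer.  A recovery xor instead xors it at byte offset
 * rf_RaidAddressToByte(raidPtr, suoffset - failedSUOffset), i.e. relative
 * to where the failed PDA starts rather than to the stripe unit origin.
 */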
/* xor the params together and store the result in the result field.
 * assume the result field points to a buffer that is the size of one
 * SU, and use the pda params to determine where within the buffer to
 * XOR the input buffers. */
int
rf_RegularXorFunc(RF_DagNode_t *node)
{
        RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
        RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
        RF_Etimer_t timer;
        int     i, retcode = 0;

        if (node->dagHdr->status == rf_enable) {
                /* don't do the XOR if the input is the same as the output */
                RF_ETIMER_START(timer);
                for (i = 0; i < node->numParams - 1; i += 2)
                        if (node->params[i + 1].p != node->results[0]) {
                                retcode = rf_XorIntoBuffer(raidPtr, (RF_PhysDiskAddr_t *) node->params[i].p,
                                    (char *) node->params[i + 1].p, (char *) node->results[0]);
                        }
                RF_ETIMER_STOP(timer);
                RF_ETIMER_EVAL(timer);
                tracerec->xor_us += RF_ETIMER_VAL_US(timer);
        }
        return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
/* xor the inputs into the result buffer, ignoring placement issues */
int
rf_SimpleXorFunc(RF_DagNode_t *node)
{
        RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
        int     i, retcode = 0;
        RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
        RF_Etimer_t timer;

        if (node->dagHdr->status == rf_enable) {
                RF_ETIMER_START(timer);
                /* don't do the XOR if the input is the same as the output */
                for (i = 0; i < node->numParams - 1; i += 2)
                        if (node->params[i + 1].p != node->results[0]) {
                                retcode = rf_bxor((char *) node->params[i + 1].p, (char *) node->results[0],
                                    rf_RaidAddressToByte(raidPtr, ((RF_PhysDiskAddr_t *) node->params[i].p)->numSector));
                        }
                RF_ETIMER_STOP(timer);
                RF_ETIMER_EVAL(timer);
                tracerec->xor_us += RF_ETIMER_VAL_US(timer);
        }
        return (rf_GenericWakeupFunc(node, retcode));	/* call wake func
							 * explicitly since no
							 * I/O in this node */
}
/* this xor is used by the degraded-mode dag functions to recover lost
 * data.  the second-to-last parameter is the PDA for the failed
 * portion of the access.  the code here looks at this PDA and assumes
 * that the xor target buffer is equal in size to the number of
 * sectors in the failed PDA.  It then uses the other PDAs in the
 * parameter list to determine where within the target buffer the
 * corresponding data should be xored. */
int
rf_RecoveryXorFunc(RF_DagNode_t *node)
{
        RF_Raid_t *raidPtr = (RF_Raid_t *) node->params[node->numParams - 1].p;
        RF_RaidLayout_t *layoutPtr = (RF_RaidLayout_t *) & raidPtr->Layout;
        RF_PhysDiskAddr_t *failedPDA = (RF_PhysDiskAddr_t *) node->params[node->numParams - 2].p;
        int     i, retcode = 0;
        RF_PhysDiskAddr_t *pda;
        int     suoffset, failedSUOffset = rf_StripeUnitOffset(layoutPtr, failedPDA->startSector);
        char   *srcbuf, *destbuf;
        RF_AccTraceEntry_t *tracerec = node->dagHdr->tracerec;
        RF_Etimer_t timer;

        if (node->dagHdr->status == rf_enable) {
                RF_ETIMER_START(timer);
                for (i = 0; i < node->numParams - 2; i += 2)
                        if (node->params[i + 1].p != node->results[0]) {
                                pda = (RF_PhysDiskAddr_t *) node->params[i].p;
                                srcbuf = (char *) node->params[i + 1].p;
                                suoffset = rf_StripeUnitOffset(layoutPtr, pda->startSector);
                                destbuf = ((char *) node->results[0]) + rf_RaidAddressToByte(raidPtr, suoffset - failedSUOffset);
                                retcode = rf_bxor(srcbuf, destbuf, rf_RaidAddressToByte(raidPtr, pda->numSector));
                        }
                RF_ETIMER_STOP(timer);
                RF_ETIMER_EVAL(timer);
                tracerec->xor_us += RF_ETIMER_VAL_US(timer);
        }
        return (rf_GenericWakeupFunc(node, retcode));
}
/*****************************************************************************
 * The next three functions are utilities used by the above
 * xor-execution functions.
 ****************************************************************************/


/*
 * this is just a glorified buffer xor.  targbuf points to a buffer
 * that is one full stripe unit in size.  srcbuf points to a buffer
 * that may be less than 1 SU, but never more.  When the access
 * described by pda is one SU in size (which by implication means it's
 * SU-aligned), all that happens is (targbuf) <- (srcbuf ^ targbuf).
 * When the access is less than one SU in size the XOR occurs on only
 * the portion of targbuf identified in the pda. */
int
rf_XorIntoBuffer(RF_Raid_t *raidPtr, RF_PhysDiskAddr_t *pda,
		 char *srcbuf, char *targbuf)
{
        char   *targptr;
        int     sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
        int     SUOffset = pda->startSector % sectPerSU;
        int     length, retcode = 0;

        RF_ASSERT(pda->numSector <= sectPerSU);

        targptr = targbuf + rf_RaidAddressToByte(raidPtr, SUOffset);
        length = rf_RaidAddressToByte(raidPtr, pda->numSector);
        retcode = rf_bxor(srcbuf, targptr, length);
        return (retcode);
}
/* it really should be the case that the buffer pointers (returned by
 * malloc) are aligned to the natural word size of the machine, so
 * this is the only case we optimize for.  The length should always be
 * a multiple of the sector size, so there should be no problem with
 * leftover bytes at the end. */
int
rf_bxor(char *src, char *dest, int len)
{
        unsigned mask = sizeof(long) - 1, retcode = 0;

        if (!(((unsigned long) src) & mask) &&
            !(((unsigned long) dest) & mask) && !(len & mask)) {
                retcode = rf_longword_bxor((unsigned long *) src,
                                           (unsigned long *) dest,
                                           len >> RF_LONGSHIFT);
        } else {
                RF_ASSERT(0);
        }
        return (retcode);
}
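/*
 * Worked example (illustrative): on a 64-bit machine mask is
 * sizeof(long) - 1 == 7, so the longword path above is taken only when src,
 * dest, and len all have their low three bits clear.  A 4096-byte xor of two
 * such buffers then runs as 4096 >> RF_LONGSHIFT == 512 longword operations
 * in rf_longword_bxor().
 */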
/* When XORing in kernel mode, we need to map each user page to kernel
 * space before we can access it.  We don't want to assume anything
 * about which input buffers are in kernel/user space, nor about their
 * alignment, so in each loop we compute the maximum number of bytes
 * that we can xor without crossing any page boundaries, and do only
 * this many bytes before the next remap.
 *
 * len - is in longwords
 */
int
rf_longword_bxor(unsigned long *src, unsigned long *dest, int len)
{
        unsigned long *end = src + len;
        unsigned long d0, d1, d2, d3, s0, s1, s2, s3;	/* temps */
        unsigned long *pg_src, *pg_dest;	/* per-page source/dest pointers */
        int     longs_this_time;/* # longwords to xor in the current iteration */

        pg_src = src;
        pg_dest = dest;
        if (!pg_src || !pg_dest)
                return (EFAULT);

        while (len > 4) {
                longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(pg_src), RF_BLIP(pg_dest)) >> RF_LONGSHIFT);	/* note len in longwords */
                src += longs_this_time;
                dest += longs_this_time;
                len -= longs_this_time;
                while (longs_this_time >= 4) {
                        d0 = pg_dest[0];
                        d1 = pg_dest[1];
                        d2 = pg_dest[2];
                        d3 = pg_dest[3];
                        s0 = pg_src[0];
                        s1 = pg_src[1];
                        s2 = pg_src[2];
                        s3 = pg_src[3];
                        pg_dest[0] = d0 ^ s0;
                        pg_dest[1] = d1 ^ s1;
                        pg_dest[2] = d2 ^ s2;
                        pg_dest[3] = d3 ^ s3;
                        pg_src += 4;
                        pg_dest += 4;
                        longs_this_time -= 4;
                }
                while (longs_this_time > 0) {	/* cannot cross any page
						 * boundaries here */
                        *pg_dest++ ^= *pg_src++;
                        longs_this_time--;
                }

                /* either we're done, or we've reached a page boundary on one
                 * (or possibly both) of the pointers */
                if (len) {
                        if (RF_PAGE_ALIGNED(src))
                                pg_src = src;
                        if (RF_PAGE_ALIGNED(dest))
                                pg_dest = dest;
                        if (!pg_src || !pg_dest)
                                return (EFAULT);
                }
        }
        while (src < end) {
                *pg_dest++ ^= *pg_src++;
                src++;
                dest++;
                if (RF_PAGE_ALIGNED(src))
                        pg_src = src;
                if (RF_PAGE_ALIGNED(dest))
                        pg_dest = dest;
        }
        RF_ASSERT(src == end);
        return (0);
}
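/*
 * Worked example (illustrative, reading RF_BLIP(p) as the number of bytes
 * from p to the end of its page, which is how the code above uses it): with
 * 4 KB pages, if pg_src sits 1024 bytes before a page boundary and pg_dest
 * sits 2048 bytes before one, then on a 64-bit machine
 * longs_this_time = RF_MIN(len, RF_MIN(1024, 2048) >> RF_LONGSHIFT)
 *                 = RF_MIN(len, 128),
 * so at most 128 longwords are xor'd before the pointers are re-checked at
 * the source's page boundary.
 */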
/*
   see comment above longword_bxor
   len is length in longwords
 */
int
rf_longword_bxor3(unsigned long *dst, unsigned long *a, unsigned long *b,
		  unsigned long *c, int len, void *bp)
{
        unsigned long a0, a1, a2, a3, b0, b1, b2, b3;
        unsigned long *pg_a, *pg_b, *pg_c, *pg_dst;	/* per-page source/dest
							 * pointers */
        int     longs_this_time;/* # longs to xor in the current iteration */

        /* in the kernel, all four buffers are already mapped */
        pg_a = a;
        pg_b = b;
        pg_c = c;
        pg_dst = dst;
        if (!pg_a || !pg_b || !pg_c || !pg_dst)
                return (EFAULT);

        /* align dest to cache line.  Can't cross a pg boundary on dst here. */
        while ((((unsigned long) pg_dst) & 0x1f)) {
                *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
                dst++; a++; b++; c++;
                len--;
                if (RF_PAGE_ALIGNED(a))
                        pg_a = a;
                if (RF_PAGE_ALIGNED(b))
                        pg_b = b;
                if (RF_PAGE_ALIGNED(c))
                        pg_c = c;
        }

        while (len > 4) {
                longs_this_time = RF_MIN(len, RF_MIN(RF_BLIP(a), RF_MIN(RF_BLIP(b), RF_MIN(RF_BLIP(c), RF_BLIP(dst)))) >> RF_LONGSHIFT);
                a += longs_this_time;
                b += longs_this_time;
                c += longs_this_time;
                dst += longs_this_time;
                len -= longs_this_time;
                while (longs_this_time >= 4) {
                        longs_this_time -= 4;
                        /* the original hand-schedules these loads, xors and
                         * stores so that they can dual issue */
                        a0 = pg_a[0]; b0 = pg_b[0];
                        a1 = pg_a[1]; b1 = pg_b[1];
                        a2 = pg_a[2]; b2 = pg_b[2];
                        a3 = pg_a[3]; b3 = pg_b[3];
                        a0 ^= b0; a1 ^= b1; a2 ^= b2; a3 ^= b3;
                        /* start dual issue */
                        pg_dst[0] = a0 ^ pg_c[0];
                        pg_dst[1] = a1 ^ pg_c[1];
                        pg_dst[2] = a2 ^ pg_c[2];
                        pg_dst[3] = a3 ^ pg_c[3];
                        pg_a += 4; pg_b += 4; pg_c += 4; pg_dst += 4;
                }
                while (longs_this_time > 0) {	/* cannot cross any page
						 * boundaries here */
                        *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
                        longs_this_time--;
                }

                if (len) {
                        if (RF_PAGE_ALIGNED(a))
                                pg_a = a;
                        if (RF_PAGE_ALIGNED(b))
                                pg_b = b;
                        if (RF_PAGE_ALIGNED(c))
                                pg_c = c;
                        if (RF_PAGE_ALIGNED(dst))
                                pg_dst = dst;
                }
        }
        while (len) {
                *pg_dst++ = *pg_a++ ^ *pg_b++ ^ *pg_c++;
                dst++; a++; b++; c++;
                len--;
                if (RF_PAGE_ALIGNED(a))
                        pg_a = a;
                if (RF_PAGE_ALIGNED(b))
                        pg_b = b;
                if (RF_PAGE_ALIGNED(c))
                        pg_c = c;
                if (RF_PAGE_ALIGNED(dst))
                        pg_dst = dst;
        }
        return (0);
}

int
rf_bxor3(unsigned char *dst, unsigned char *a, unsigned char *b,
	 unsigned char *c, unsigned long len, void *bp)
{
        RF_ASSERT(((RF_UL(dst) | RF_UL(a) | RF_UL(b) | RF_UL(c) | len) & 0x7) == 0);

        return (rf_longword_bxor3((unsigned long *) dst, (unsigned long *) a,
                (unsigned long *) b, (unsigned long *) c, len >> RF_LONGSHIFT, bp));
}