// SPDX-License-Identifier: GPL-2.0-only
/*
 * asynchronous raid6 recovery self test
 * Copyright (c) 2009, Intel Corporation.
 *
 * based on drivers/md/raid6test/test.c:
 *	Copyright 2002-2007 H. Peter Anvin
 */
#include <linux/async_tx.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/module.h>

#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)

#define NDISKS 64 /* Including P and Q */
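
/*
 * Test fixtures: data[] holds the reference pages (NDISKS disk slots plus
 * three extra pages used as the two recovery targets and a validation
 * spare), dataptrs[] is the working array handed to the async_tx API with
 * "failed" slots swapped out, and addr_conv[] is the scribble space that
 * init_async_submit() uses for DMA address conversion.
 */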
static struct page *dataptrs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;

static void callback(void *param)
{
	struct completion *cmp = param;

	complete(cmp);
}

static void makedata(int disks)
{
	int i;

	for (i = 0; i < disks; i++) {
		prandom_bytes(page_address(data[i]), PAGE_SIZE);
		dataptrs[i] = data[i];
	}
}

static char disk_type(int d, int disks)
{
	if (d == disks - 2)
		return 'P';
	else if (d == disks - 1)
		return 'Q';
	else
		return 'D';
}
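
/*
 * Recovery background: P is the XOR parity of the data blocks and Q is the
 * Reed-Solomon syndrome over GF(2^8), so any two missing blocks of a stripe
 * can be recomputed from the survivors.
 */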
/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb, struct page **ptrs)
{
	struct async_submit_ctl submit;
	struct completion cmp;
	struct dma_async_tx_descriptor *tx = NULL;
	enum sum_check_flags result = ~0;

	if (faila > failb)
		swap(faila, failb);
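
	/*
	 * With faila < failb there are four cases: P+Q failed (just
	 * regenerate the syndrome), data+Q failed (rebuild the data block
	 * from P by XOR, then regenerate Q), data+P failed
	 * (async_raid6_datap_recov), and data+data failed
	 * (async_raid6_2data_recov).  Every path is then validated with
	 * async_syndrome_val().
	 */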
	if (failb == disks-1) {
		if (faila == disks-2) {
			/* P+Q failure.  Just rebuild the syndrome. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
		} else {
			struct page *blocks[NDISKS];
			struct page *dest;
			int count = 0;
			int i;

			BUG_ON(disks > NDISKS);

			/* data+Q failure.  Reconstruct data from P,
			 * then rebuild syndrome
			 */
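			/*
			 * P is the XOR of all data blocks, so the missing
			 * block is just P XORed with every surviving data
			 * block: gather the survivors (P included, Q and the
			 * failed slots excluded) as XOR sources, rebuild the
			 * failed block in place, then regenerate Q over the
			 * repaired stripe.
			 */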
			for (i = disks; i-- ; ) {
				if (i == faila || i == failb)
					continue;
				blocks[count++] = ptrs[i];
			}
			dest = ptrs[faila];
			init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, addr_conv);
			tx = async_xor(dest, blocks, 0, count, bytes, &submit);

			init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit);
		}
	} else {
		if (failb == disks-2) {
			/* data+P failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit);
		} else {
			/* data+data failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit);
		}
	}
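
	/*
	 * Whichever path was taken, verify the repair: async_syndrome_val()
	 * recomputes the syndrome over the stripe (using the spare page as
	 * scratch) and sets bits in 'result' if the stored P or Q does not
	 * match.
	 */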
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
	tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
		pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
		   __func__, faila, failb, disks);

	if (result != 0)
		pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
		   __func__, faila, failb, result);
}
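
/*
 * Fail disks i and j in one stripe: poison the two recovery pages with
 * recognizable patterns (0xf0 and 0xba), substitute them for the real
 * pages in dataptrs[], run the dual recovery, then byte-compare the
 * recovered pages against the untouched originals in data[].
 */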
static int test_disks(int i, int j, int disks)
{
	int erra, errb;

	memset(page_address(recovi), 0xf0, PAGE_SIZE);
	memset(page_address(recovj), 0xba, PAGE_SIZE);

	dataptrs[i] = recovi;
	dataptrs[j] = recovj;

	raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs);

	erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
	errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);

	pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
	   __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
	   (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");

	dataptrs[i] = data[i];
	dataptrs[j] = data[j];

	return erra + errb;
}

static int test(int disks, int *tests)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct completion cmp;
	int err = 0;
	int i, j;

	recovi = data[disks];
	recovj = data[disks+1];
	spare = data[disks+2];

	makedata(disks);

	/* Nuke syndromes */
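	/*
	 * (makedata() just randomized these two pages as well; the 0xee fill
	 * below makes it unambiguous that the reference syndrome comes from
	 * async_gen_syndrome() rather than from leftover page contents.)
	 */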
	memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
	memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);

	/* Generate assumed good syndrome */
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
	tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
		pr("error: initial gen_syndrome(%d) timed out\n", disks);
		return 1;
	}
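
	/*
	 * Exercise every unordered pair of failed disks, P and Q included:
	 * disks * (disks - 1) / 2 dual-failure scenarios per geometry.
	 */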
	pr("testing the %d-disk case...\n", disks);
	for (i = 0; i < disks-1; i++)
		for (j = i+1; j < disks; j++) {
			(*tests)++;
			err += test_disks(i, j, disks);
		}

	return err;
}
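
/*
 * Module init: allocate the page pool (NDISKS disk slots plus recovi,
 * recovj and the validation spare) and run the pairwise recovery tests over
 * a set of geometries: the small 4- and 5-disk arrays, the ioatdma-specific
 * 11, 12 and 24 disk cases, and the full NDISKS-disk array.
 */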
static int raid6_test(void)
{
	int err = 0;
	int tests = 0;
	int i;

	for (i = 0; i < NDISKS+3; i++) {
		data[i] = alloc_page(GFP_KERNEL);
		if (!data[i]) {
			while (i--)
				put_page(data[i]);
			return -ENOMEM;
		}
	}

	/* the 4-disk and 5-disk cases are special for the recovery code */
	if (NDISKS > 4)
		err += test(4, &tests);
	if (NDISKS > 5)
		err += test(5, &tests);
	/* the 11 and 12 disk cases are special for ioatdma (p-disabled
	 * q-continuation without extended descriptor)
	 */
	if (NDISKS > 12) {
		err += test(11, &tests);
		err += test(12, &tests);
	}

	/* the 24 disk case is special for ioatdma as it is the boundary point
	 * at which it needs to switch from 8-source ops to 16-source
	 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
	 */
	if (NDISKS > 24)
		err += test(24, &tests);

	err += test(NDISKS, &tests);

	pr("\n");
	pr("complete (%d tests, %d failure%s)\n",
	   tests, err, err == 1 ? "" : "s");

	for (i = 0; i < NDISKS+3; i++)
		put_page(data[i]);

	return 0;
}

static void raid6_test_exit(void)
{
}

/* when compiled-in wait for drivers to load first (assumes dma drivers
 * are also compiled-in)
 */
late_initcall(raid6_test);
module_exit(raid6_test_exit);
MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");