// SPDX-License-Identifier: GPL-2.0-only
/*
 * asynchronous raid6 recovery self test
 * Copyright (c) 2009, Intel Corporation.
 *
 * based on drivers/md/raid6test/test.c:
 *	Copyright 2002-2007 H. Peter Anvin
 */
#include <linux/async_tx.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/module.h>

#undef pr
#define pr(fmt, args...) pr_info("raid6test: " fmt, ##args)

#define NDISKS 64 /* Including P and Q */

static struct page *dataptrs[NDISKS];
unsigned int dataoffs[NDISKS];
static addr_conv_t addr_conv[NDISKS];
static struct page *data[NDISKS+3];
static struct page *spare;
static struct page *recovi;
static struct page *recovj;
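
/*
 * data[] provides one page per disk for the largest test case plus three
 * extra pages that test() hands out as the two recovery targets (recovi,
 * recovj) and the scratch page used by async_syndrome_val() (spare).
 */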

static void callback(void *param)
{
	struct completion *cmp = param;

	complete(cmp);
}

static void makedata(int disks)
{
	int i;

	for (i = 0; i < disks; i++) {
		get_random_bytes(page_address(data[i]), PAGE_SIZE);
		dataptrs[i] = data[i];
		dataoffs[i] = 0;
	}
}

static char disk_type(int d, int disks)
{
	if (d == disks - 2)
		return 'P';
	else if (d == disks - 1)
		return 'Q';
	else
		return 'D';
}
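
/*
 * Dual-failure recovery dispatches on which two blocks were lost: P+Q is
 * handled by simply regenerating the syndrome, data+Q by XOR-reconstructing
 * the data block from P and then regenerating Q, and data+P or data+data by
 * the async_raid6_datap_recov()/async_raid6_2data_recov() helpers.  Every
 * path then runs async_syndrome_val() so the recovered stripe is verified
 * against the surviving sources instead of being trusted blindly.
 */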

/* Recover two failed blocks. */
static void raid6_dual_recov(int disks, size_t bytes, int faila, int failb,
		struct page **ptrs, unsigned int *offs)
{
	struct async_submit_ctl submit;
	struct completion cmp;
	struct dma_async_tx_descriptor *tx = NULL;
	enum sum_check_flags result = ~0;

	if (faila > failb)
		swap(faila, failb);

	if (failb == disks-1) {
		if (faila == disks-2) {
			/* P+Q failure.  Just rebuild the syndrome. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, offs,
					disks, bytes, &submit);
		} else {
			struct page *blocks[NDISKS];
			struct page *dest;
			int count = 0;
			int i;

			BUG_ON(disks > NDISKS);

			/* data+Q failure.  Reconstruct data from P,
			 * then rebuild syndrome
			 */
			for (i = disks; i-- ; ) {
				if (i == faila || i == failb)
					continue;
				blocks[count++] = ptrs[i];
			}
			dest = ptrs[faila];
			init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, addr_conv);
			tx = async_xor(dest, blocks, 0, count, bytes, &submit);

			init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv);
			tx = async_gen_syndrome(ptrs, offs,
					disks, bytes, &submit);
		}
	} else {
		if (failb == disks-2) {
			/* data+P failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_datap_recov(disks, bytes,
					faila, ptrs, offs, &submit);
		} else {
			/* data+data failure. */
			init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
			tx = async_raid6_2data_recov(disks, bytes,
					faila, failb, ptrs, offs, &submit);
		}
	}
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv);
	tx = async_syndrome_val(ptrs, offs,
			disks, bytes, &result, spare, 0, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0)
		pr("%s: timeout! (faila: %d failb: %d disks: %d)\n",
		   __func__, faila, failb, disks);

	if (result != 0)
		pr("%s: validation failure! faila: %d failb: %d sum_check_flags: %x\n",
		   __func__, faila, failb, result);
}
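
/*
 * Inject one (i, j) failure pair: point the failed slots at scratch pages
 * filled with known garbage, run the asynchronous recovery, then memcmp()
 * the recovered pages against the original data.
 */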
static int test_disks(int i, int j, int disks)
{
	int erra, errb;

	memset(page_address(recovi), 0xf0, PAGE_SIZE);
	memset(page_address(recovj), 0xba, PAGE_SIZE);

	dataptrs[i] = recovi;
	dataptrs[j] = recovj;

	raid6_dual_recov(disks, PAGE_SIZE, i, j, dataptrs, dataoffs);

	erra = memcmp(page_address(data[i]), page_address(recovi), PAGE_SIZE);
	errb = memcmp(page_address(data[j]), page_address(recovj), PAGE_SIZE);

	pr("%s(%d, %d): faila=%3d(%c) failb=%3d(%c) %s\n",
	   __func__, i, j, i, disk_type(i, disks), j, disk_type(j, disks),
	   (!erra && !errb) ? "OK" : !erra ? "ERRB" : !errb ? "ERRA" : "ERRAB");

	dataptrs[i] = data[i];
	dataptrs[j] = data[j];

	return erra || errb;
}
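
/*
 * One full pass for a given disk count: fill the array with random data,
 * generate an assumed-good syndrome, then exercise every possible
 * two-disk failure combination via test_disks().
 */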
static int test(int disks, int *tests)
{
	struct dma_async_tx_descriptor *tx;
	struct async_submit_ctl submit;
	struct completion cmp;
	int err = 0;
	int i, j;

	recovi = data[disks];
	recovj = data[disks+1];
	spare = data[disks+2];

	makedata(disks);

	/* Nuke syndromes */
	memset(page_address(data[disks-2]), 0xee, PAGE_SIZE);
	memset(page_address(data[disks-1]), 0xee, PAGE_SIZE);

	/* Generate assumed good syndrome */
	init_completion(&cmp);
	init_async_submit(&submit, ASYNC_TX_ACK, NULL, callback, &cmp, addr_conv);
	tx = async_gen_syndrome(dataptrs, dataoffs, disks, PAGE_SIZE, &submit);
	async_tx_issue_pending(tx);

	if (wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)) == 0) {
		pr("error: initial gen_syndrome(%d) timed out\n", disks);
		return 1;
	}

	pr("testing the %d-disk case...\n", disks);
	for (i = 0; i < disks-1; i++)
		for (j = i+1; j < disks; j++) {
			(*tests)++;
			err += test_disks(i, j, disks);
		}

	return err;
}

static int __init raid6_test(void)
{
	int err = 0;
	int tests = 0;
	int i;

	for (i = 0; i < NDISKS+3; i++) {
		data[i] = alloc_page(GFP_KERNEL);
		if (!data[i]) {
			while (i--)
				put_page(data[i]);
			return -ENOMEM;
		}
	}

	/* the 4-disk and 5-disk cases are special for the recovery code */
	if (NDISKS > 4)
		err += test(4, &tests);
	if (NDISKS > 5)
		err += test(5, &tests);
	/* the 11 and 12 disk cases are special for ioatdma (p-disabled
	 * q-continuation without extended descriptor)
	 */
	if (NDISKS > 12) {
		err += test(11, &tests);
		err += test(12, &tests);
	}

	/* the 24 disk case is special for ioatdma as it is the boundary point
	 * at which it needs to switch from 8-source ops to 16-source
	 * ops for continuation (assumes DMA_HAS_PQ_CONTINUE is not set)
	 */
	if (NDISKS > 24)
		err += test(24, &tests);

	err += test(NDISKS, &tests);

	pr("\n");
	pr("complete (%d tests, %d failure%s)\n",
	   tests, err, err == 1 ? "" : "s");

	for (i = 0; i < NDISKS+3; i++)
		put_page(data[i]);

	return 0;
}

static void __exit raid6_test_exit(void)
{
}

/* when compiled-in wait for drivers to load first (assumes dma drivers
 * are also compiled-in)
 */
late_initcall(raid6_test);
module_exit(raid6_test_exit);

MODULE_AUTHOR("Dan Williams <dan.j.williams@intel.com>");
MODULE_DESCRIPTION("asynchronous RAID-6 recovery self tests");
MODULE_LICENSE("GPL");