// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

#define MAX_DISKS	255
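
/*
 * Illustrative example (hypothetical caller code, mirroring what
 * async_syndrome_val() below does): because P() and Q() expand to
 * array-element lvalues, a destination can be swapped out and restored:
 *
 *	struct page *saved_p = P(blocks, disks);
 *
 *	P(blocks, disks) = spare;	// redirect P to a spare page
 *	...
 *	P(blocks, disks) = saved_p;	// restore for the caller
 */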

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they can not provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}
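
/*
 * Illustrative note: for a device that natively supports P/Q continuation
 * and reports dma_maxpq() == 16, a 45-source stripe is split by the loop
 * above into three chained descriptors (16 + 16 + 13 sources);
 * DMA_PREP_CONTINUE tells the engine to fold the previous partial P/Q
 * results into each follow-on operation.
 */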

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void *)raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offset;

			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the raid6_zero_page
 * in the synchronous path and omitted in the hardware-asynchronous
 * path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[MAX_DISKS];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
						      offset, len, DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev,
							P(blocks, disks),
							offset, len,
							DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev,
							Q(blocks, disks),
							offset, len,
							DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
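
/*
 * Minimal usage sketch (illustrative only; "stripe_pages", "scribble" and
 * "stripe_done" are hypothetical caller-owned names, not part of this file):
 *
 *	struct page *blocks[6];		// 4 data disks + P + Q
 *	struct async_submit_ctl submit;
 *
 *	memcpy(blocks, stripe_pages, sizeof(blocks));
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, stripe_done, NULL,
 *			  scribble);
 *	async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 */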

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks',
 * and 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[MAX_DISKS];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4 || disks > MAX_DISKS);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks - 2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offset, len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks - 2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
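
/*
 * Minimal usage sketch (illustrative only; "spare_page" and "scribble" are
 * hypothetical caller-owned buffers):
 *
 *	enum sum_check_flags pqres = 0;
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL, scribble);
 *	tx = async_syndrome_val(blocks, 0, disks, PAGE_SIZE, &pqres,
 *				spare_page, &submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		pr_warn("P does not match its sources\n");
 */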

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");