// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/*
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])

#define MAX_DISKS 255
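/*
 * Layout sketch (illustrative only, not part of the API): for a
 * hypothetical 6-device array, disks == 6 and
 *
 *	blocks[0..3]	data sources
 *	P(blocks, 6)	== blocks[4], the P destination
 *	Q(blocks, 6)	== blocks[5], the Q destination
 *
 * Because the macros are lvalues, a caller may write e.g.
 * P(blocks, 6) = NULL to omit the P calculation.
 */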
/*
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan,
		      const unsigned char *scfs, int disks,
		      struct dmaengine_unmap_data *unmap,
		      enum dma_ctrl_flags dma_flags,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Drivers force forward progress in case they cannot provide
		 * a descriptor
		 */
		for (;;) {
			dma_dest[0] = unmap->addr[disks - 2];
			dma_dest[1] = unmap->addr[disks - 1];
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &unmap->addr[src_off],
						     pq_src_cnt,
						     &scfs[src_off], unmap->len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}
/*
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;
	int start = -1, stop = disks - 3;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void *) raid6_empty_zero_page;
		} else {
			srcs[i] = page_address(blocks[i]) + offsets[i];

			if (i < disks - 2) {
				stop = i;
				if (start == -1)
					start = i;
			}
		}
	}
	if (submit->flags & ASYNC_TX_PQ_XOR_DST) {
		BUG_ON(!raid6_call.xor_syndrome);
		if (start >= 0)
			raid6_call.xor_syndrome(disks, start, stop, len, srcs);
	} else
		raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}
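/*
 * start/stop sketch (illustrative): with disks == 6 and only
 * blocks[1] and blocks[3] populated, the loop above leaves
 * start == 1 and stop == 3, so the ASYNC_TX_PQ_XOR_DST path only
 * folds data devices 1..3 into the existing P/Q instead of
 * regenerating the syndrome from every source.
 */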
static int is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs,
				  int src_cnt, size_t len)
{
	int i;

	for (i = 0; i < src_cnt; i++) {
		if (!is_dma_pq_aligned(dev, offs[i], 0, len))
			return 0;
	}
	return 1;
}
/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the raid6_zero_page
 * in the synchronous path and omitted in the hardware-asynchronous
 * path.
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks)));

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	/* XORing P/Q is only implemented in software */
	if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct dma_async_tx_descriptor *tx;
		enum dma_ctrl_flags dma_flags = 0;
		unsigned char coefs[MAX_DISKS];
		int i, j;

		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* convert source addresses being careful to collapse 'empty'
		 * sources and update the coefficients accordingly
		 */
		unmap->len = len;
		for (i = 0, j = 0; i < src_cnt; i++) {
			if (blocks[i] == NULL)
				continue;
			unmap->addr[j] = dma_map_page(device->dev, blocks[i],
						      offsets[i], len,
						      DMA_TO_DEVICE);
			coefs[j] = raid6_gfexp[i];
			unmap->to_cnt++;
			j++;
		}

		/*
		 * DMAs use destinations as sources,
		 * so use BIDIRECTIONAL mapping
		 */
		unmap->bidi_cnt++;
		if (P(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks),
							P(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		}

		unmap->bidi_cnt++;
		if (Q(blocks, disks))
			unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks),
							Q(offsets, disks),
							len, DMA_BIDIRECTIONAL);
		else {
			unmap->addr[j++] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		}

		tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit);
		dmaengine_unmap_put(unmap);
		return tx;
	}

	dmaengine_unmap_put(unmap);

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		P(offsets, disks) = 0;
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		Q(offsets, disks) = 0;
	}
	do_sync_gen_syndrome(blocks, offsets, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
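/*
 * Usage sketch (hypothetical caller, not part of this file): compute
 * P and Q for four data pages.  init_async_submit() and the
 * addr_conv_t scribble come from <linux/async_tx.h>; the page setup
 * is illustrative only.
 *
 *	struct page *blocks[6];		// 4 data + P + Q
 *	unsigned int offsets[6] = { 0 };
 *	struct async_submit_ctl submit;
 *	addr_conv_t addr_conv[6];
 *	struct dma_async_tx_descriptor *tx;
 *
 *	// blocks[0..3] = data pages, blocks[4] = P page, blocks[5] = Q page
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL,
 *			  addr_conv);
 *	tx = async_gen_syndrome(blocks, offsets, 6, PAGE_SIZE, &submit);
 *	async_tx_quiesce(&tx);		// wait synchronously, if desired
 */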
static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
	#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
	#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}
/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offsets: offset array into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @s_off: spare buffer page offset
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks' and
 * 'disks' parameters of this routine.  The synchronous path requires
 * a temporary result buffer and submit->scribble to be specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   unsigned int s_off, struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[MAX_DISKS];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	struct dmaengine_unmap_data *unmap = NULL;

	BUG_ON(disks < 4 || disks > MAX_DISKS);

	if (device)
		unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT);

	if (unmap && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned_offs(device, offsets, disks, len)) {
		struct device *dev = device->dev;
		dma_addr_t pq[2];
		int i, j = 0, src_cnt = 0;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);

		unmap->len = len;
		for (i = 0; i < disks - 2; i++)
			if (likely(blocks[i])) {
				unmap->addr[j] = dma_map_page(dev, blocks[i],
							      offsets[i], len,
							      DMA_TO_DEVICE);
				coefs[j] = raid6_gfexp[i];
				unmap->to_cnt++;
				src_cnt++;
				j++;
			}

		if (!P(blocks, disks)) {
			pq[0] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		} else {
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     P(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[0];
			unmap->to_cnt++;
		}
		if (!Q(blocks, disks)) {
			pq[1] = 0;
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		} else {
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     Q(offsets, disks), len,
					     DMA_TO_DEVICE);
			unmap->addr[j++] = pq[1];
			unmap->to_cnt++;
		}

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq,
							    unmap->addr,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		dma_set_unmap(tx, unmap);
		async_tx_submit(chan, tx, submit);
	} else {
		struct page *p_src = P(blocks, disks);
		unsigned int p_off = P(offsets, disks);
		struct page *q_src = Q(blocks, disks);
		unsigned int q_off = Q(offsets, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor_offs(spare, s_off,
					blocks, offsets, disks - 2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + p_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			Q(offsets, disks) = s_off;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offsets, disks,
					len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + q_off;
			s = page_address(spare) + s_off;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		P(offsets, disks) = p_off;
		Q(blocks, disks) = q_src;
		Q(offsets, disks) = q_off;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);
		tx = NULL;
	}
	dmaengine_unmap_put(unmap);

	return tx;
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
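/*
 * Usage sketch (hypothetical caller): verify the P and Q of a
 * 6-device stripe.  'spare' and the addr_conv scribble are required
 * by the synchronous fallback; all names below are illustrative.
 *
 *	enum sum_check_flags pqres = 0;
 *	struct async_submit_ctl submit;
 *	addr_conv_t addr_conv[6];
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL, NULL, NULL,
 *			  addr_conv);
 *	tx = async_syndrome_val(blocks, offsets, 6, PAGE_SIZE, &pqres,
 *				spare, 0, &submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		pr_warn("P does not match\n");
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		pr_warn("Q does not match\n");
 */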
static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}
static void __exit async_pq_exit(void)
{
	__free_page(pq_scribble_page);
}
module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");