/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/gfp.h>

/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
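
/* Illustrative sketch (not part of the original file): because P() and Q()
 * expand to array elements they can be used as assignment targets, which
 * async_syndrome_val() below relies on when it temporarily redirects the Q
 * computation into the caller-provided spare page:
 *
 *	struct page *q_orig = Q(blocks, disks);
 *
 *	Q(blocks, disks) = spare;	(generate Q into the spare buffer)
 *	...
 *	Q(blocks, disks) = q_orig;	(restore the caller's pointer)
 */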

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
		      const unsigned char *scfs, unsigned int offset, int disks,
		      size_t len, dma_addr_t *dma_src,
		      struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *tx = NULL;
	struct dma_device *dma = chan->device;
	enum dma_ctrl_flags dma_flags = 0;
	enum async_tx_flags flags_orig = submit->flags;
	dma_async_tx_callback cb_fn_orig = submit->cb_fn;
	void *cb_param_orig = submit->cb_param;
	int src_cnt = disks - 2;
	unsigned char coefs[src_cnt];
	unsigned short pq_src_cnt;
	dma_addr_t dma_dest[2];
	int src_off = 0;
	int idx;
	int i;

	/* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
	if (P(blocks, disks))
		dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_P;
	if (Q(blocks, disks))
		dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
					   len, DMA_BIDIRECTIONAL);
	else
		dma_flags |= DMA_PREP_PQ_DISABLE_Q;

	/* convert source addresses being careful to collapse 'empty'
	 * sources and update the coefficients accordingly
	 */
	for (i = 0, idx = 0; i < src_cnt; i++) {
		if (blocks[i] == NULL)
			continue;
		dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
					    DMA_TO_DEVICE);
		coefs[idx] = scfs[i];
		idx++;
	}
	src_cnt = idx;
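
	/* Worked example (illustrative, not from the original source): with
	 * src_cnt == 3 and blocks == { A, NULL, C }, the loop above maps only
	 * A and C into dma_src[0..1] and compacts the coefficients to
	 * { scfs[0], scfs[2] }, so the hole is skipped without disturbing the
	 * per-disk multipliers; src_cnt then drops to 2.
	 */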

	while (src_cnt > 0) {
		submit->flags = flags_orig;
		pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
		/* if we are submitting additional pqs, leave the chain open,
		 * clear the callback parameters, and leave the destination
		 * buffers mapped
		 */
		if (src_cnt > pq_src_cnt) {
			submit->flags &= ~ASYNC_TX_ACK;
			submit->flags |= ASYNC_TX_FENCE;
			dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = NULL;
			submit->cb_param = NULL;
		} else {
			dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
			submit->cb_fn = cb_fn_orig;
			submit->cb_param = cb_param_orig;
			if (cb_fn_orig)
				dma_flags |= DMA_PREP_INTERRUPT;
		}
		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;

		/* Since we have clobbered the src_list we are committed
		 * to doing this asynchronously.  Drivers force forward
		 * progress in case they can not provide a descriptor
		 */
		for (;;) {
			tx = dma->device_prep_dma_pq(chan, dma_dest,
						     &dma_src[src_off],
						     pq_src_cnt,
						     &coefs[src_off], len,
						     dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}

		async_tx_submit(chan, tx, submit);
		submit->depend_tx = tx;

		/* drop completed sources */
		src_cnt -= pq_src_cnt;
		src_off += pq_src_cnt;

		dma_flags |= DMA_PREP_CONTINUE;
	}

	return tx;
}
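
/* Worked example (illustrative; the limit is hypothetical): with 20 mapped
 * sources and a channel whose dma_maxpq() reports 8, the loop above issues
 * three chained descriptors covering sources 0-7, 8-15 and 16-19.  From the
 * second descriptor onward DMA_PREP_CONTINUE is set so the driver folds the
 * previously computed P and Q back in as implicit sources.
 */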

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		     size_t len, struct async_submit_ctl *submit)
{
	void **srcs;
	int i;

	if (submit->scribble)
		srcs = submit->scribble;
	else
		srcs = (void **) blocks;

	for (i = 0; i < disks; i++) {
		if (blocks[i] == NULL) {
			BUG_ON(i > disks - 3); /* P or Q can't be zero */
			srcs[i] = (void *)raid6_empty_zero_page;
		} else
			srcs[i] = page_address(blocks[i]) + offset;
	}
	raid6_call.gen_syndrome(disks, len, srcs);
	async_tx_sync_epilog(submit);
}
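
/* Layout sketch (illustrative): for disks == 6 the srcs[] array handed to
 * raid6_call.gen_syndrome() holds the four data pointers in srcs[0..3] and
 * receives the results through srcs[4] (P) and srcs[5] (Q), matching the
 * blocks[disks-2]/blocks[disks-1] convention encoded by the P() and Q()
 * macros above.
 */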

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the
 * raid6_empty_zero_page in the synchronous path and omitted in the
 * hardware-asynchronous path.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
 * 'blocks' may be overwritten to perform address conversions
 * (dma_map_page() or page_address()).
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
		   size_t len, struct async_submit_ctl *submit)
{
	int src_cnt = disks - 2;
	struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
						      &P(blocks, disks), 2,
						      blocks, src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	dma_addr_t *dma_src = NULL;

	BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device &&
	    (src_cnt <= dma_maxpq(device, 0) ||
	     dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		/* run the p+q asynchronously */
		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
					     disks, len, dma_src, submit);
	}

	/* run the pq synchronously */
	pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

	/* wait for any prerequisite operations */
	async_tx_quiesce(&submit->depend_tx);

	if (!P(blocks, disks)) {
		P(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	if (!Q(blocks, disks)) {
		Q(blocks, disks) = pq_scribble_page;
		BUG_ON(len + offset > PAGE_SIZE);
	}
	do_sync_gen_syndrome(blocks, offset, disks, len, submit);

	return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
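
/* Usage sketch (illustrative only; ctx, blocks, disks and scribble are
 * assumed to exist in the caller, and example_stripe_done() is hypothetical).
 * A raid6 writer would prime an async_submit_ctl and let the library choose
 * between the hardware and software paths:
 *
 *	static void example_stripe_done(void *ctx)
 *	{
 *		(completion callback: P and Q for this stripe are now valid)
 *	}
 *
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	(blocks[0..disks-3] = data pages, blocks[disks-2] = P page,
 *	 blocks[disks-1] = Q page)
 *	init_async_submit(&submit, ASYNC_TX_ACK, NULL,
 *			  example_stripe_done, ctx, scribble);
 *	tx = async_gen_syndrome(blocks, 0, disks, PAGE_SIZE, &submit);
 *	(tx is NULL when the operation completed synchronously)
 */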

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	return NULL;
#endif
	return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
				     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks',
 * and 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
		   size_t len, enum sum_check_flags *pqres, struct page *spare,
		   struct async_submit_ctl *submit)
{
	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx;
	unsigned char coefs[disks-2];
	enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
	dma_addr_t *dma_src = NULL;
	int src_cnt = 0;

	BUG_ON(disks < 4);

	if (submit->scribble)
		dma_src = submit->scribble;
	else if (sizeof(dma_addr_t) <= sizeof(struct page *))
		dma_src = (dma_addr_t *) blocks;

	if (dma_src && device && disks <= dma_maxpq(device, 0) &&
	    is_dma_pq_aligned(device, offset, 0, len)) {
		struct device *dev = device->dev;
		dma_addr_t *pq = &dma_src[disks-2];
		int i;

		pr_debug("%s: (async) disks: %d len: %zu\n",
			 __func__, disks, len);
		if (!P(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_P;
		else
			pq[0] = dma_map_page(dev, P(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);
		if (!Q(blocks, disks))
			dma_flags |= DMA_PREP_PQ_DISABLE_Q;
		else
			pq[1] = dma_map_page(dev, Q(blocks, disks),
					     offset, len,
					     DMA_TO_DEVICE);

		if (submit->flags & ASYNC_TX_FENCE)
			dma_flags |= DMA_PREP_FENCE;
		for (i = 0; i < disks-2; i++)
			if (likely(blocks[i])) {
				dma_src[src_cnt] = dma_map_page(dev, blocks[i],
								offset, len,
								DMA_TO_DEVICE);
				coefs[src_cnt] = raid6_gfexp[i];
				src_cnt++;
			}

		for (;;) {
			tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
							    src_cnt,
							    coefs,
							    len, pqres,
							    dma_flags);
			if (likely(tx))
				break;
			async_tx_quiesce(&submit->depend_tx);
			dma_async_issue_pending(chan);
		}
		async_tx_submit(chan, tx, submit);

		return tx;
	} else {
		struct page *p_src = P(blocks, disks);
		struct page *q_src = Q(blocks, disks);
		enum async_tx_flags flags_orig = submit->flags;
		dma_async_tx_callback cb_fn_orig = submit->cb_fn;
		void *scribble = submit->scribble;
		void *cb_param_orig = submit->cb_param;
		void *p, *q, *s;

		pr_debug("%s: (sync) disks: %d len: %zu\n",
			 __func__, disks, len);

		/* caller must provide a temporary result buffer and
		 * allow the input parameters to be preserved
		 */
		BUG_ON(!spare || !scribble);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		/* recompute p and/or q into the temporary buffer and then
		 * check to see the result matches the current value
		 */
		tx = NULL;
		*pqres = 0;
		if (p_src) {
			init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
					  NULL, NULL, scribble);
			tx = async_xor(spare, blocks, offset, disks-2, len, submit);
			async_tx_quiesce(&tx);
			p = page_address(p_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
		}

		if (q_src) {
			P(blocks, disks) = NULL;
			Q(blocks, disks) = spare;
			init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
			tx = async_gen_syndrome(blocks, offset, disks, len, submit);
			async_tx_quiesce(&tx);
			q = page_address(q_src) + offset;
			s = page_address(spare) + offset;
			*pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
		}

		/* restore P, Q and submit */
		P(blocks, disks) = p_src;
		Q(blocks, disks) = q_src;

		submit->cb_fn = cb_fn_orig;
		submit->cb_param = cb_param_orig;
		submit->flags = flags_orig;
		async_tx_sync_epilog(submit);

		return NULL;
	}
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
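
/* Usage sketch (illustrative only; blocks, disks, spare and scribble are
 * assumed to be set up by the caller).  After the operation quiesces, *pqres
 * carries the per-destination mismatch bits:
 *
 *	enum sum_check_flags pqres = 0;
 *	struct async_submit_ctl submit;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	init_async_submit(&submit, 0, NULL, NULL, NULL, scribble);
 *	tx = async_syndrome_val(blocks, 0, disks, PAGE_SIZE, &pqres,
 *				spare, &submit);
 *	async_tx_quiesce(&tx);
 *	if (pqres & SUM_CHECK_P_RESULT)
 *		(P does not match the data blocks)
 *	if (pqres & SUM_CHECK_Q_RESULT)
 *		(Q does not match the data blocks)
 */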

static int __init async_pq_init(void)
{
	pq_scribble_page = alloc_page(GFP_KERNEL);

	if (pq_scribble_page)
		return 0;

	pr_err("%s: failed to allocate required spare page\n", __func__);

	return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
	put_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");