/*
 * Copyright(c) 2007 Yuri Tikhonov <yur@emcraft.com>
 * Copyright(c) 2009 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
/**
 * pq_scribble_page - space to hold throwaway P or Q buffer for
 * synchronous gen_syndrome
 */
static struct page *pq_scribble_page;

/* the struct page *blocks[] parameter passed to async_gen_syndrome()
 * and async_syndrome_val() contains the 'P' destination address at
 * blocks[disks-2] and the 'Q' destination address at blocks[disks-1]
 *
 * note: these are macros as they are used as lvalues
 */
#define P(b, d) (b[d-2])
#define Q(b, d) (b[d-1])
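
/* Layout example (illustrative, not from the original source): with
 * four data disks, disks == 6 and the caller arranges the array as
 *
 *      blocks[0..3] = data pages, blocks[4] = P page, blocks[5] = Q page
 *
 * so P(blocks, 6) selects blocks[4] and Q(blocks, 6) selects blocks[5].
 */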

/**
 * do_async_gen_syndrome - asynchronously calculate P and/or Q
 */
static __async_inline struct dma_async_tx_descriptor *
do_async_gen_syndrome(struct dma_chan *chan, struct page **blocks,
                      const unsigned char *scfs, unsigned int offset, int disks,
                      size_t len, dma_addr_t *dma_src,
                      struct async_submit_ctl *submit)
{
        struct dma_async_tx_descriptor *tx = NULL;
        struct dma_device *dma = chan->device;
        enum dma_ctrl_flags dma_flags = 0;
        enum async_tx_flags flags_orig = submit->flags;
        dma_async_tx_callback cb_fn_orig = submit->cb_fn;
        dma_async_tx_callback cb_param_orig = submit->cb_param;
        int src_cnt = disks - 2;
        unsigned char coefs[src_cnt];
        unsigned short pq_src_cnt;
        dma_addr_t dma_dest[2];
        int src_off = 0;
        int idx;
        int i;

        /* DMAs use destinations as sources, so use BIDIRECTIONAL mapping */
        if (P(blocks, disks))
                dma_dest[0] = dma_map_page(dma->dev, P(blocks, disks), offset,
                                           len, DMA_BIDIRECTIONAL);
        else
                dma_flags |= DMA_PREP_PQ_DISABLE_P;
        if (Q(blocks, disks))
                dma_dest[1] = dma_map_page(dma->dev, Q(blocks, disks), offset,
                                           len, DMA_BIDIRECTIONAL);
        else
                dma_flags |= DMA_PREP_PQ_DISABLE_Q;

        /* convert source addresses being careful to collapse 'empty'
         * sources and update the coefficients accordingly
         */
        for (i = 0, idx = 0; i < src_cnt; i++) {
                if (blocks[i] == NULL)
                        continue;
                dma_src[idx] = dma_map_page(dma->dev, blocks[i], offset, len,
                                            DMA_TO_DEVICE);
                coefs[idx] = scfs[i];
                idx++;
        }
        src_cnt = idx;

        while (src_cnt > 0) {
                submit->flags = flags_orig;
                pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags));
                /* if we are submitting additional pqs, leave the chain open,
                 * clear the callback parameters, and leave the destination
                 * buffers mapped
                 */
                if (src_cnt > pq_src_cnt) {
                        submit->flags &= ~ASYNC_TX_ACK;
                        submit->flags |= ASYNC_TX_FENCE;
                        dma_flags |= DMA_COMPL_SKIP_DEST_UNMAP;
                        submit->cb_fn = NULL;
                        submit->cb_param = NULL;
                } else {
                        dma_flags &= ~DMA_COMPL_SKIP_DEST_UNMAP;
                        submit->cb_fn = cb_fn_orig;
                        submit->cb_param = cb_param_orig;
                        if (cb_fn_orig)
                                dma_flags |= DMA_PREP_INTERRUPT;
                }
                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;

                /* Since we have clobbered the src_list we are committed
                 * to doing this asynchronously.  Drivers force forward
                 * progress in case they can not provide a descriptor
                 */
                for (;;) {
                        tx = dma->device_prep_dma_pq(chan, dma_dest,
                                                     &dma_src[src_off],
                                                     pq_src_cnt,
                                                     &coefs[src_off], len,
                                                     dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }

                async_tx_submit(chan, tx, submit);
                submit->depend_tx = tx;

                /* drop completed sources */
                src_cnt -= pq_src_cnt;
                src_off += pq_src_cnt;

                dma_flags |= DMA_PREP_CONTINUE;
        }

        return tx;
}
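
/* Chunking note (illustrative): when a channel cannot accept every
 * source in one descriptor (src_cnt > dma_maxpq()), the while loop
 * above splits the operation into multiple chained descriptors; each
 * descriptor after the first is prepared with DMA_PREP_CONTINUE so the
 * engine folds the previous partial P/Q results into the next chunk
 * instead of restarting the sums.
 */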

/**
 * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome
 */
static void
do_sync_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                     size_t len, struct async_submit_ctl *submit)
{
        void **srcs;
        int i;

        if (submit->scribble)
                srcs = submit->scribble;
        else
                srcs = (void **) blocks;

        for (i = 0; i < disks; i++) {
                if (blocks[i] == NULL) {
                        BUG_ON(i > disks - 3); /* P or Q can't be zero */
                        srcs[i] = (void *) raid6_empty_zero_page;
                } else
                        srcs[i] = page_address(blocks[i]) + offset;
        }
        raid6_call.gen_syndrome(disks, len, srcs);
        async_tx_sync_epilog(submit);
}

/**
 * async_gen_syndrome - asynchronously calculate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @submit: submission/completion modifiers
 *
 * General note: This routine assumes a field of GF(2^8) with a
 * primitive polynomial of 0x11d and a generator of {02}.
 *
 * 'disks' note: callers can optionally omit either P or Q (but not
 * both) from the calculation by setting blocks[disks-2] or
 * blocks[disks-1] to NULL.  When P or Q is omitted 'len' must be <=
 * PAGE_SIZE as a temporary buffer of this size is used in the
 * synchronous path.  'disks' always accounts for both destination
 * buffers.  If any source buffers (blocks[i] where i < disks - 2) are
 * set to NULL those buffers will be replaced with the raid6_zero_page
 * in the synchronous path and omitted in the hardware-asynchronous
 * path.
 *
 * 'blocks' note: if submit->scribble is NULL then the contents of
 * 'blocks' may be overwritten to perform address conversions
 * (dma_map_page() or page_address()).
 */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
                   size_t len, struct async_submit_ctl *submit)
{
        int src_cnt = disks - 2;
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ,
                                                      &P(blocks, disks), 2,
                                                      blocks, src_cnt, len);
        struct dma_device *device = chan ? chan->device : NULL;
        dma_addr_t *dma_src = NULL;

        BUG_ON(disks > 255 || !(P(blocks, disks) || Q(blocks, disks)));

        if (submit->scribble)
                dma_src = submit->scribble;
        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
                dma_src = (dma_addr_t *) blocks;

        if (dma_src && device &&
            (src_cnt <= dma_maxpq(device, 0) ||
             dma_maxpq(device, DMA_PREP_CONTINUE) > 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                /* run the p+q asynchronously */
                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);
                return do_async_gen_syndrome(chan, blocks, raid6_gfexp, offset,
                                             disks, len, dma_src, submit);
        }

        /* run the pq synchronously */
        pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len);

        /* wait for any prerequisite operations */
        async_tx_quiesce(&submit->depend_tx);

        if (!P(blocks, disks)) {
                P(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        if (!Q(blocks, disks)) {
                Q(blocks, disks) = pq_scribble_page;
                BUG_ON(len + offset > PAGE_SIZE);
        }
        do_sync_gen_syndrome(blocks, offset, disks, len, submit);

        return NULL;
}
EXPORT_SYMBOL_GPL(async_gen_syndrome);
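
/* Usage sketch (illustrative only; the array sizes, 'my_callback' and
 * 'my_context' are assumptions, not part of this file): generating P/Q
 * for a 4-data-disk stripe, with a scribble list supplied so that
 * 'blocks' is preserved across address conversion:
 *
 *      struct page *blocks[6];         // data in [0..3], P at [4], Q at [5]
 *      addr_conv_t addr_conv[6];
 *      struct async_submit_ctl submit;
 *      struct dma_async_tx_descriptor *tx;
 *
 *      init_async_submit(&submit, ASYNC_TX_ACK, NULL, my_callback,
 *                        my_context, addr_conv);
 *      tx = async_gen_syndrome(blocks, 0, 6, PAGE_SIZE, &submit);
 */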

static inline struct dma_chan *
pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
{
#ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
        return NULL;
#endif
        return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks,
                                     disks, len);
}

/**
 * async_syndrome_val - asynchronously validate a raid6 syndrome
 * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
 * @offset: common offset into each block (src and dest) to start transaction
 * @disks: number of blocks (including missing P or Q, see below)
 * @len: length of operation in bytes
 * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set
 * @spare: temporary result buffer for the synchronous case
 * @submit: submission / completion modifiers
 *
 * The same notes from async_gen_syndrome apply to the 'blocks',
 * and 'disks' parameters of this routine.  The synchronous path
 * requires a temporary result buffer and submit->scribble to be
 * specified.
 */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                   size_t len, enum sum_check_flags *pqres, struct page *spare,
                   struct async_submit_ctl *submit)
{
        struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx;
        unsigned char coefs[disks-2];
        enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0;
        dma_addr_t *dma_src = NULL;
        int src_cnt = 0;

        BUG_ON(disks < 4);

        if (submit->scribble)
                dma_src = submit->scribble;
        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
                dma_src = (dma_addr_t *) blocks;

        if (dma_src && device && disks <= dma_maxpq(device, 0) &&
            is_dma_pq_aligned(device, offset, 0, len)) {
                struct device *dev = device->dev;
                dma_addr_t *pq = &dma_src[disks-2];
                int i;

                pr_debug("%s: (async) disks: %d len: %zu\n",
                         __func__, disks, len);
                if (!P(blocks, disks))
                        dma_flags |= DMA_PREP_PQ_DISABLE_P;
                else
                        pq[0] = dma_map_page(dev, P(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);
                if (!Q(blocks, disks))
                        dma_flags |= DMA_PREP_PQ_DISABLE_Q;
                else
                        pq[1] = dma_map_page(dev, Q(blocks, disks),
                                             offset, len,
                                             DMA_TO_DEVICE);

                if (submit->flags & ASYNC_TX_FENCE)
                        dma_flags |= DMA_PREP_FENCE;
                for (i = 0; i < disks-2; i++)
                        if (likely(blocks[i])) {
                                dma_src[src_cnt] = dma_map_page(dev, blocks[i],
                                                                offset, len,
                                                                DMA_TO_DEVICE);
                                coefs[src_cnt] = raid6_gfexp[i];
                                src_cnt++;
                        }

                for (;;) {
                        tx = device->device_prep_dma_pq_val(chan, pq, dma_src,
                                                            src_cnt,
                                                            coefs,
                                                            len, pqres,
                                                            dma_flags);
                        if (likely(tx))
                                break;
                        async_tx_quiesce(&submit->depend_tx);
                        dma_async_issue_pending(chan);
                }
                async_tx_submit(chan, tx, submit);

                return tx;
        } else {
                struct page *p_src = P(blocks, disks);
                struct page *q_src = Q(blocks, disks);
                enum async_tx_flags flags_orig = submit->flags;
                dma_async_tx_callback cb_fn_orig = submit->cb_fn;
                void *scribble = submit->scribble;
                void *cb_param_orig = submit->cb_param;
                void *p, *q, *s;

                pr_debug("%s: (sync) disks: %d len: %zu\n",
                         __func__, disks, len);

                /* caller must provide a temporary result buffer and
                 * allow the input parameters to be preserved
                 */
                BUG_ON(!spare || !scribble);

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                /* recompute p and/or q into the temporary buffer and then
                 * check to see the result matches the current value
                 */
                tx = NULL;
                *pqres = 0;
                if (p_src) {
                        init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL,
                                          NULL, NULL, scribble);
                        tx = async_xor(spare, blocks, offset, disks-2, len, submit);
                        async_tx_quiesce(&tx);
                        p = page_address(p_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P;
                }

                if (q_src) {
                        P(blocks, disks) = NULL;
                        Q(blocks, disks) = spare;
                        init_async_submit(submit, 0, NULL, NULL, NULL, scribble);
                        tx = async_gen_syndrome(blocks, offset, disks, len, submit);
                        async_tx_quiesce(&tx);
                        q = page_address(q_src) + offset;
                        s = page_address(spare) + offset;
                        *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q;
                }

                /* restore P, Q and submit */
                P(blocks, disks) = p_src;
                Q(blocks, disks) = q_src;

                submit->cb_fn = cb_fn_orig;
                submit->cb_param = cb_param_orig;
                submit->flags = flags_orig;
                async_tx_sync_epilog(submit);

                return NULL;
        }
}
EXPORT_SYMBOL_GPL(async_syndrome_val);
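
/* Usage sketch (illustrative only; it reuses the hypothetical
 * declarations from the async_gen_syndrome sketch above and assumes a
 * pre-allocated 'spare_page').  The synchronous fallback insists on
 * both the spare page and a scribble list:
 *
 *      enum sum_check_flags pqres = 0;
 *
 *      init_async_submit(&submit, 0, NULL, NULL, NULL, addr_conv);
 *      tx = async_syndrome_val(blocks, 0, 6, PAGE_SIZE, &pqres,
 *                              spare_page, &submit);
 *      async_tx_quiesce(&tx);
 *      if (pqres & (SUM_CHECK_P_RESULT | SUM_CHECK_Q_RESULT))
 *              ...;    // stored P and/or Q does not match the data
 */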

static int __init async_pq_init(void)
{
        pq_scribble_page = alloc_page(GFP_KERNEL);

        if (pq_scribble_page)
                return 0;

        pr_err("%s: failed to allocate required spare page\n", __func__);

        return -ENOMEM;
}

static void __exit async_pq_exit(void)
{
        put_page(pq_scribble_page);
}

module_init(async_pq_init);
module_exit(async_pq_exit);

MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation");
MODULE_LICENSE("GPL");