/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
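
/*
 * SEC2/3 descriptor pointers address up to 36 bits of bus space: the low
 * 32 bits live in ptr and the upper 4 bits in eptr (extended addressing
 * is enabled per channel via TALITOS_CCCR_LO_EAE in reset_channel()
 * below).  SEC1 pointers are plain 32 bits, so eptr is skipped.  As an
 * illustrative example, dma_addr 0x9_2345_6780 packs as eptr = 0x9,
 * ptr = 0x23456780.
 */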
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (!is_sec1)
		dst_ptr->eptr = src_ptr->eptr;
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
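
/*
 * Sketch of a typical submission (my_done is an arbitrary caller-supplied
 * callback, not a function in this driver): build a fully dma-mapped
 * descriptor, submit it, and treat -EINPROGRESS as success; completion is
 * reported asynchronously through the callback:
 *
 *	desc->hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
 *	ret = talitos_submit(dev, ctx->ch, desc, my_done, areq);
 *	if (ret != -EINPROGRESS) {
 *		unmap and free the request state;
 *		return ret;
 *	}
 *
 * The callback should check both the err argument and the DESC_HDR_DONE
 * feedback bits in the descriptor header for device processing status.
 */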
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else if (!error)
			break;
		else
			status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
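
/*
 * The channel fifo is a power-of-two ring: talitos_submit() advances head
 * under head_lock, flush_channel() advances tail under tail_lock, and both
 * wrap with "& (priv->fifo_len - 1)".  The smp_wmb() in talitos_submit()
 * orders the request fields before the non-NULL desc that marks the entry
 * live, so a consumer that sees desc set can trust the rest of the entry.
 */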
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
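
/*
 * SEC2+ ISR layout is regular: channel N signals done on bit 2N and error
 * on bit 2N+1, which is what the (1 << 0/2/4/6) tests above and the
 * (1 << (ch * 2 + 1)) test in talitos_error() below decode.  SEC1 scatters
 * the per-channel bits instead (done on 28, 30, 16, 18; error on 29, 31,
 * 17, 19), hence the literal masks in DEF_TALITOS1_DONE.
 */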
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or rngu error: ISR 0x%08x_%08x\n",
				isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	} else {							       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	} else {							       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
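
/*
 * The RNGU output fifo must be drained 64 bits at a time: both 32-bit
 * halves are read above, the first value is simply overwritten, and only
 * the low word is returned, which is why data_read reports sizeof(u32)
 * bytes per call.
 */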
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = talitos_rng_init;
	priv->rng.data_present = talitos_rng_data_present;
	priv->rng.data_read = talitos_rng_data_read;
	priv->rng.priv = (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
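
/*
 * After aead_setkey() the two keys sit back to back in ctx->key:
 *
 *	| authkey (authkeylen bytes) | enckey (enckeylen bytes) |
 *
 * ipsec_esp() later points descriptor ptr[0] at the start of this buffer
 * for the HMAC key and ptr[3] at &ctx->key[authkeylen] for the cipher key.
 */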
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
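
/*
 * The zero-length array tail of struct talitos_edesc is sized at
 * allocation time by talitos_edesc_alloc(): SEC2+ uses it as link_tbl,
 * SEC1 as the flat bounce buffer buf[]; both views share the single
 * dma_link_tbl mapping of dma_len bytes.
 */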
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst)
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else {
			oicv = (char *)&edesc->link_tbl[0];
		}

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}
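
/*
 * Worked example with illustrative numbers: for segments of 16, 64 and 64
 * bytes, offset = 20 and cryptlen = 100, the first segment is skipped
 * entirely (offset becomes 4) and two entries are emitted: 60 bytes at
 * sg[1]+4 and 40 bytes at sg[2], the last tagged DESC_PTR_LNKTBL_RETURN.
 */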
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
						       : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
		    sg_to_link_tbl_offset(areq->dst, sg_count,
					  areq->assoclen, cryptlen,
					  &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
				(edesc->src_nents + edesc->dst_nents +
				 2) * sizeof(struct talitos_ptr) +
				authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
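
/*
 * The seven pointers of an ipsec_esp descriptor, as filled in above:
 * ptr[0] hmac key, ptr[1] hmac (associated) data, ptr[2] cipher iv,
 * ptr[3] cipher key, ptr[4] cipher in, ptr[5] cipher out (with the ICV as
 * j_extent), ptr[6] iv out.  Multi-segment inputs are described through
 * link tables carved out of edesc->link_tbl and reached via
 * DESC_PTR_LNKTBL_JUMP.
 */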
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src */
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						  (encrypt ? 0 : authsize));
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						  (encrypt ? authsize : 0));
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
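
/*
 * Sizing example with illustrative numbers (SEC2+): src_nents = 3,
 * dst_nents = 2 and authsize = 12 give dma_len = (3 + 2 + 2) *
 * sizeof(struct talitos_ptr) + 24, i.e. seven link table entries (the two
 * extra cover the AD and ICV entries) plus room for the stashed and
 * generated ICVs.
 */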
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else {
		talitos_sg_unmap(dev, edesc, src, dst);
	}
}
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed */
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}

void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					    sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					 &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourselves\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
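
/*
 * This works because SHA-224 is SHA-256 with different initial hash values
 * and a truncated digest: loading the SHA-224 H0..H7 constants by hand and
 * running the engine's SHA-256 mode produces correct SHA-224 state, while
 * swinit = 1 keeps the h/w from re-initializing the context itself.
 */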
1851 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1853 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1854 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1855 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1856 struct talitos_edesc *edesc;
1857 unsigned int blocksize =
1858 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1859 unsigned int nbytes_to_hash;
1860 unsigned int to_hash_later;
1861 unsigned int nsg;
1863 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1864 /* Buffer up to one whole block */
1865 sg_copy_to_buffer(areq->src,
1866 sg_nents_for_len(areq->src, nbytes),
1867 req_ctx->buf + req_ctx->nbuf, nbytes);
1868 req_ctx->nbuf += nbytes;
1869 return 0;
1872 /* At least (blocksize + 1) bytes are available to hash */
1873 nbytes_to_hash = nbytes + req_ctx->nbuf;
1874 to_hash_later = nbytes_to_hash & (blocksize - 1);
1876 if (req_ctx->last)
1877 to_hash_later = 0;
1878 else if (to_hash_later)
1879 /* There is a partial block. Hash the full block(s) now */
1880 nbytes_to_hash -= to_hash_later;
1881 else {
1882 /* Keep one block buffered */
1883 nbytes_to_hash -= blocksize;
1884 to_hash_later = blocksize;
1887 /* Chain in any previously buffered data */
1888 if (req_ctx->nbuf) {
1889 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1890 sg_init_table(req_ctx->bufsl, nsg);
1891 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1892 if (nsg > 1)
1893 sg_chain(req_ctx->bufsl, 2, areq->src);
1894 req_ctx->psrc = req_ctx->bufsl;
1895 } else
1896 req_ctx->psrc = areq->src;
1898 if (to_hash_later) {
1899 int nents = sg_nents_for_len(areq->src, nbytes);
1900 sg_pcopy_to_buffer(areq->src, nents,
1901 req_ctx->bufnext,
1902 to_hash_later,
1903 nbytes - to_hash_later);
1905 req_ctx->to_hash_later = to_hash_later;
1907 /* Allocate extended descriptor */
1908 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1909 if (IS_ERR(edesc))
1910 return PTR_ERR(edesc);
1912 edesc->desc.hdr = ctx->desc_hdr_template;
1914 /* On last one, request SEC to pad; otherwise continue */
1915 if (req_ctx->last)
1916 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1917 else
1918 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1920 /* request SEC to INIT hash. */
1921 if (req_ctx->first && !req_ctx->swinit)
1922 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1924 /* When the tfm context has a keylen, it's an HMAC.
1925 * A first or last (i.e. not middle) descriptor must request HMAC.
1927 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1928 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1930 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1931 ahash_done);
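/*
 * Worked example with blocksize = 64: if 10 bytes are already buffered
 * and update() supplies 100 more, nbytes_to_hash = 110 and
 * to_hash_later = 110 & 63 = 46, so one 64-byte block (10 buffered + 54
 * from areq->src) is hashed now and the trailing 46 bytes are parked in
 * bufnext.  When the total is an exact multiple of the block size, a
 * full block is still held back (to_hash_later = blocksize) so that a
 * later final() has data left to pad and terminate the digest with.
 */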
1934 static int ahash_update(struct ahash_request *areq)
1936 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1938 req_ctx->last = 0;
1940 return ahash_process_req(areq, areq->nbytes);
1943 static int ahash_final(struct ahash_request *areq)
1945 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1947 req_ctx->last = 1;
1949 return ahash_process_req(areq, 0);
1952 static int ahash_finup(struct ahash_request *areq)
1954 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1956 req_ctx->last = 1;
1958 return ahash_process_req(areq, areq->nbytes);
1961 static int ahash_digest(struct ahash_request *areq)
1963 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1964 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
1966 ahash->init(areq);
1967 req_ctx->last = 1;
1969 return ahash_process_req(areq, areq->nbytes);
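/*
 * The four entry points above differ only in how they drive the
 * first/last flags: update() hashes with last = 0, final() flushes the
 * buffered tail with last = 1 and no new bytes, finup() does both at
 * once, and digest() is init() plus finup() in a single call.
 * ahash_process_req() derives the INIT/PAD/CONT header bits from those
 * two flags.
 */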
1972 static int ahash_export(struct ahash_request *areq, void *out)
1974 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1975 struct talitos_export_state *export = out;
1977 memcpy(export->hw_context, req_ctx->hw_context,
1978 req_ctx->hw_context_size);
1979 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
1980 export->swinit = req_ctx->swinit;
1981 export->first = req_ctx->first;
1982 export->last = req_ctx->last;
1983 export->to_hash_later = req_ctx->to_hash_later;
1984 export->nbuf = req_ctx->nbuf;
1986 return 0;
1989 static int ahash_import(struct ahash_request *areq, const void *in)
1991 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1992 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1993 const struct talitos_export_state *export = in;
1995 memset(req_ctx, 0, sizeof(*req_ctx));
1996 req_ctx->hw_context_size =
1997 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1998 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1999 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2000 memcpy(req_ctx->hw_context, export->hw_context,
2001 req_ctx->hw_context_size);
2002 memcpy(req_ctx->buf, export->buf, export->nbuf);
2003 req_ctx->swinit = export->swinit;
2004 req_ctx->first = export->first;
2005 req_ctx->last = export->last;
2006 req_ctx->to_hash_later = export->to_hash_later;
2007 req_ctx->nbuf = export->nbuf;
2009 return 0;
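/*
 * Note that the import path re-derives hw_context_size from the tfm's
 * digest size rather than trusting the imported blob, and clears the
 * request context first so no stale state leaks through.  The
 * halg.statesize entries below advertise
 * sizeof(struct talitos_export_state), so a caller would round-trip
 * state along these lines (sketch, error handling omitted):
 *
 *	char state[sizeof(struct talitos_export_state)];
 *
 *	crypto_ahash_export(req, state);
 *	crypto_ahash_import(req, state);
 */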
2012 struct keyhash_result {
2013 struct completion completion;
2014 int err;
2017 static void keyhash_complete(struct crypto_async_request *req, int err)
2019 struct keyhash_result *res = req->data;
2021 if (err == -EINPROGRESS)
2022 return;
2024 res->err = err;
2025 complete(&res->completion);
2028 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2029 u8 *hash)
2031 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2033 struct scatterlist sg[1];
2034 struct ahash_request *req;
2035 struct keyhash_result hresult;
2036 int ret;
2038 init_completion(&hresult.completion);
2040 req = ahash_request_alloc(tfm, GFP_KERNEL);
2041 if (!req)
2042 return -ENOMEM;
2044 /* Keep tfm keylen == 0 during hash of the long key */
2045 ctx->keylen = 0;
2046 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2047 keyhash_complete, &hresult);
2049 sg_init_one(&sg[0], key, keylen);
2051 ahash_request_set_crypt(req, sg, hash, keylen);
2052 ret = crypto_ahash_digest(req);
2053 switch (ret) {
2054 case 0:
2055 break;
2056 case -EINPROGRESS:
2057 case -EBUSY:
2058 ret = wait_for_completion_interruptible(
2059 &hresult.completion);
2060 if (!ret)
2061 ret = hresult.err;
2062 break;
2063 default:
2064 break;
2066 ahash_request_free(req);
2068 return ret;
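/*
 * keyhash() is the usual synchronous wrapper around an asynchronous
 * digest: fire crypto_ahash_digest() and, on -EINPROGRESS or -EBUSY,
 * sleep on the completion that keyhash_complete() signals (the
 * -EINPROGRESS notification a backlogged request gets when it reaches
 * the queue is deliberately ignored).  Zeroing ctx->keylen first
 * matters: ahash_process_req() keys the HMAC header bit off
 * ctx->keylen, and the long key must be hashed as plain data, not as
 * an HMAC.
 */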
2071 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2072 unsigned int keylen)
2074 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2075 unsigned int blocksize =
2076 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2077 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2078 unsigned int keysize = keylen;
2079 u8 hash[SHA512_DIGEST_SIZE];
2080 int ret;
2082 if (keylen <= blocksize)
2083 memcpy(ctx->key, key, keysize);
2084 else {
2085 /* Must get the hash of the long key */
2086 ret = keyhash(tfm, key, keylen, hash);
2088 if (ret) {
2089 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2090 return -EINVAL;
2093 keysize = digestsize;
2094 memcpy(ctx->key, hash, digestsize);
2097 ctx->keylen = keysize;
2099 return 0;
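/*
 * This is the HMAC long-key rule from RFC 2104: a key longer than the
 * block size is replaced by its digest before use.  A 100-byte key for
 * hmac(sha256) (blocksize 64) thus becomes its 32-byte SHA-256 digest,
 * so ctx->key never needs to hold more than one block or one digest
 * worth of key material.
 */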
2103 struct talitos_alg_template {
2104 u32 type;
2105 union {
2106 struct crypto_alg crypto;
2107 struct ahash_alg hash;
2108 struct aead_alg aead;
2109 } alg;
2110 __be32 desc_hdr_template;
2113 static struct talitos_alg_template driver_algs[] = {
2114 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2115 { .type = CRYPTO_ALG_TYPE_AEAD,
2116 .alg.aead = {
2117 .base = {
2118 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2119 .cra_driver_name = "authenc-hmac-sha1-"
2120 "cbc-aes-talitos",
2121 .cra_blocksize = AES_BLOCK_SIZE,
2122 .cra_flags = CRYPTO_ALG_ASYNC,
2124 .ivsize = AES_BLOCK_SIZE,
2125 .maxauthsize = SHA1_DIGEST_SIZE,
2127 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2128 DESC_HDR_SEL0_AESU |
2129 DESC_HDR_MODE0_AESU_CBC |
2130 DESC_HDR_SEL1_MDEUA |
2131 DESC_HDR_MODE1_MDEU_INIT |
2132 DESC_HDR_MODE1_MDEU_PAD |
2133 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2135 { .type = CRYPTO_ALG_TYPE_AEAD,
2136 .alg.aead = {
2137 .base = {
2138 .cra_name = "authenc(hmac(sha1),"
2139 "cbc(des3_ede))",
2140 .cra_driver_name = "authenc-hmac-sha1-"
2141 "cbc-3des-talitos",
2142 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2143 .cra_flags = CRYPTO_ALG_ASYNC,
2145 .ivsize = DES3_EDE_BLOCK_SIZE,
2146 .maxauthsize = SHA1_DIGEST_SIZE,
2148 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2149 DESC_HDR_SEL0_DEU |
2150 DESC_HDR_MODE0_DEU_CBC |
2151 DESC_HDR_MODE0_DEU_3DES |
2152 DESC_HDR_SEL1_MDEUA |
2153 DESC_HDR_MODE1_MDEU_INIT |
2154 DESC_HDR_MODE1_MDEU_PAD |
2155 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2157 { .type = CRYPTO_ALG_TYPE_AEAD,
2158 .alg.aead = {
2159 .base = {
2160 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2161 .cra_driver_name = "authenc-hmac-sha224-"
2162 "cbc-aes-talitos",
2163 .cra_blocksize = AES_BLOCK_SIZE,
2164 .cra_flags = CRYPTO_ALG_ASYNC,
2166 .ivsize = AES_BLOCK_SIZE,
2167 .maxauthsize = SHA224_DIGEST_SIZE,
2169 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2170 DESC_HDR_SEL0_AESU |
2171 DESC_HDR_MODE0_AESU_CBC |
2172 DESC_HDR_SEL1_MDEUA |
2173 DESC_HDR_MODE1_MDEU_INIT |
2174 DESC_HDR_MODE1_MDEU_PAD |
2175 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2177 { .type = CRYPTO_ALG_TYPE_AEAD,
2178 .alg.aead = {
2179 .base = {
2180 .cra_name = "authenc(hmac(sha224),"
2181 "cbc(des3_ede))",
2182 .cra_driver_name = "authenc-hmac-sha224-"
2183 "cbc-3des-talitos",
2184 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2185 .cra_flags = CRYPTO_ALG_ASYNC,
2187 .ivsize = DES3_EDE_BLOCK_SIZE,
2188 .maxauthsize = SHA224_DIGEST_SIZE,
2190 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2191 DESC_HDR_SEL0_DEU |
2192 DESC_HDR_MODE0_DEU_CBC |
2193 DESC_HDR_MODE0_DEU_3DES |
2194 DESC_HDR_SEL1_MDEUA |
2195 DESC_HDR_MODE1_MDEU_INIT |
2196 DESC_HDR_MODE1_MDEU_PAD |
2197 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2199 { .type = CRYPTO_ALG_TYPE_AEAD,
2200 .alg.aead = {
2201 .base = {
2202 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2203 .cra_driver_name = "authenc-hmac-sha256-"
2204 "cbc-aes-talitos",
2205 .cra_blocksize = AES_BLOCK_SIZE,
2206 .cra_flags = CRYPTO_ALG_ASYNC,
2208 .ivsize = AES_BLOCK_SIZE,
2209 .maxauthsize = SHA256_DIGEST_SIZE,
2211 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2212 DESC_HDR_SEL0_AESU |
2213 DESC_HDR_MODE0_AESU_CBC |
2214 DESC_HDR_SEL1_MDEUA |
2215 DESC_HDR_MODE1_MDEU_INIT |
2216 DESC_HDR_MODE1_MDEU_PAD |
2217 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2219 { .type = CRYPTO_ALG_TYPE_AEAD,
2220 .alg.aead = {
2221 .base = {
2222 .cra_name = "authenc(hmac(sha256),"
2223 "cbc(des3_ede))",
2224 .cra_driver_name = "authenc-hmac-sha256-"
2225 "cbc-3des-talitos",
2226 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2227 .cra_flags = CRYPTO_ALG_ASYNC,
2229 .ivsize = DES3_EDE_BLOCK_SIZE,
2230 .maxauthsize = SHA256_DIGEST_SIZE,
2232 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2233 DESC_HDR_SEL0_DEU |
2234 DESC_HDR_MODE0_DEU_CBC |
2235 DESC_HDR_MODE0_DEU_3DES |
2236 DESC_HDR_SEL1_MDEUA |
2237 DESC_HDR_MODE1_MDEU_INIT |
2238 DESC_HDR_MODE1_MDEU_PAD |
2239 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2241 { .type = CRYPTO_ALG_TYPE_AEAD,
2242 .alg.aead = {
2243 .base = {
2244 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2245 .cra_driver_name = "authenc-hmac-sha384-"
2246 "cbc-aes-talitos",
2247 .cra_blocksize = AES_BLOCK_SIZE,
2248 .cra_flags = CRYPTO_ALG_ASYNC,
2250 .ivsize = AES_BLOCK_SIZE,
2251 .maxauthsize = SHA384_DIGEST_SIZE,
2253 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2254 DESC_HDR_SEL0_AESU |
2255 DESC_HDR_MODE0_AESU_CBC |
2256 DESC_HDR_SEL1_MDEUB |
2257 DESC_HDR_MODE1_MDEU_INIT |
2258 DESC_HDR_MODE1_MDEU_PAD |
2259 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2261 { .type = CRYPTO_ALG_TYPE_AEAD,
2262 .alg.aead = {
2263 .base = {
2264 .cra_name = "authenc(hmac(sha384),"
2265 "cbc(des3_ede))",
2266 .cra_driver_name = "authenc-hmac-sha384-"
2267 "cbc-3des-talitos",
2268 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2269 .cra_flags = CRYPTO_ALG_ASYNC,
2271 .ivsize = DES3_EDE_BLOCK_SIZE,
2272 .maxauthsize = SHA384_DIGEST_SIZE,
2274 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2275 DESC_HDR_SEL0_DEU |
2276 DESC_HDR_MODE0_DEU_CBC |
2277 DESC_HDR_MODE0_DEU_3DES |
2278 DESC_HDR_SEL1_MDEUB |
2279 DESC_HDR_MODE1_MDEU_INIT |
2280 DESC_HDR_MODE1_MDEU_PAD |
2281 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2283 { .type = CRYPTO_ALG_TYPE_AEAD,
2284 .alg.aead = {
2285 .base = {
2286 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2287 .cra_driver_name = "authenc-hmac-sha512-"
2288 "cbc-aes-talitos",
2289 .cra_blocksize = AES_BLOCK_SIZE,
2290 .cra_flags = CRYPTO_ALG_ASYNC,
2292 .ivsize = AES_BLOCK_SIZE,
2293 .maxauthsize = SHA512_DIGEST_SIZE,
2295 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2296 DESC_HDR_SEL0_AESU |
2297 DESC_HDR_MODE0_AESU_CBC |
2298 DESC_HDR_SEL1_MDEUB |
2299 DESC_HDR_MODE1_MDEU_INIT |
2300 DESC_HDR_MODE1_MDEU_PAD |
2301 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2303 { .type = CRYPTO_ALG_TYPE_AEAD,
2304 .alg.aead = {
2305 .base = {
2306 .cra_name = "authenc(hmac(sha512),"
2307 "cbc(des3_ede))",
2308 .cra_driver_name = "authenc-hmac-sha512-"
2309 "cbc-3des-talitos",
2310 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2311 .cra_flags = CRYPTO_ALG_ASYNC,
2313 .ivsize = DES3_EDE_BLOCK_SIZE,
2314 .maxauthsize = SHA512_DIGEST_SIZE,
2316 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2317 DESC_HDR_SEL0_DEU |
2318 DESC_HDR_MODE0_DEU_CBC |
2319 DESC_HDR_MODE0_DEU_3DES |
2320 DESC_HDR_SEL1_MDEUB |
2321 DESC_HDR_MODE1_MDEU_INIT |
2322 DESC_HDR_MODE1_MDEU_PAD |
2323 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2325 { .type = CRYPTO_ALG_TYPE_AEAD,
2326 .alg.aead = {
2327 .base = {
2328 .cra_name = "authenc(hmac(md5),cbc(aes))",
2329 .cra_driver_name = "authenc-hmac-md5-"
2330 "cbc-aes-talitos",
2331 .cra_blocksize = AES_BLOCK_SIZE,
2332 .cra_flags = CRYPTO_ALG_ASYNC,
2334 .ivsize = AES_BLOCK_SIZE,
2335 .maxauthsize = MD5_DIGEST_SIZE,
2337 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2338 DESC_HDR_SEL0_AESU |
2339 DESC_HDR_MODE0_AESU_CBC |
2340 DESC_HDR_SEL1_MDEUA |
2341 DESC_HDR_MODE1_MDEU_INIT |
2342 DESC_HDR_MODE1_MDEU_PAD |
2343 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2345 { .type = CRYPTO_ALG_TYPE_AEAD,
2346 .alg.aead = {
2347 .base = {
2348 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2349 .cra_driver_name = "authenc-hmac-md5-"
2350 "cbc-3des-talitos",
2351 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2352 .cra_flags = CRYPTO_ALG_ASYNC,
2354 .ivsize = DES3_EDE_BLOCK_SIZE,
2355 .maxauthsize = MD5_DIGEST_SIZE,
2357 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2358 DESC_HDR_SEL0_DEU |
2359 DESC_HDR_MODE0_DEU_CBC |
2360 DESC_HDR_MODE0_DEU_3DES |
2361 DESC_HDR_SEL1_MDEUA |
2362 DESC_HDR_MODE1_MDEU_INIT |
2363 DESC_HDR_MODE1_MDEU_PAD |
2364 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2366 /* ABLKCIPHER algorithms. */
2367 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2368 .alg.crypto = {
2369 .cra_name = "cbc(aes)",
2370 .cra_driver_name = "cbc-aes-talitos",
2371 .cra_blocksize = AES_BLOCK_SIZE,
2372 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2373 CRYPTO_ALG_ASYNC,
2374 .cra_ablkcipher = {
2375 .min_keysize = AES_MIN_KEY_SIZE,
2376 .max_keysize = AES_MAX_KEY_SIZE,
2377 .ivsize = AES_BLOCK_SIZE,
2380 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2381 DESC_HDR_SEL0_AESU |
2382 DESC_HDR_MODE0_AESU_CBC,
2384 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2385 .alg.crypto = {
2386 .cra_name = "cbc(des3_ede)",
2387 .cra_driver_name = "cbc-3des-talitos",
2388 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2389 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2390 CRYPTO_ALG_ASYNC,
2391 .cra_ablkcipher = {
2392 .min_keysize = DES3_EDE_KEY_SIZE,
2393 .max_keysize = DES3_EDE_KEY_SIZE,
2394 .ivsize = DES3_EDE_BLOCK_SIZE,
2397 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2398 DESC_HDR_SEL0_DEU |
2399 DESC_HDR_MODE0_DEU_CBC |
2400 DESC_HDR_MODE0_DEU_3DES,
2402 /* AHASH algorithms. */
2403 { .type = CRYPTO_ALG_TYPE_AHASH,
2404 .alg.hash = {
2405 .halg.digestsize = MD5_DIGEST_SIZE,
2406 .halg.statesize = sizeof(struct talitos_export_state),
2407 .halg.base = {
2408 .cra_name = "md5",
2409 .cra_driver_name = "md5-talitos",
2410 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2411 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2412 CRYPTO_ALG_ASYNC,
2415 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2416 DESC_HDR_SEL0_MDEUA |
2417 DESC_HDR_MODE0_MDEU_MD5,
2419 { .type = CRYPTO_ALG_TYPE_AHASH,
2420 .alg.hash = {
2421 .halg.digestsize = SHA1_DIGEST_SIZE,
2422 .halg.statesize = sizeof(struct talitos_export_state),
2423 .halg.base = {
2424 .cra_name = "sha1",
2425 .cra_driver_name = "sha1-talitos",
2426 .cra_blocksize = SHA1_BLOCK_SIZE,
2427 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2428 CRYPTO_ALG_ASYNC,
2431 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2432 DESC_HDR_SEL0_MDEUA |
2433 DESC_HDR_MODE0_MDEU_SHA1,
2435 { .type = CRYPTO_ALG_TYPE_AHASH,
2436 .alg.hash = {
2437 .halg.digestsize = SHA224_DIGEST_SIZE,
2438 .halg.statesize = sizeof(struct talitos_export_state),
2439 .halg.base = {
2440 .cra_name = "sha224",
2441 .cra_driver_name = "sha224-talitos",
2442 .cra_blocksize = SHA224_BLOCK_SIZE,
2443 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2444 CRYPTO_ALG_ASYNC,
2447 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2448 DESC_HDR_SEL0_MDEUA |
2449 DESC_HDR_MODE0_MDEU_SHA224,
2451 { .type = CRYPTO_ALG_TYPE_AHASH,
2452 .alg.hash = {
2453 .halg.digestsize = SHA256_DIGEST_SIZE,
2454 .halg.statesize = sizeof(struct talitos_export_state),
2455 .halg.base = {
2456 .cra_name = "sha256",
2457 .cra_driver_name = "sha256-talitos",
2458 .cra_blocksize = SHA256_BLOCK_SIZE,
2459 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2460 CRYPTO_ALG_ASYNC,
2463 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2464 DESC_HDR_SEL0_MDEUA |
2465 DESC_HDR_MODE0_MDEU_SHA256,
2467 { .type = CRYPTO_ALG_TYPE_AHASH,
2468 .alg.hash = {
2469 .halg.digestsize = SHA384_DIGEST_SIZE,
2470 .halg.statesize = sizeof(struct talitos_export_state),
2471 .halg.base = {
2472 .cra_name = "sha384",
2473 .cra_driver_name = "sha384-talitos",
2474 .cra_blocksize = SHA384_BLOCK_SIZE,
2475 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2476 CRYPTO_ALG_ASYNC,
2479 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2480 DESC_HDR_SEL0_MDEUB |
2481 DESC_HDR_MODE0_MDEUB_SHA384,
2483 { .type = CRYPTO_ALG_TYPE_AHASH,
2484 .alg.hash = {
2485 .halg.digestsize = SHA512_DIGEST_SIZE,
2486 .halg.statesize = sizeof(struct talitos_export_state),
2487 .halg.base = {
2488 .cra_name = "sha512",
2489 .cra_driver_name = "sha512-talitos",
2490 .cra_blocksize = SHA512_BLOCK_SIZE,
2491 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2492 CRYPTO_ALG_ASYNC,
2495 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2496 DESC_HDR_SEL0_MDEUB |
2497 DESC_HDR_MODE0_MDEUB_SHA512,
2499 { .type = CRYPTO_ALG_TYPE_AHASH,
2500 .alg.hash = {
2501 .halg.digestsize = MD5_DIGEST_SIZE,
2502 .halg.statesize = sizeof(struct talitos_export_state),
2503 .halg.base = {
2504 .cra_name = "hmac(md5)",
2505 .cra_driver_name = "hmac-md5-talitos",
2506 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2507 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2508 CRYPTO_ALG_ASYNC,
2511 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2512 DESC_HDR_SEL0_MDEUA |
2513 DESC_HDR_MODE0_MDEU_MD5,
2515 { .type = CRYPTO_ALG_TYPE_AHASH,
2516 .alg.hash = {
2517 .halg.digestsize = SHA1_DIGEST_SIZE,
2518 .halg.statesize = sizeof(struct talitos_export_state),
2519 .halg.base = {
2520 .cra_name = "hmac(sha1)",
2521 .cra_driver_name = "hmac-sha1-talitos",
2522 .cra_blocksize = SHA1_BLOCK_SIZE,
2523 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2524 CRYPTO_ALG_ASYNC,
2527 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2528 DESC_HDR_SEL0_MDEUA |
2529 DESC_HDR_MODE0_MDEU_SHA1,
2531 { .type = CRYPTO_ALG_TYPE_AHASH,
2532 .alg.hash = {
2533 .halg.digestsize = SHA224_DIGEST_SIZE,
2534 .halg.statesize = sizeof(struct talitos_export_state),
2535 .halg.base = {
2536 .cra_name = "hmac(sha224)",
2537 .cra_driver_name = "hmac-sha224-talitos",
2538 .cra_blocksize = SHA224_BLOCK_SIZE,
2539 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2540 CRYPTO_ALG_ASYNC,
2543 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2544 DESC_HDR_SEL0_MDEUA |
2545 DESC_HDR_MODE0_MDEU_SHA224,
2547 { .type = CRYPTO_ALG_TYPE_AHASH,
2548 .alg.hash = {
2549 .halg.digestsize = SHA256_DIGEST_SIZE,
2550 .halg.statesize = sizeof(struct talitos_export_state),
2551 .halg.base = {
2552 .cra_name = "hmac(sha256)",
2553 .cra_driver_name = "hmac-sha256-talitos",
2554 .cra_blocksize = SHA256_BLOCK_SIZE,
2555 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2556 CRYPTO_ALG_ASYNC,
2559 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2560 DESC_HDR_SEL0_MDEUA |
2561 DESC_HDR_MODE0_MDEU_SHA256,
2563 { .type = CRYPTO_ALG_TYPE_AHASH,
2564 .alg.hash = {
2565 .halg.digestsize = SHA384_DIGEST_SIZE,
2566 .halg.statesize = sizeof(struct talitos_export_state),
2567 .halg.base = {
2568 .cra_name = "hmac(sha384)",
2569 .cra_driver_name = "hmac-sha384-talitos",
2570 .cra_blocksize = SHA384_BLOCK_SIZE,
2571 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2572 CRYPTO_ALG_ASYNC,
2575 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2576 DESC_HDR_SEL0_MDEUB |
2577 DESC_HDR_MODE0_MDEUB_SHA384,
2579 { .type = CRYPTO_ALG_TYPE_AHASH,
2580 .alg.hash = {
2581 .halg.digestsize = SHA512_DIGEST_SIZE,
2582 .halg.statesize = sizeof(struct talitos_export_state),
2583 .halg.base = {
2584 .cra_name = "hmac(sha512)",
2585 .cra_driver_name = "hmac-sha512-talitos",
2586 .cra_blocksize = SHA512_BLOCK_SIZE,
2587 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2588 CRYPTO_ALG_ASYNC,
2591 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2592 DESC_HDR_SEL0_MDEUB |
2593 DESC_HDR_MODE0_MDEUB_SHA512,
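/*
 * Each desc_hdr_template above is an OR of fields the hardware parses
 * out of the descriptor header: a descriptor type, the primary
 * execution unit (SEL0) with its mode bits and, for the single-pass
 * AEAD descriptors, a secondary unit (SEL1) with its own mode bits.
 * Reading the first entry, "authenc(hmac(sha1),cbc(aes))" requests an
 * IPSEC_ESP-type descriptor that runs the AESU in CBC mode while the
 * MDEU-A computes an initialized, padded SHA-1 HMAC.
 */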
2597 struct talitos_crypto_alg {
2598 struct list_head entry;
2599 struct device *dev;
2600 struct talitos_alg_template algt;
2603 static int talitos_init_common(struct talitos_ctx *ctx,
2604 struct talitos_crypto_alg *talitos_alg)
2606 struct talitos_private *priv;
2608 /* update context with ptr to dev */
2609 ctx->dev = talitos_alg->dev;
2611 /* assign SEC channel to tfm in round-robin fashion */
2612 priv = dev_get_drvdata(ctx->dev);
2613 ctx->ch = atomic_inc_return(&priv->last_chan) &
2614 (priv->num_channels - 1);
2616 /* copy descriptor header template value */
2617 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2619 /* select done notification */
2620 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2622 return 0;
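/*
 * The AND against (num_channels - 1) is a cheap modulo: probe rejects
 * device trees where fsl,num-channels is not a power of two, so the
 * mask spreads tfms round-robin over the channels.  With four channels,
 * successive values of last_chan map to 1, 2, 3, 0, 1, ...
 */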
2625 static int talitos_cra_init(struct crypto_tfm *tfm)
2627 struct crypto_alg *alg = tfm->__crt_alg;
2628 struct talitos_crypto_alg *talitos_alg;
2629 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2631 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2632 talitos_alg = container_of(__crypto_ahash_alg(alg),
2633 struct talitos_crypto_alg,
2634 algt.alg.hash);
2635 else
2636 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2637 algt.alg.crypto);
2639 return talitos_init_common(ctx, talitos_alg);
2642 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2644 struct aead_alg *alg = crypto_aead_alg(tfm);
2645 struct talitos_crypto_alg *talitos_alg;
2646 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2648 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2649 algt.alg.aead);
2651 return talitos_init_common(ctx, talitos_alg);
2654 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2656 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2658 talitos_cra_init(tfm);
2660 ctx->keylen = 0;
2661 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2662 sizeof(struct talitos_ahash_req_ctx));
2664 return 0;
2668 * given the alg's descriptor header template, determine whether descriptor
2669 * type and primary/secondary execution units required match the hw
2670 * capabilities description provided in the device tree node.
2672 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2674 struct talitos_private *priv = dev_get_drvdata(dev);
2675 int ret;
2677 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2678 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2680 if (SECONDARY_EU(desc_hdr_template))
2681 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2682 & priv->exec_units);
2684 return ret;
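/*
 * Example: the "cbc(aes)" template above is registered only when the
 * device tree's fsl,descriptor-types-mask carries the common-nonsnoop
 * descriptor bit and fsl,exec-units-mask advertises the AESU; the AEAD
 * templates additionally require their secondary unit (an MDEU) to be
 * present.
 */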
2687 static int talitos_remove(struct platform_device *ofdev)
2689 struct device *dev = &ofdev->dev;
2690 struct talitos_private *priv = dev_get_drvdata(dev);
2691 struct talitos_crypto_alg *t_alg, *n;
2692 int i;
2694 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
2695 switch (t_alg->algt.type) {
2696 case CRYPTO_ALG_TYPE_ABLKCIPHER:
crypto_unregister_alg(&t_alg->algt.alg.crypto);
2697 break;
2698 case CRYPTO_ALG_TYPE_AEAD:
2699 crypto_unregister_aead(&t_alg->algt.alg.aead);
break; /* alg.aead and alg.hash overlay the same union; don't fall through */
2700 case CRYPTO_ALG_TYPE_AHASH:
2701 crypto_unregister_ahash(&t_alg->algt.alg.hash);
2702 break;
2704 list_del(&t_alg->entry);
2705 kfree(t_alg);
2708 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
2709 talitos_unregister_rng(dev);
2711 for (i = 0; priv->chan && i < priv->num_channels; i++)
2712 kfree(priv->chan[i].fifo);
2714 kfree(priv->chan);
2716 for (i = 0; i < 2; i++)
2717 if (priv->irq[i]) {
2718 free_irq(priv->irq[i], dev);
2719 irq_dispose_mapping(priv->irq[i]);
2722 tasklet_kill(&priv->done_task[0]);
2723 if (priv->irq[1])
2724 tasklet_kill(&priv->done_task[1]);
2726 iounmap(priv->reg);
2728 kfree(priv);
2730 return 0;
2733 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
2734 struct talitos_alg_template *template)
2737 struct talitos_private *priv = dev_get_drvdata(dev);
2738 struct talitos_crypto_alg *t_alg;
2739 struct crypto_alg *alg;
2741 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
2742 if (!t_alg)
2743 return ERR_PTR(-ENOMEM);
2745 t_alg->algt = *template;
2747 switch (t_alg->algt.type) {
2748 case CRYPTO_ALG_TYPE_ABLKCIPHER:
2749 alg = &t_alg->algt.alg.crypto;
2750 alg->cra_init = talitos_cra_init;
2751 alg->cra_type = &crypto_ablkcipher_type;
2752 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
2753 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
2754 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
2755 alg->cra_ablkcipher.geniv = "eseqiv";
2756 break;
2757 case CRYPTO_ALG_TYPE_AEAD:
2758 alg = &t_alg->algt.alg.aead.base;
2759 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
2760 t_alg->algt.alg.aead.setkey = aead_setkey;
2761 t_alg->algt.alg.aead.encrypt = aead_encrypt;
2762 t_alg->algt.alg.aead.decrypt = aead_decrypt;
2763 break;
2764 case CRYPTO_ALG_TYPE_AHASH:
2765 alg = &t_alg->algt.alg.hash.halg.base;
2766 alg->cra_init = talitos_cra_init_ahash;
2767 alg->cra_type = &crypto_ahash_type;
2768 t_alg->algt.alg.hash.init = ahash_init;
2769 t_alg->algt.alg.hash.update = ahash_update;
2770 t_alg->algt.alg.hash.final = ahash_final;
2771 t_alg->algt.alg.hash.finup = ahash_finup;
2772 t_alg->algt.alg.hash.digest = ahash_digest;
2773 if (!strncmp(alg->cra_name, "hmac", 4))
2774 t_alg->algt.alg.hash.setkey = ahash_setkey;
2775 t_alg->algt.alg.hash.import = ahash_import;
2776 t_alg->algt.alg.hash.export = ahash_export;
2778 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
2779 !strncmp(alg->cra_name, "hmac", 4)) {
2780 kfree(t_alg);
2781 return ERR_PTR(-ENOTSUPP);
2783 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
2784 (!strcmp(alg->cra_name, "sha224") ||
2785 !strcmp(alg->cra_name, "hmac(sha224)"))) {
2786 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
2787 t_alg->algt.desc_hdr_template =
2788 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2789 DESC_HDR_SEL0_MDEUA |
2790 DESC_HDR_MODE0_MDEU_SHA256;
2792 break;
2793 default:
2794 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
2795 kfree(t_alg);
2796 return ERR_PTR(-EINVAL);
2799 alg->cra_module = THIS_MODULE;
2800 alg->cra_priority = TALITOS_CRA_PRIORITY;
2801 alg->cra_alignmask = 0;
2802 alg->cra_ctxsize = sizeof(struct talitos_ctx);
2803 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
2805 t_alg->dev = dev;
2807 return t_alg;
2810 static int talitos_probe_irq(struct platform_device *ofdev)
2812 struct device *dev = &ofdev->dev;
2813 struct device_node *np = ofdev->dev.of_node;
2814 struct talitos_private *priv = dev_get_drvdata(dev);
2815 int err;
2816 bool is_sec1 = has_ftr_sec1(priv);
2818 priv->irq[0] = irq_of_parse_and_map(np, 0);
2819 if (!priv->irq[0]) {
2820 dev_err(dev, "failed to map irq\n");
2821 return -EINVAL;
2823 if (is_sec1) {
2824 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
2825 dev_driver_string(dev), dev);
2826 goto primary_out;
2829 priv->irq[1] = irq_of_parse_and_map(np, 1);
2831 /* if there is no second irq line, request all four channels on the primary */
2832 if (!priv->irq[1]) {
2833 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
2834 dev_driver_string(dev), dev);
2835 goto primary_out;
2838 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
2839 dev_driver_string(dev), dev);
2840 if (err)
2841 goto primary_out;
2843 /* get the secondary irq line */
2844 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
2845 dev_driver_string(dev), dev);
2846 if (err) {
2847 dev_err(dev, "failed to request secondary irq\n");
2848 irq_dispose_mapping(priv->irq[1]);
2849 priv->irq[1] = 0;
2852 return err;
2854 primary_out:
2855 if (err) {
2856 dev_err(dev, "failed to request primary irq\n");
2857 irq_dispose_mapping(priv->irq[0]);
2858 priv->irq[0] = 0;
2861 return err;
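/*
 * IRQ topology: SEC1 parts expose a single interrupt covering all four
 * channels.  SEC2+ may expose two; when the second line maps, the
 * handler names reflect the split used here, channels 0/2 serviced via
 * irq[0] and channels 1/3 via irq[1], with a fallback to the single
 * 4-channel handler when only one line is wired up.
 */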
2864 static int talitos_probe(struct platform_device *ofdev)
2866 struct device *dev = &ofdev->dev;
2867 struct device_node *np = ofdev->dev.of_node;
2868 struct talitos_private *priv;
2869 const unsigned int *prop;
2870 int i, err;
2871 int stride;
2873 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
2874 if (!priv)
2875 return -ENOMEM;
2877 INIT_LIST_HEAD(&priv->alg_list);
2879 dev_set_drvdata(dev, priv);
2881 priv->ofdev = ofdev;
2883 spin_lock_init(&priv->reg_lock);
2885 priv->reg = of_iomap(np, 0);
2886 if (!priv->reg) {
2887 dev_err(dev, "failed to of_iomap\n");
2888 err = -ENOMEM;
2889 goto err_out;
2892 /* get SEC version capabilities from device tree */
2893 prop = of_get_property(np, "fsl,num-channels", NULL);
2894 if (prop)
2895 priv->num_channels = *prop;
2897 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
2898 if (prop)
2899 priv->chfifo_len = *prop;
2901 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
2902 if (prop)
2903 priv->exec_units = *prop;
2905 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
2906 if (prop)
2907 priv->desc_types = *prop;
2909 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
2910 !priv->exec_units || !priv->desc_types) {
2911 dev_err(dev, "invalid property data in device tree node\n");
2912 err = -EINVAL;
2913 goto err_out;
2916 if (of_device_is_compatible(np, "fsl,sec3.0"))
2917 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
2919 if (of_device_is_compatible(np, "fsl,sec2.1"))
2920 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
2921 TALITOS_FTR_SHA224_HWINIT |
2922 TALITOS_FTR_HMAC_OK;
2924 if (of_device_is_compatible(np, "fsl,sec1.0"))
2925 priv->features |= TALITOS_FTR_SEC1;
2927 if (of_device_is_compatible(np, "fsl,sec1.2")) {
2928 priv->reg_deu = priv->reg + TALITOS12_DEU;
2929 priv->reg_aesu = priv->reg + TALITOS12_AESU;
2930 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
2931 stride = TALITOS1_CH_STRIDE;
2932 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
2933 priv->reg_deu = priv->reg + TALITOS10_DEU;
2934 priv->reg_aesu = priv->reg + TALITOS10_AESU;
2935 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
2936 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
2937 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
2938 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
2939 stride = TALITOS1_CH_STRIDE;
2940 } else {
2941 priv->reg_deu = priv->reg + TALITOS2_DEU;
2942 priv->reg_aesu = priv->reg + TALITOS2_AESU;
2943 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
2944 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
2945 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
2946 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
2947 priv->reg_keu = priv->reg + TALITOS2_KEU;
2948 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
2949 stride = TALITOS2_CH_STRIDE;
2952 err = talitos_probe_irq(ofdev);
2953 if (err)
2954 goto err_out;
2956 if (of_device_is_compatible(np, "fsl,sec1.0")) {
2957 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
2958 (unsigned long)dev);
2959 } else {
2960 if (!priv->irq[1]) {
2961 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
2962 (unsigned long)dev);
2963 } else {
2964 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
2965 (unsigned long)dev);
2966 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
2967 (unsigned long)dev);
2971 priv->chan = kzalloc(sizeof(struct talitos_channel) *
2972 priv->num_channels, GFP_KERNEL);
2973 if (!priv->chan) {
2974 dev_err(dev, "failed to allocate channel management space\n");
2975 err = -ENOMEM;
2976 goto err_out;
2979 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
2981 for (i = 0; i < priv->num_channels; i++) {
2982 priv->chan[i].reg = priv->reg + stride * (i + 1);
2983 if (!priv->irq[1] || !(i & 1))
2984 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
2986 spin_lock_init(&priv->chan[i].head_lock);
2987 spin_lock_init(&priv->chan[i].tail_lock);
2989 priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
2990 priv->fifo_len, GFP_KERNEL);
2991 if (!priv->chan[i].fifo) {
2992 dev_err(dev, "failed to allocate request fifo %d\n", i);
2993 err = -ENOMEM;
2994 goto err_out;
2997 atomic_set(&priv->chan[i].submit_count,
2998 -(priv->chfifo_len - 1));
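/*
 * Starting submit_count at -(chfifo_len - 1) lets the submit path (not
 * shown here) detect a full channel with a single atomic increment: the
 * counter reaches zero when chfifo_len - 1 requests are in flight, so
 * the next attempt drives it positive and can be bounced, keeping one
 * descriptor slot in reserve.
 */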
3001 dma_set_mask(dev, DMA_BIT_MASK(36));
3003 /* reset and initialize the h/w */
3004 err = init_device(dev);
3005 if (err) {
3006 dev_err(dev, "failed to initialize device\n");
3007 goto err_out;
3010 /* register the RNG, if available */
3011 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3012 err = talitos_register_rng(dev);
3013 if (err) {
3014 dev_err(dev, "failed to register hwrng: %d\n", err);
3015 goto err_out;
3016 } else
3017 dev_info(dev, "hwrng\n");
3020 /* register crypto algorithms the device supports */
3021 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3022 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3023 struct talitos_crypto_alg *t_alg;
3024 struct crypto_alg *alg = NULL;
3026 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3027 if (IS_ERR(t_alg)) {
3028 err = PTR_ERR(t_alg);
3029 if (err == -ENOTSUPP)
3030 continue;
3031 goto err_out;
3034 switch (t_alg->algt.type) {
3035 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3036 err = crypto_register_alg(
3037 &t_alg->algt.alg.crypto);
3038 alg = &t_alg->algt.alg.crypto;
3039 break;
3041 case CRYPTO_ALG_TYPE_AEAD:
3042 err = crypto_register_aead(
3043 &t_alg->algt.alg.aead);
3044 alg = &t_alg->algt.alg.aead.base;
3045 break;
3047 case CRYPTO_ALG_TYPE_AHASH:
3048 err = crypto_register_ahash(
3049 &t_alg->algt.alg.hash);
3050 alg = &t_alg->algt.alg.hash.halg.base;
3051 break;
3053 if (err) {
3054 dev_err(dev, "%s alg registration failed\n",
3055 alg->cra_driver_name);
3056 kfree(t_alg);
3057 } else
3058 list_add_tail(&t_alg->entry, &priv->alg_list);
3061 if (!list_empty(&priv->alg_list))
3062 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3063 (char *)of_get_property(np, "compatible", NULL));
3065 return 0;
3067 err_out:
3068 talitos_remove(ofdev);
3070 return err;
3073 static const struct of_device_id talitos_match[] = {
3074 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3076 .compatible = "fsl,sec1.0",
3078 #endif
3079 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3081 .compatible = "fsl,sec2.0",
3083 #endif
3086 MODULE_DEVICE_TABLE(of, talitos_match);
3088 static struct platform_driver talitos_driver = {
3089 .driver = {
3090 .name = "talitos",
3091 .of_match_table = talitos_match,
3093 .probe = talitos_probe,
3094 .remove = talitos_remove,
3097 module_platform_driver(talitos_driver);
3099 MODULE_LICENSE("GPL");
3100 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3101 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");