/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
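
/*
 * The helpers below hide the pointer-format difference between SEC1 and
 * SEC2+ behind an is_sec1 flag: SEC1 descriptor pointers carry a 16-bit
 * length in len1 and have no extended-address byte, while SEC2+ uses
 * len plus eptr, which holds the upper bits of the 36-bit DMA address.
 */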

static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     unsigned int len, void *data,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}

static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}

static void map_single_talitos_ptr_nosync(struct device *dev,
					  struct talitos_ptr *ptr,
					  unsigned int len, void *data,
					  enum dma_data_direction dir)
{
	__map_single_talitos_ptr(dev, ptr, len, data, dir,
				 DMA_ATTR_SKIP_CPU_SYNC);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
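
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * that has built and dma-mapped a descriptor would submit it roughly
 * like this, treating -EINPROGRESS as success and -EAGAIN as a full
 * channel fifo to be retried or backlogged; "my_done" and "my_req" are
 * made-up names.
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		struct my_req *req = context;
 *
 *		// check desc->hdr DONE bits and error, then complete req
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_req);
 *	if (err != -EINPROGRESS)
 *		// unmap and free, or retry later on -EAGAIN
 */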

static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
	struct talitos_edesc *edesc;

	if (!is_sec1)
		return request->desc->hdr;

	if (!request->desc->next_desc)
		return request->desc->hdr1;

	edesc = container_of(request->desc, struct talitos_edesc, desc);

	return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}

/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = get_request_hdr(request, is_sec1);

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
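
/*
 * In the done tasklets below, the SEC1 variant tests literal ISR bits:
 * channels 0-3 report "done" at bits 28, 30, 16 and 18 respectively
 * (masks 0x10000000, 0x40000000, 0x00010000, 0x00040000), whereas the
 * SEC2+ variant uses the low-order even bits 0, 2, 4 and 6.
 */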

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)

/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
		struct talitos_edesc *edesc;

		edesc = container_of(priv->chan[ch].fifo[iter].desc,
				     struct talitos_edesc, desc);
		return ((struct talitos_desc *)
			(edesc->buf + edesc->dma_len))->hdr;
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)

/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}
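
/*
 * Note on talitos_rng_data_read() below: the RNGU output fifo must be
 * read as a full 64-bit word (both register halves), but the hwrng API
 * returns 32 bits per call, so the first in_be32() result is
 * intentionally overwritten and only the low half is handed back.
 */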

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}

/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	memzero_explicit(&keys, sizeof(keys));
	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}
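
/*
 * After aead_setkey() above, ctx->key holds the authentication key
 * immediately followed by the encryption key, covered by the single
 * DMA mapping ctx->dma_key; ipsec_esp() later points the HMAC-key
 * pointer at the first authkeylen bytes and the cipher-key pointer at
 * ctx->dma_key + authkeylen.
 */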

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq, bool encrypt)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
			 cryptlen + authsize, areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq, true);

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	if (!err) {
		/* auth check */
		oicv = edesc->buf + edesc->dma_len;
		icv = oicv - authsize;

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req, false);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int datalen, int elen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = elen ? sg_count + 1 : sg_count;
	int count = 0;
	int cryptlen = datalen + elen;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		if (datalen > 0 && len > datalen) {
			to_talitos_ptr(link_tbl_ptr + count,
				       sg_dma_address(sg) + offset, datalen, 0);
			to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
			count++;
			len -= datalen;
			offset += datalen;
		}
		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		datalen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RET, 0);

	return count;
}
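
/*
 * talitos_sg_map_ext() below picks the cheapest representation for a
 * data pointer: a direct pointer when the scatterlist is one segment
 * (and mapping isn't forced), the bounce buffer in edesc->buf on SEC1
 * (which has no s/g link tables), or a SEC2+ link table built by
 * sg_to_link_tbl_offset() above and flagged DESC_PTR_LNKTBL_JUMP.
 */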

static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen,
			      bool force)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1 && !force) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1 && !force) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}

static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0, false);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     bool encrypt,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
	dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen,
				 false);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	if (is_ipsec_esp && encrypt)
		elen = authsize;
	else
		elen = 0;
	ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
				 sg_count, areq->assoclen, tbl_off, elen,
				 is_ipsec_esp && !encrypt);
	tbl_off += ret;

	/* ICV data */
	edesc->icv_ool = !encrypt;

	if (!encrypt && is_ipsec_esp) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		/* Add an entry to the link table for ICV data */
		to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
		to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
		sync_needed = true;
	} else if (!encrypt) {
		to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
		sync_needed = true;
	} else if (!is_ipsec_esp) {
		talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
			       sg_count, areq->assoclen + cryptlen, tbl_off);
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq, encrypt);
		kfree(edesc);
	}
	return ret;
}
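
/*
 * For reference, the pointer map of the descriptor built by ipsec_esp()
 * above: ptr[0] HMAC key, ptr[1] associated data, ptr[2]/ptr[3] cipher
 * IV and cipher key (order depends on descriptor type), ptr[4] cipher
 * in, ptr[5] cipher out, ptr[6] ICV or IV out depending on direction
 * and descriptor type.
 */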

/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			return ERR_PTR(-EINVAL);
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			return ERR_PTR(-EINVAL);
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents || !encrypt) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0) + authsize;
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
	}
	alloc_len += icv_stashing ? authsize : 0;

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);
	alloc_len += ivsize;

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc)
		return ERR_PTR(-ENOMEM);
	if (ivsize) {
		iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
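
/*
 * Layout of the allocation made above: the talitos_edesc struct is
 * followed by dma_len bytes of link-table/bounce space (which also
 * hosts the generated ICV at its tail), then an optional stashed-ICV
 * area, on SEC1 ahash room for a second descriptor, and finally ivsize
 * bytes at the very end into which the caller's IV is copied before
 * being DMA-mapped.
 */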

static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}

static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}

static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	void *icvdata;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, false,
				 ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	icvdata = edesc->buf + edesc->dma_len;

	sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
			   req->assoclen + req->cryptlen - authsize);

	return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}

static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_WEAK_KEY) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}

static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
				 const u8 *key, unsigned int keylen)
{
	if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
	    keylen == AES_KEYSIZE_256)
		return ablkcipher_setkey(cipher, key, keylen);

	crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);

	return -EINVAL;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);
	memcpy(areq->info, ctx->iv, ivsize);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
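
/*
 * The ablkcipher descriptor built by common_nonsnoop() above uses the
 * simple pointer map: ptr[0] and ptr[6] empty, ptr[1] IV in, ptr[2]
 * key, ptr[3] cipher in, ptr[4] cipher out, ptr[5] IV out.
 */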

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

	if (!areq->nbytes)
		return 0;

	if (areq->nbytes % blocksize)
		return -EINVAL;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = (struct talitos_desc *)
				     (edesc->buf + edesc->dma_len);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	if (req_ctx->psrc)
		talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}

static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
					      req_ctx->hw_context_size,
					      req_ctx->hw_context,
					      DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		struct talitos_desc *desc2 = (struct talitos_desc *)
					     (edesc->buf + edesc->dma_len);
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, 0, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last)
			map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
						      req_ctx->hw_context_size,
						      req_ctx->hw_context,
						      DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
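
/*
 * On SEC1, when both previously buffered bytes (req_ctx->nbuf) and new
 * scatterlist data must be hashed in one request, the code above chains
 * a second descriptor, desc2, placed after the link tables in
 * edesc->buf and linked via desc->next_desc: the first descriptor
 * hashes the buffered block with padding and done-notification
 * suppressed, and desc2 then processes the scatterlist data.
 */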
1881 static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
1882 unsigned int nbytes)
1884 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1885 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1886 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1887 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1888 bool is_sec1 = has_ftr_sec1(priv);
1890 if (is_sec1)
1891 nbytes -= req_ctx->nbuf;
1893 return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
1894 nbytes, 0, 0, 0, areq->base.flags, false);
1895 }
1897 static int ahash_init(struct ahash_request *areq)
1898 {
1899 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1900 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1901 struct device *dev = ctx->dev;
1902 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1903 unsigned int size;
1904 dma_addr_t dma;
1906 /* Initialize the context */
1907 req_ctx->buf_idx = 0;
1908 req_ctx->nbuf = 0;
1909 req_ctx->first = 1; /* first indicates h/w must init its context */
1910 req_ctx->swinit = 0; /* assume h/w init of context */
1911 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1912 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1913 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1914 req_ctx->hw_context_size = size;
1916 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
1917 DMA_TO_DEVICE);
1918 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
1920 return 0;
1921 }
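/*
 * The map/unmap pair above is not a leak or a no-op: it performs a one-time
 * cache writeback of the freshly initialized hw_context, so that the later
 * nosync mappings (DMA_ATTR_SKIP_CPU_SYNC) stay coherent. A minimal sketch
 * of the idiom, assuming a CPU-written buffer the device will read later:
 */
static void flush_buf_for_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (!dma_mapping_error(dev, handle))
		dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
}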
1923 /*
1924 * on h/w without explicit sha224 support, we initialize h/w context
1925 * manually with sha224 constants, and tell it to run sha256.
1926 */
1927 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1928 {
1929 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1931 req_ctx->hw_context[0] = SHA224_H0;
1932 req_ctx->hw_context[1] = SHA224_H1;
1933 req_ctx->hw_context[2] = SHA224_H2;
1934 req_ctx->hw_context[3] = SHA224_H3;
1935 req_ctx->hw_context[4] = SHA224_H4;
1936 req_ctx->hw_context[5] = SHA224_H5;
1937 req_ctx->hw_context[6] = SHA224_H6;
1938 req_ctx->hw_context[7] = SHA224_H7;
1940 /* init 64-bit count */
1941 req_ctx->hw_context[8] = 0;
1942 req_ctx->hw_context[9] = 0;
1944 ahash_init(areq);
1945 req_ctx->swinit = 1; /* prevent h/w from initializing context with sha256 values */
1947 return 0;
1948 }
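/*
 * Why the software init works: SHA-224 is SHA-256 with different initial
 * hash values and a digest truncated to 224 bits (FIPS 180-4), so seeding
 * the context with SHA224_H0..H7 and running the SHA-256 engine produces
 * SHA-224. For reference, the eight initial values written above are:
 */
static const u32 sha224_init_state[8] = {
	0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939,
	0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4,
};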
1950 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1951 {
1952 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1953 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1954 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1955 struct talitos_edesc *edesc;
1956 unsigned int blocksize =
1957 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1958 unsigned int nbytes_to_hash;
1959 unsigned int to_hash_later;
1960 unsigned int nsg;
1961 int nents;
1962 struct device *dev = ctx->dev;
1963 struct talitos_private *priv = dev_get_drvdata(dev);
1964 bool is_sec1 = has_ftr_sec1(priv);
1965 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1967 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1968 /* Buffer up to one whole block */
1969 nents = sg_nents_for_len(areq->src, nbytes);
1970 if (nents < 0) {
1971 dev_err(ctx->dev, "Invalid number of src SG.\n");
1972 return nents;
1973 }
1974 sg_copy_to_buffer(areq->src, nents,
1975 ctx_buf + req_ctx->nbuf, nbytes);
1976 req_ctx->nbuf += nbytes;
1977 return 0;
1978 }
1980 /* At least (blocksize + 1) bytes are available to hash */
1981 nbytes_to_hash = nbytes + req_ctx->nbuf;
1982 to_hash_later = nbytes_to_hash & (blocksize - 1);
1984 if (req_ctx->last)
1985 to_hash_later = 0;
1986 else if (to_hash_later)
1987 /* There is a partial block. Hash the full block(s) now */
1988 nbytes_to_hash -= to_hash_later;
1989 else {
1990 /* Keep one block buffered */
1991 nbytes_to_hash -= blocksize;
1992 to_hash_later = blocksize;
1993 }
1995 /* Chain in any previously buffered data */
1996 if (!is_sec1 && req_ctx->nbuf) {
1997 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1998 sg_init_table(req_ctx->bufsl, nsg);
1999 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2000 if (nsg > 1)
2001 sg_chain(req_ctx->bufsl, 2, areq->src);
2002 req_ctx->psrc = req_ctx->bufsl;
2003 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2004 int offset;
2006 if (nbytes_to_hash > blocksize)
2007 offset = blocksize - req_ctx->nbuf;
2008 else
2009 offset = nbytes_to_hash - req_ctx->nbuf;
2010 nents = sg_nents_for_len(areq->src, offset);
2011 if (nents < 0) {
2012 dev_err(ctx->dev, "Invalid number of src SG.\n");
2013 return nents;
2014 }
2015 sg_copy_to_buffer(areq->src, nents,
2016 ctx_buf + req_ctx->nbuf, offset);
2017 req_ctx->nbuf += offset;
2018 req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
2019 offset);
2020 } else
2021 req_ctx->psrc = areq->src;
2023 if (to_hash_later) {
2024 nents = sg_nents_for_len(areq->src, nbytes);
2025 if (nents < 0) {
2026 dev_err(ctx->dev, "Invalid number of src SG.\n");
2027 return nents;
2028 }
2029 sg_pcopy_to_buffer(areq->src, nents,
2030 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2031 to_hash_later,
2032 nbytes - to_hash_later);
2033 }
2034 req_ctx->to_hash_later = to_hash_later;
2036 /* Allocate extended descriptor */
2037 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2038 if (IS_ERR(edesc))
2039 return PTR_ERR(edesc);
2041 edesc->desc.hdr = ctx->desc_hdr_template;
2043 /* On last one, request SEC to pad; otherwise continue */
2044 if (req_ctx->last)
2045 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2046 else
2047 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2049 /* request SEC to INIT hash. */
2050 if (req_ctx->first && !req_ctx->swinit)
2051 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
2053 /* When the tfm context has a keylen, it's an HMAC.
2054 * A first or last (i.e. not middle) descriptor must request HMAC.
2055 */
2056 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2057 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2059 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
2060 }
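/*
 * The buffering arithmetic above as a standalone sketch (assuming bs is a
 * power of two and total exceeds one block, which ahash_process_req()
 * guarantees at this point): hash only whole blocks now; when the data is
 * block-aligned and this is not the final request, hold back one full
 * block so a later final() always has input.
 */
static unsigned int split_to_hash_now(unsigned int total, unsigned int bs,
				      bool last, unsigned int *later)
{
	*later = last ? 0 : (total & (bs - 1));
	if (!last && !*later)
		*later = bs;	/* keep one whole block buffered */
	return total - *later;
}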
2062 static int ahash_update(struct ahash_request *areq)
2063 {
2064 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2066 req_ctx->last = 0;
2068 return ahash_process_req(areq, areq->nbytes);
2069 }
2071 static int ahash_final(struct ahash_request *areq)
2072 {
2073 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2075 req_ctx->last = 1;
2077 return ahash_process_req(areq, 0);
2078 }
2080 static int ahash_finup(struct ahash_request *areq)
2081 {
2082 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2084 req_ctx->last = 1;
2086 return ahash_process_req(areq, areq->nbytes);
2087 }
2089 static int ahash_digest(struct ahash_request *areq)
2090 {
2091 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2092 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2094 ahash->init(areq);
2095 req_ctx->last = 1;
2097 return ahash_process_req(areq, areq->nbytes);
2098 }
2100 static int ahash_export(struct ahash_request *areq, void *out)
2101 {
2102 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2103 struct talitos_export_state *export = out;
2104 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2105 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2106 struct device *dev = ctx->dev;
2107 dma_addr_t dma;
2109 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2110 DMA_FROM_DEVICE);
2111 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);
2113 memcpy(export->hw_context, req_ctx->hw_context,
2114 req_ctx->hw_context_size);
2115 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2116 export->swinit = req_ctx->swinit;
2117 export->first = req_ctx->first;
2118 export->last = req_ctx->last;
2119 export->to_hash_later = req_ctx->to_hash_later;
2120 export->nbuf = req_ctx->nbuf;
2122 return 0;
2123 }
2125 static int ahash_import(struct ahash_request *areq, const void *in)
2126 {
2127 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2128 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2129 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
2130 struct device *dev = ctx->dev;
2131 const struct talitos_export_state *export = in;
2132 unsigned int size;
2133 dma_addr_t dma;
2135 memset(req_ctx, 0, sizeof(*req_ctx));
2136 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2137 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2138 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2139 req_ctx->hw_context_size = size;
2140 memcpy(req_ctx->hw_context, export->hw_context, size);
2141 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2142 req_ctx->swinit = export->swinit;
2143 req_ctx->first = export->first;
2144 req_ctx->last = export->last;
2145 req_ctx->to_hash_later = export->to_hash_later;
2146 req_ctx->nbuf = export->nbuf;
2148 dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
2149 DMA_TO_DEVICE);
2150 dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);
2152 return 0;
2153 }
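/*
 * Export/import let callers checkpoint a partial hash and resume it later,
 * possibly on a different request. A hedged usage sketch against the
 * generic ahash API (checkpoint_then_resume is not a driver function); the
 * state buffer must hold at least crypto_ahash_statesize() bytes:
 */
static int checkpoint_then_resume(struct ahash_request *req, void *state)
{
	int ret = crypto_ahash_export(req, state);

	if (ret)
		return ret;
	/* ... the request could be freed and reallocated here ... */
	return crypto_ahash_import(req, state);
}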
2155 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2156 u8 *hash)
2157 {
2158 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2160 struct scatterlist sg[1];
2161 struct ahash_request *req;
2162 struct crypto_wait wait;
2163 int ret;
2165 crypto_init_wait(&wait);
2167 req = ahash_request_alloc(tfm, GFP_KERNEL);
2168 if (!req)
2169 return -ENOMEM;
2171 /* Keep tfm keylen == 0 during hash of the long key */
2172 ctx->keylen = 0;
2173 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2174 crypto_req_done, &wait);
2176 sg_init_one(&sg[0], key, keylen);
2178 ahash_request_set_crypt(req, sg, hash, keylen);
2179 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2181 ahash_request_free(req);
2183 return ret;
2184 }
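/*
 * keyhash() drives an asynchronous hash synchronously using the standard
 * crypto_wait pattern: crypto_req_done() completes the wait from the
 * callback, and crypto_wait_req() turns -EINPROGRESS/-EBUSY into a
 * blocking wait. The same pattern in isolation, with DECLARE_CRYPTO_WAIT:
 */
static int digest_one_buffer(struct crypto_ahash *tfm, const u8 *buf,
			     unsigned int len, u8 *out)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
	struct scatterlist sg;
	int ret;

	if (!req)
		return -ENOMEM;
	sg_init_one(&sg, buf, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
	ahash_request_free(req);
	return ret;
}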
2186 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2187 unsigned int keylen)
2188 {
2189 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2190 struct device *dev = ctx->dev;
2191 unsigned int blocksize =
2192 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2193 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2194 unsigned int keysize = keylen;
2195 u8 hash[SHA512_DIGEST_SIZE];
2196 int ret;
2198 if (keylen <= blocksize)
2199 memcpy(ctx->key, key, keysize);
2200 else {
2201 /* Must get the hash of the long key */
2202 ret = keyhash(tfm, key, keylen, hash);
2204 if (ret) {
2205 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2206 return -EINVAL;
2207 }
2209 keysize = digestsize;
2210 memcpy(ctx->key, hash, digestsize);
2211 }
2213 if (ctx->keylen)
2214 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2216 ctx->keylen = keysize;
2217 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2219 return 0;
2220 }
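/*
 * Key preparation above follows RFC 2104: an HMAC key no longer than the
 * block size is used as-is, a longer key is replaced by its digest.
 * Pure-logic sketch; digest_fn is a stand-in for keyhash():
 */
static unsigned int hmac_shorten_key(const u8 *key, unsigned int keylen,
				     unsigned int blocksize,
				     unsigned int digestsize, u8 *out,
				     void (*digest_fn)(const u8 *,
						       unsigned int, u8 *))
{
	if (keylen <= blocksize) {
		memcpy(out, key, keylen);
		return keylen;
	}
	digest_fn(key, keylen, out);
	return digestsize;
}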
2223 struct talitos_alg_template {
2224 u32 type;
2225 u32 priority;
2226 union {
2227 struct crypto_alg crypto;
2228 struct ahash_alg hash;
2229 struct aead_alg aead;
2230 } alg;
2231 __be32 desc_hdr_template;
2232 };
2234 static struct talitos_alg_template driver_algs[] = {
2235 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2236 { .type = CRYPTO_ALG_TYPE_AEAD,
2237 .alg.aead = {
2238 .base = {
2239 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2240 .cra_driver_name = "authenc-hmac-sha1-"
2241 "cbc-aes-talitos",
2242 .cra_blocksize = AES_BLOCK_SIZE,
2243 .cra_flags = CRYPTO_ALG_ASYNC,
2245 .ivsize = AES_BLOCK_SIZE,
2246 .maxauthsize = SHA1_DIGEST_SIZE,
2248 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2249 DESC_HDR_SEL0_AESU |
2250 DESC_HDR_MODE0_AESU_CBC |
2251 DESC_HDR_SEL1_MDEUA |
2252 DESC_HDR_MODE1_MDEU_INIT |
2253 DESC_HDR_MODE1_MDEU_PAD |
2254 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2256 { .type = CRYPTO_ALG_TYPE_AEAD,
2257 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2258 .alg.aead = {
2259 .base = {
2260 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2261 .cra_driver_name = "authenc-hmac-sha1-"
2262 "cbc-aes-talitos-hsna",
2263 .cra_blocksize = AES_BLOCK_SIZE,
2264 .cra_flags = CRYPTO_ALG_ASYNC,
2266 .ivsize = AES_BLOCK_SIZE,
2267 .maxauthsize = SHA1_DIGEST_SIZE,
2269 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2270 DESC_HDR_SEL0_AESU |
2271 DESC_HDR_MODE0_AESU_CBC |
2272 DESC_HDR_SEL1_MDEUA |
2273 DESC_HDR_MODE1_MDEU_INIT |
2274 DESC_HDR_MODE1_MDEU_PAD |
2275 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2277 { .type = CRYPTO_ALG_TYPE_AEAD,
2278 .alg.aead = {
2279 .base = {
2280 .cra_name = "authenc(hmac(sha1),"
2281 "cbc(des3_ede))",
2282 .cra_driver_name = "authenc-hmac-sha1-"
2283 "cbc-3des-talitos",
2284 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2285 .cra_flags = CRYPTO_ALG_ASYNC,
2287 .ivsize = DES3_EDE_BLOCK_SIZE,
2288 .maxauthsize = SHA1_DIGEST_SIZE,
2290 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2291 DESC_HDR_SEL0_DEU |
2292 DESC_HDR_MODE0_DEU_CBC |
2293 DESC_HDR_MODE0_DEU_3DES |
2294 DESC_HDR_SEL1_MDEUA |
2295 DESC_HDR_MODE1_MDEU_INIT |
2296 DESC_HDR_MODE1_MDEU_PAD |
2297 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2299 { .type = CRYPTO_ALG_TYPE_AEAD,
2300 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2301 .alg.aead = {
2302 .base = {
2303 .cra_name = "authenc(hmac(sha1),"
2304 "cbc(des3_ede))",
2305 .cra_driver_name = "authenc-hmac-sha1-"
2306 "cbc-3des-talitos-hsna",
2307 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2308 .cra_flags = CRYPTO_ALG_ASYNC,
2310 .ivsize = DES3_EDE_BLOCK_SIZE,
2311 .maxauthsize = SHA1_DIGEST_SIZE,
2313 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2314 DESC_HDR_SEL0_DEU |
2315 DESC_HDR_MODE0_DEU_CBC |
2316 DESC_HDR_MODE0_DEU_3DES |
2317 DESC_HDR_SEL1_MDEUA |
2318 DESC_HDR_MODE1_MDEU_INIT |
2319 DESC_HDR_MODE1_MDEU_PAD |
2320 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2322 { .type = CRYPTO_ALG_TYPE_AEAD,
2323 .alg.aead = {
2324 .base = {
2325 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2326 .cra_driver_name = "authenc-hmac-sha224-"
2327 "cbc-aes-talitos",
2328 .cra_blocksize = AES_BLOCK_SIZE,
2329 .cra_flags = CRYPTO_ALG_ASYNC,
2331 .ivsize = AES_BLOCK_SIZE,
2332 .maxauthsize = SHA224_DIGEST_SIZE,
2334 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2335 DESC_HDR_SEL0_AESU |
2336 DESC_HDR_MODE0_AESU_CBC |
2337 DESC_HDR_SEL1_MDEUA |
2338 DESC_HDR_MODE1_MDEU_INIT |
2339 DESC_HDR_MODE1_MDEU_PAD |
2340 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2342 { .type = CRYPTO_ALG_TYPE_AEAD,
2343 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2344 .alg.aead = {
2345 .base = {
2346 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2347 .cra_driver_name = "authenc-hmac-sha224-"
2348 "cbc-aes-talitos-hsna",
2349 .cra_blocksize = AES_BLOCK_SIZE,
2350 .cra_flags = CRYPTO_ALG_ASYNC,
2352 .ivsize = AES_BLOCK_SIZE,
2353 .maxauthsize = SHA224_DIGEST_SIZE,
2355 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2356 DESC_HDR_SEL0_AESU |
2357 DESC_HDR_MODE0_AESU_CBC |
2358 DESC_HDR_SEL1_MDEUA |
2359 DESC_HDR_MODE1_MDEU_INIT |
2360 DESC_HDR_MODE1_MDEU_PAD |
2361 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2363 { .type = CRYPTO_ALG_TYPE_AEAD,
2364 .alg.aead = {
2365 .base = {
2366 .cra_name = "authenc(hmac(sha224),"
2367 "cbc(des3_ede))",
2368 .cra_driver_name = "authenc-hmac-sha224-"
2369 "cbc-3des-talitos",
2370 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2371 .cra_flags = CRYPTO_ALG_ASYNC,
2373 .ivsize = DES3_EDE_BLOCK_SIZE,
2374 .maxauthsize = SHA224_DIGEST_SIZE,
2376 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2377 DESC_HDR_SEL0_DEU |
2378 DESC_HDR_MODE0_DEU_CBC |
2379 DESC_HDR_MODE0_DEU_3DES |
2380 DESC_HDR_SEL1_MDEUA |
2381 DESC_HDR_MODE1_MDEU_INIT |
2382 DESC_HDR_MODE1_MDEU_PAD |
2383 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2385 { .type = CRYPTO_ALG_TYPE_AEAD,
2386 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2387 .alg.aead = {
2388 .base = {
2389 .cra_name = "authenc(hmac(sha224),"
2390 "cbc(des3_ede))",
2391 .cra_driver_name = "authenc-hmac-sha224-"
2392 "cbc-3des-talitos-hsna",
2393 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2394 .cra_flags = CRYPTO_ALG_ASYNC,
2396 .ivsize = DES3_EDE_BLOCK_SIZE,
2397 .maxauthsize = SHA224_DIGEST_SIZE,
2399 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2400 DESC_HDR_SEL0_DEU |
2401 DESC_HDR_MODE0_DEU_CBC |
2402 DESC_HDR_MODE0_DEU_3DES |
2403 DESC_HDR_SEL1_MDEUA |
2404 DESC_HDR_MODE1_MDEU_INIT |
2405 DESC_HDR_MODE1_MDEU_PAD |
2406 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2408 { .type = CRYPTO_ALG_TYPE_AEAD,
2409 .alg.aead = {
2410 .base = {
2411 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2412 .cra_driver_name = "authenc-hmac-sha256-"
2413 "cbc-aes-talitos",
2414 .cra_blocksize = AES_BLOCK_SIZE,
2415 .cra_flags = CRYPTO_ALG_ASYNC,
2417 .ivsize = AES_BLOCK_SIZE,
2418 .maxauthsize = SHA256_DIGEST_SIZE,
2420 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2421 DESC_HDR_SEL0_AESU |
2422 DESC_HDR_MODE0_AESU_CBC |
2423 DESC_HDR_SEL1_MDEUA |
2424 DESC_HDR_MODE1_MDEU_INIT |
2425 DESC_HDR_MODE1_MDEU_PAD |
2426 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2428 { .type = CRYPTO_ALG_TYPE_AEAD,
2429 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2430 .alg.aead = {
2431 .base = {
2432 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2433 .cra_driver_name = "authenc-hmac-sha256-"
2434 "cbc-aes-talitos-hsna",
2435 .cra_blocksize = AES_BLOCK_SIZE,
2436 .cra_flags = CRYPTO_ALG_ASYNC,
2438 .ivsize = AES_BLOCK_SIZE,
2439 .maxauthsize = SHA256_DIGEST_SIZE,
2441 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2442 DESC_HDR_SEL0_AESU |
2443 DESC_HDR_MODE0_AESU_CBC |
2444 DESC_HDR_SEL1_MDEUA |
2445 DESC_HDR_MODE1_MDEU_INIT |
2446 DESC_HDR_MODE1_MDEU_PAD |
2447 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2449 { .type = CRYPTO_ALG_TYPE_AEAD,
2450 .alg.aead = {
2451 .base = {
2452 .cra_name = "authenc(hmac(sha256),"
2453 "cbc(des3_ede))",
2454 .cra_driver_name = "authenc-hmac-sha256-"
2455 "cbc-3des-talitos",
2456 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2457 .cra_flags = CRYPTO_ALG_ASYNC,
2459 .ivsize = DES3_EDE_BLOCK_SIZE,
2460 .maxauthsize = SHA256_DIGEST_SIZE,
2462 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2463 DESC_HDR_SEL0_DEU |
2464 DESC_HDR_MODE0_DEU_CBC |
2465 DESC_HDR_MODE0_DEU_3DES |
2466 DESC_HDR_SEL1_MDEUA |
2467 DESC_HDR_MODE1_MDEU_INIT |
2468 DESC_HDR_MODE1_MDEU_PAD |
2469 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2471 { .type = CRYPTO_ALG_TYPE_AEAD,
2472 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2473 .alg.aead = {
2474 .base = {
2475 .cra_name = "authenc(hmac(sha256),"
2476 "cbc(des3_ede))",
2477 .cra_driver_name = "authenc-hmac-sha256-"
2478 "cbc-3des-talitos-hsna",
2479 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2480 .cra_flags = CRYPTO_ALG_ASYNC,
2482 .ivsize = DES3_EDE_BLOCK_SIZE,
2483 .maxauthsize = SHA256_DIGEST_SIZE,
2485 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2486 DESC_HDR_SEL0_DEU |
2487 DESC_HDR_MODE0_DEU_CBC |
2488 DESC_HDR_MODE0_DEU_3DES |
2489 DESC_HDR_SEL1_MDEUA |
2490 DESC_HDR_MODE1_MDEU_INIT |
2491 DESC_HDR_MODE1_MDEU_PAD |
2492 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2494 { .type = CRYPTO_ALG_TYPE_AEAD,
2495 .alg.aead = {
2496 .base = {
2497 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2498 .cra_driver_name = "authenc-hmac-sha384-"
2499 "cbc-aes-talitos",
2500 .cra_blocksize = AES_BLOCK_SIZE,
2501 .cra_flags = CRYPTO_ALG_ASYNC,
2503 .ivsize = AES_BLOCK_SIZE,
2504 .maxauthsize = SHA384_DIGEST_SIZE,
2506 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2507 DESC_HDR_SEL0_AESU |
2508 DESC_HDR_MODE0_AESU_CBC |
2509 DESC_HDR_SEL1_MDEUB |
2510 DESC_HDR_MODE1_MDEU_INIT |
2511 DESC_HDR_MODE1_MDEU_PAD |
2512 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2514 { .type = CRYPTO_ALG_TYPE_AEAD,
2515 .alg.aead = {
2516 .base = {
2517 .cra_name = "authenc(hmac(sha384),"
2518 "cbc(des3_ede))",
2519 .cra_driver_name = "authenc-hmac-sha384-"
2520 "cbc-3des-talitos",
2521 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2522 .cra_flags = CRYPTO_ALG_ASYNC,
2524 .ivsize = DES3_EDE_BLOCK_SIZE,
2525 .maxauthsize = SHA384_DIGEST_SIZE,
2527 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2528 DESC_HDR_SEL0_DEU |
2529 DESC_HDR_MODE0_DEU_CBC |
2530 DESC_HDR_MODE0_DEU_3DES |
2531 DESC_HDR_SEL1_MDEUB |
2532 DESC_HDR_MODE1_MDEU_INIT |
2533 DESC_HDR_MODE1_MDEU_PAD |
2534 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2536 { .type = CRYPTO_ALG_TYPE_AEAD,
2537 .alg.aead = {
2538 .base = {
2539 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2540 .cra_driver_name = "authenc-hmac-sha512-"
2541 "cbc-aes-talitos",
2542 .cra_blocksize = AES_BLOCK_SIZE,
2543 .cra_flags = CRYPTO_ALG_ASYNC,
2545 .ivsize = AES_BLOCK_SIZE,
2546 .maxauthsize = SHA512_DIGEST_SIZE,
2548 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2549 DESC_HDR_SEL0_AESU |
2550 DESC_HDR_MODE0_AESU_CBC |
2551 DESC_HDR_SEL1_MDEUB |
2552 DESC_HDR_MODE1_MDEU_INIT |
2553 DESC_HDR_MODE1_MDEU_PAD |
2554 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2556 { .type = CRYPTO_ALG_TYPE_AEAD,
2557 .alg.aead = {
2558 .base = {
2559 .cra_name = "authenc(hmac(sha512),"
2560 "cbc(des3_ede))",
2561 .cra_driver_name = "authenc-hmac-sha512-"
2562 "cbc-3des-talitos",
2563 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2564 .cra_flags = CRYPTO_ALG_ASYNC,
2566 .ivsize = DES3_EDE_BLOCK_SIZE,
2567 .maxauthsize = SHA512_DIGEST_SIZE,
2569 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2570 DESC_HDR_SEL0_DEU |
2571 DESC_HDR_MODE0_DEU_CBC |
2572 DESC_HDR_MODE0_DEU_3DES |
2573 DESC_HDR_SEL1_MDEUB |
2574 DESC_HDR_MODE1_MDEU_INIT |
2575 DESC_HDR_MODE1_MDEU_PAD |
2576 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2578 { .type = CRYPTO_ALG_TYPE_AEAD,
2579 .alg.aead = {
2580 .base = {
2581 .cra_name = "authenc(hmac(md5),cbc(aes))",
2582 .cra_driver_name = "authenc-hmac-md5-"
2583 "cbc-aes-talitos",
2584 .cra_blocksize = AES_BLOCK_SIZE,
2585 .cra_flags = CRYPTO_ALG_ASYNC,
2587 .ivsize = AES_BLOCK_SIZE,
2588 .maxauthsize = MD5_DIGEST_SIZE,
2590 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2591 DESC_HDR_SEL0_AESU |
2592 DESC_HDR_MODE0_AESU_CBC |
2593 DESC_HDR_SEL1_MDEUA |
2594 DESC_HDR_MODE1_MDEU_INIT |
2595 DESC_HDR_MODE1_MDEU_PAD |
2596 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2598 { .type = CRYPTO_ALG_TYPE_AEAD,
2599 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2600 .alg.aead = {
2601 .base = {
2602 .cra_name = "authenc(hmac(md5),cbc(aes))",
2603 .cra_driver_name = "authenc-hmac-md5-"
2604 "cbc-aes-talitos-hsna",
2605 .cra_blocksize = AES_BLOCK_SIZE,
2606 .cra_flags = CRYPTO_ALG_ASYNC,
2608 .ivsize = AES_BLOCK_SIZE,
2609 .maxauthsize = MD5_DIGEST_SIZE,
2611 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2612 DESC_HDR_SEL0_AESU |
2613 DESC_HDR_MODE0_AESU_CBC |
2614 DESC_HDR_SEL1_MDEUA |
2615 DESC_HDR_MODE1_MDEU_INIT |
2616 DESC_HDR_MODE1_MDEU_PAD |
2617 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2619 { .type = CRYPTO_ALG_TYPE_AEAD,
2620 .alg.aead = {
2621 .base = {
2622 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2623 .cra_driver_name = "authenc-hmac-md5-"
2624 "cbc-3des-talitos",
2625 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2626 .cra_flags = CRYPTO_ALG_ASYNC,
2628 .ivsize = DES3_EDE_BLOCK_SIZE,
2629 .maxauthsize = MD5_DIGEST_SIZE,
2631 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2632 DESC_HDR_SEL0_DEU |
2633 DESC_HDR_MODE0_DEU_CBC |
2634 DESC_HDR_MODE0_DEU_3DES |
2635 DESC_HDR_SEL1_MDEUA |
2636 DESC_HDR_MODE1_MDEU_INIT |
2637 DESC_HDR_MODE1_MDEU_PAD |
2638 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2640 { .type = CRYPTO_ALG_TYPE_AEAD,
2641 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2642 .alg.aead = {
2643 .base = {
2644 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2645 .cra_driver_name = "authenc-hmac-md5-"
2646 "cbc-3des-talitos-hsna",
2647 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2648 .cra_flags = CRYPTO_ALG_ASYNC,
2650 .ivsize = DES3_EDE_BLOCK_SIZE,
2651 .maxauthsize = MD5_DIGEST_SIZE,
2653 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2654 DESC_HDR_SEL0_DEU |
2655 DESC_HDR_MODE0_DEU_CBC |
2656 DESC_HDR_MODE0_DEU_3DES |
2657 DESC_HDR_SEL1_MDEUA |
2658 DESC_HDR_MODE1_MDEU_INIT |
2659 DESC_HDR_MODE1_MDEU_PAD |
2660 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2662 /* ABLKCIPHER algorithms. */
2663 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2664 .alg.crypto = {
2665 .cra_name = "ecb(aes)",
2666 .cra_driver_name = "ecb-aes-talitos",
2667 .cra_blocksize = AES_BLOCK_SIZE,
2668 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2669 CRYPTO_ALG_ASYNC,
2670 .cra_ablkcipher = {
2671 .min_keysize = AES_MIN_KEY_SIZE,
2672 .max_keysize = AES_MAX_KEY_SIZE,
2675 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2676 DESC_HDR_SEL0_AESU,
2678 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2679 .alg.crypto = {
2680 .cra_name = "cbc(aes)",
2681 .cra_driver_name = "cbc-aes-talitos",
2682 .cra_blocksize = AES_BLOCK_SIZE,
2683 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2684 CRYPTO_ALG_ASYNC,
2685 .cra_ablkcipher = {
2686 .min_keysize = AES_MIN_KEY_SIZE,
2687 .max_keysize = AES_MAX_KEY_SIZE,
2688 .ivsize = AES_BLOCK_SIZE,
2689 .setkey = ablkcipher_aes_setkey,
2692 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2693 DESC_HDR_SEL0_AESU |
2694 DESC_HDR_MODE0_AESU_CBC,
2696 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2697 .alg.crypto = {
2698 .cra_name = "ctr(aes)",
2699 .cra_driver_name = "ctr-aes-talitos",
2700 .cra_blocksize = 1,
2701 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2702 CRYPTO_ALG_ASYNC,
2703 .cra_ablkcipher = {
2704 .min_keysize = AES_MIN_KEY_SIZE,
2705 .max_keysize = AES_MAX_KEY_SIZE,
2706 .ivsize = AES_BLOCK_SIZE,
2707 .setkey = ablkcipher_aes_setkey,
2710 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2711 DESC_HDR_SEL0_AESU |
2712 DESC_HDR_MODE0_AESU_CTR,
2714 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2715 .alg.crypto = {
2716 .cra_name = "ecb(des)",
2717 .cra_driver_name = "ecb-des-talitos",
2718 .cra_blocksize = DES_BLOCK_SIZE,
2719 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2720 CRYPTO_ALG_ASYNC,
2721 .cra_ablkcipher = {
2722 .min_keysize = DES_KEY_SIZE,
2723 .max_keysize = DES_KEY_SIZE,
2724 .ivsize = DES_BLOCK_SIZE,
2727 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2728 DESC_HDR_SEL0_DEU,
2730 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2731 .alg.crypto = {
2732 .cra_name = "cbc(des)",
2733 .cra_driver_name = "cbc-des-talitos",
2734 .cra_blocksize = DES_BLOCK_SIZE,
2735 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2736 CRYPTO_ALG_ASYNC,
2737 .cra_ablkcipher = {
2738 .min_keysize = DES_KEY_SIZE,
2739 .max_keysize = DES_KEY_SIZE,
2740 .ivsize = DES_BLOCK_SIZE,
2743 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2744 DESC_HDR_SEL0_DEU |
2745 DESC_HDR_MODE0_DEU_CBC,
2747 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2748 .alg.crypto = {
2749 .cra_name = "ecb(des3_ede)",
2750 .cra_driver_name = "ecb-3des-talitos",
2751 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2752 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2753 CRYPTO_ALG_ASYNC,
2754 .cra_ablkcipher = {
2755 .min_keysize = DES3_EDE_KEY_SIZE,
2756 .max_keysize = DES3_EDE_KEY_SIZE,
2757 .ivsize = DES3_EDE_BLOCK_SIZE,
2760 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2761 DESC_HDR_SEL0_DEU |
2762 DESC_HDR_MODE0_DEU_3DES,
2764 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2765 .alg.crypto = {
2766 .cra_name = "cbc(des3_ede)",
2767 .cra_driver_name = "cbc-3des-talitos",
2768 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2769 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2770 CRYPTO_ALG_ASYNC,
2771 .cra_ablkcipher = {
2772 .min_keysize = DES3_EDE_KEY_SIZE,
2773 .max_keysize = DES3_EDE_KEY_SIZE,
2774 .ivsize = DES3_EDE_BLOCK_SIZE,
2777 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2778 DESC_HDR_SEL0_DEU |
2779 DESC_HDR_MODE0_DEU_CBC |
2780 DESC_HDR_MODE0_DEU_3DES,
2782 /* AHASH algorithms. */
2783 { .type = CRYPTO_ALG_TYPE_AHASH,
2784 .alg.hash = {
2785 .halg.digestsize = MD5_DIGEST_SIZE,
2786 .halg.statesize = sizeof(struct talitos_export_state),
2787 .halg.base = {
2788 .cra_name = "md5",
2789 .cra_driver_name = "md5-talitos",
2790 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2791 .cra_flags = CRYPTO_ALG_ASYNC,
2794 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2795 DESC_HDR_SEL0_MDEUA |
2796 DESC_HDR_MODE0_MDEU_MD5,
2798 { .type = CRYPTO_ALG_TYPE_AHASH,
2799 .alg.hash = {
2800 .halg.digestsize = SHA1_DIGEST_SIZE,
2801 .halg.statesize = sizeof(struct talitos_export_state),
2802 .halg.base = {
2803 .cra_name = "sha1",
2804 .cra_driver_name = "sha1-talitos",
2805 .cra_blocksize = SHA1_BLOCK_SIZE,
2806 .cra_flags = CRYPTO_ALG_ASYNC,
2809 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2810 DESC_HDR_SEL0_MDEUA |
2811 DESC_HDR_MODE0_MDEU_SHA1,
2813 { .type = CRYPTO_ALG_TYPE_AHASH,
2814 .alg.hash = {
2815 .halg.digestsize = SHA224_DIGEST_SIZE,
2816 .halg.statesize = sizeof(struct talitos_export_state),
2817 .halg.base = {
2818 .cra_name = "sha224",
2819 .cra_driver_name = "sha224-talitos",
2820 .cra_blocksize = SHA224_BLOCK_SIZE,
2821 .cra_flags = CRYPTO_ALG_ASYNC,
2824 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2825 DESC_HDR_SEL0_MDEUA |
2826 DESC_HDR_MODE0_MDEU_SHA224,
2828 { .type = CRYPTO_ALG_TYPE_AHASH,
2829 .alg.hash = {
2830 .halg.digestsize = SHA256_DIGEST_SIZE,
2831 .halg.statesize = sizeof(struct talitos_export_state),
2832 .halg.base = {
2833 .cra_name = "sha256",
2834 .cra_driver_name = "sha256-talitos",
2835 .cra_blocksize = SHA256_BLOCK_SIZE,
2836 .cra_flags = CRYPTO_ALG_ASYNC,
2839 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2840 DESC_HDR_SEL0_MDEUA |
2841 DESC_HDR_MODE0_MDEU_SHA256,
2843 { .type = CRYPTO_ALG_TYPE_AHASH,
2844 .alg.hash = {
2845 .halg.digestsize = SHA384_DIGEST_SIZE,
2846 .halg.statesize = sizeof(struct talitos_export_state),
2847 .halg.base = {
2848 .cra_name = "sha384",
2849 .cra_driver_name = "sha384-talitos",
2850 .cra_blocksize = SHA384_BLOCK_SIZE,
2851 .cra_flags = CRYPTO_ALG_ASYNC,
2854 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2855 DESC_HDR_SEL0_MDEUB |
2856 DESC_HDR_MODE0_MDEUB_SHA384,
2858 { .type = CRYPTO_ALG_TYPE_AHASH,
2859 .alg.hash = {
2860 .halg.digestsize = SHA512_DIGEST_SIZE,
2861 .halg.statesize = sizeof(struct talitos_export_state),
2862 .halg.base = {
2863 .cra_name = "sha512",
2864 .cra_driver_name = "sha512-talitos",
2865 .cra_blocksize = SHA512_BLOCK_SIZE,
2866 .cra_flags = CRYPTO_ALG_ASYNC,
2869 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2870 DESC_HDR_SEL0_MDEUB |
2871 DESC_HDR_MODE0_MDEUB_SHA512,
2873 { .type = CRYPTO_ALG_TYPE_AHASH,
2874 .alg.hash = {
2875 .halg.digestsize = MD5_DIGEST_SIZE,
2876 .halg.statesize = sizeof(struct talitos_export_state),
2877 .halg.base = {
2878 .cra_name = "hmac(md5)",
2879 .cra_driver_name = "hmac-md5-talitos",
2880 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2881 .cra_flags = CRYPTO_ALG_ASYNC,
2884 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2885 DESC_HDR_SEL0_MDEUA |
2886 DESC_HDR_MODE0_MDEU_MD5,
2888 { .type = CRYPTO_ALG_TYPE_AHASH,
2889 .alg.hash = {
2890 .halg.digestsize = SHA1_DIGEST_SIZE,
2891 .halg.statesize = sizeof(struct talitos_export_state),
2892 .halg.base = {
2893 .cra_name = "hmac(sha1)",
2894 .cra_driver_name = "hmac-sha1-talitos",
2895 .cra_blocksize = SHA1_BLOCK_SIZE,
2896 .cra_flags = CRYPTO_ALG_ASYNC,
2899 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2900 DESC_HDR_SEL0_MDEUA |
2901 DESC_HDR_MODE0_MDEU_SHA1,
2903 { .type = CRYPTO_ALG_TYPE_AHASH,
2904 .alg.hash = {
2905 .halg.digestsize = SHA224_DIGEST_SIZE,
2906 .halg.statesize = sizeof(struct talitos_export_state),
2907 .halg.base = {
2908 .cra_name = "hmac(sha224)",
2909 .cra_driver_name = "hmac-sha224-talitos",
2910 .cra_blocksize = SHA224_BLOCK_SIZE,
2911 .cra_flags = CRYPTO_ALG_ASYNC,
2914 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2915 DESC_HDR_SEL0_MDEUA |
2916 DESC_HDR_MODE0_MDEU_SHA224,
2918 { .type = CRYPTO_ALG_TYPE_AHASH,
2919 .alg.hash = {
2920 .halg.digestsize = SHA256_DIGEST_SIZE,
2921 .halg.statesize = sizeof(struct talitos_export_state),
2922 .halg.base = {
2923 .cra_name = "hmac(sha256)",
2924 .cra_driver_name = "hmac-sha256-talitos",
2925 .cra_blocksize = SHA256_BLOCK_SIZE,
2926 .cra_flags = CRYPTO_ALG_ASYNC,
2929 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2930 DESC_HDR_SEL0_MDEUA |
2931 DESC_HDR_MODE0_MDEU_SHA256,
2933 { .type = CRYPTO_ALG_TYPE_AHASH,
2934 .alg.hash = {
2935 .halg.digestsize = SHA384_DIGEST_SIZE,
2936 .halg.statesize = sizeof(struct talitos_export_state),
2937 .halg.base = {
2938 .cra_name = "hmac(sha384)",
2939 .cra_driver_name = "hmac-sha384-talitos",
2940 .cra_blocksize = SHA384_BLOCK_SIZE,
2941 .cra_flags = CRYPTO_ALG_ASYNC,
2944 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2945 DESC_HDR_SEL0_MDEUB |
2946 DESC_HDR_MODE0_MDEUB_SHA384,
2948 { .type = CRYPTO_ALG_TYPE_AHASH,
2949 .alg.hash = {
2950 .halg.digestsize = SHA512_DIGEST_SIZE,
2951 .halg.statesize = sizeof(struct talitos_export_state),
2952 .halg.base = {
2953 .cra_name = "hmac(sha512)",
2954 .cra_driver_name = "hmac-sha512-talitos",
2955 .cra_blocksize = SHA512_BLOCK_SIZE,
2956 .cra_flags = CRYPTO_ALG_ASYNC,
2959 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2960 DESC_HDR_SEL0_MDEUB |
2961 DESC_HDR_MODE0_MDEUB_SHA512,
2962 },
2963 };
2965 struct talitos_crypto_alg {
2966 struct list_head entry;
2967 struct device *dev;
2968 struct talitos_alg_template algt;
2969 };
2971 static int talitos_init_common(struct talitos_ctx *ctx,
2972 struct talitos_crypto_alg *talitos_alg)
2973 {
2974 struct talitos_private *priv;
2976 /* update context with ptr to dev */
2977 ctx->dev = talitos_alg->dev;
2979 /* assign SEC channel to tfm in round-robin fashion */
2980 priv = dev_get_drvdata(ctx->dev);
2981 ctx->ch = atomic_inc_return(&priv->last_chan) &
2982 (priv->num_channels - 1);
2984 /* copy descriptor header template value */
2985 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2987 /* select done notification */
2988 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2990 return 0;
2991 }
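/*
 * The channel assignment above in isolation: num_channels is validated to
 * be a power of two at probe time, so the AND mask is a cheap modulo and
 * the atomic counter spreads tfms across channels without locking.
 */
static unsigned int pick_channel(atomic_t *last_chan, unsigned int nchan)
{
	return atomic_inc_return(last_chan) & (nchan - 1);
}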
2993 static int talitos_cra_init(struct crypto_tfm *tfm)
2994 {
2995 struct crypto_alg *alg = tfm->__crt_alg;
2996 struct talitos_crypto_alg *talitos_alg;
2997 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2999 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3000 talitos_alg = container_of(__crypto_ahash_alg(alg),
3001 struct talitos_crypto_alg,
3002 algt.alg.hash);
3003 else
3004 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3005 algt.alg.crypto);
3007 return talitos_init_common(ctx, talitos_alg);
3008 }
3010 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3011 {
3012 struct aead_alg *alg = crypto_aead_alg(tfm);
3013 struct talitos_crypto_alg *talitos_alg;
3014 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3016 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3017 algt.alg.aead);
3019 return talitos_init_common(ctx, talitos_alg);
3020 }
3022 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3023 {
3024 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3026 talitos_cra_init(tfm);
3028 ctx->keylen = 0;
3029 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3030 sizeof(struct talitos_ahash_req_ctx));
3032 return 0;
3033 }
3035 static void talitos_cra_exit(struct crypto_tfm *tfm)
3036 {
3037 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3038 struct device *dev = ctx->dev;
3040 if (ctx->keylen)
3041 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
3042 }
3044 /*
3045 * given the alg's descriptor header template, determine whether descriptor
3046 * type and primary/secondary execution units required match the hw
3047 * capabilities description provided in the device tree node.
3048 */
3049 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3050 {
3051 struct talitos_private *priv = dev_get_drvdata(dev);
3052 int ret;
3054 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3055 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3057 if (SECONDARY_EU(desc_hdr_template))
3058 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3059 & priv->exec_units);
3061 return ret;
3062 }
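/*
 * The same capability test with the header decoding abstracted away (the
 * type/eu parameters stand in for the driver's DESC_TYPE(), PRIMARY_EU()
 * and SECONDARY_EU() macros): a descriptor is usable when its type bit and
 * every required execution-unit bit are set in the device-tree masks.
 */
static bool desc_supported(unsigned int type, unsigned int eu1,
			   unsigned int eu2, u32 desc_types, u32 exec_units)
{
	bool ok = (BIT(type) & desc_types) && (BIT(eu1) & exec_units);

	return eu2 ? ok && (BIT(eu2) & exec_units) : ok;
}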
3064 static int talitos_remove(struct platform_device *ofdev)
3065 {
3066 struct device *dev = &ofdev->dev;
3067 struct talitos_private *priv = dev_get_drvdata(dev);
3068 struct talitos_crypto_alg *t_alg, *n;
3069 int i;
3071 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3072 switch (t_alg->algt.type) {
3073 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3074 break;
3075 case CRYPTO_ALG_TYPE_AEAD:
3076 crypto_unregister_aead(&t_alg->algt.alg.aead);
3077 break;
3078 case CRYPTO_ALG_TYPE_AHASH:
3079 crypto_unregister_ahash(&t_alg->algt.alg.hash);
3080 break;
3081 }
3082 list_del(&t_alg->entry);
3083 }
3085 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3086 talitos_unregister_rng(dev);
3088 for (i = 0; i < 2; i++)
3089 if (priv->irq[i]) {
3090 free_irq(priv->irq[i], dev);
3091 irq_dispose_mapping(priv->irq[i]);
3092 }
3094 tasklet_kill(&priv->done_task[0]);
3095 if (priv->irq[1])
3096 tasklet_kill(&priv->done_task[1]);
3098 return 0;
3099 }
3101 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3102 struct talitos_alg_template
3103 *template)
3104 {
3105 struct talitos_private *priv = dev_get_drvdata(dev);
3106 struct talitos_crypto_alg *t_alg;
3107 struct crypto_alg *alg;
3109 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3110 GFP_KERNEL);
3111 if (!t_alg)
3112 return ERR_PTR(-ENOMEM);
3114 t_alg->algt = *template;
3116 switch (t_alg->algt.type) {
3117 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3118 alg = &t_alg->algt.alg.crypto;
3119 alg->cra_init = talitos_cra_init;
3120 alg->cra_exit = talitos_cra_exit;
3121 alg->cra_type = &crypto_ablkcipher_type;
3122 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3123 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3124 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3125 alg->cra_ablkcipher.geniv = "eseqiv";
3126 break;
3127 case CRYPTO_ALG_TYPE_AEAD:
3128 alg = &t_alg->algt.alg.aead.base;
3129 alg->cra_exit = talitos_cra_exit;
3130 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3131 t_alg->algt.alg.aead.setkey = aead_setkey;
3132 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3133 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3134 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3135 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3136 devm_kfree(dev, t_alg);
3137 return ERR_PTR(-ENOTSUPP);
3138 }
3139 break;
3140 case CRYPTO_ALG_TYPE_AHASH:
3141 alg = &t_alg->algt.alg.hash.halg.base;
3142 alg->cra_init = talitos_cra_init_ahash;
3143 alg->cra_exit = talitos_cra_exit;
3144 t_alg->algt.alg.hash.init = ahash_init;
3145 t_alg->algt.alg.hash.update = ahash_update;
3146 t_alg->algt.alg.hash.final = ahash_final;
3147 t_alg->algt.alg.hash.finup = ahash_finup;
3148 t_alg->algt.alg.hash.digest = ahash_digest;
3149 if (!strncmp(alg->cra_name, "hmac", 4))
3150 t_alg->algt.alg.hash.setkey = ahash_setkey;
3151 t_alg->algt.alg.hash.import = ahash_import;
3152 t_alg->algt.alg.hash.export = ahash_export;
3154 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3155 !strncmp(alg->cra_name, "hmac", 4)) {
3156 devm_kfree(dev, t_alg);
3157 return ERR_PTR(-ENOTSUPP);
3158 }
3159 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3160 (!strcmp(alg->cra_name, "sha224") ||
3161 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3162 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3163 t_alg->algt.desc_hdr_template =
3164 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3165 DESC_HDR_SEL0_MDEUA |
3166 DESC_HDR_MODE0_MDEU_SHA256;
3167 }
3168 break;
3169 default:
3170 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3171 devm_kfree(dev, t_alg);
3172 return ERR_PTR(-EINVAL);
3173 }
3175 alg->cra_module = THIS_MODULE;
3176 if (t_alg->algt.priority)
3177 alg->cra_priority = t_alg->algt.priority;
3178 else
3179 alg->cra_priority = TALITOS_CRA_PRIORITY;
3180 if (has_ftr_sec1(priv))
3181 alg->cra_alignmask = 3;
3182 else
3183 alg->cra_alignmask = 0;
3184 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3185 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3187 t_alg->dev = dev;
3189 return t_alg;
3190 }
3192 static int talitos_probe_irq(struct platform_device *ofdev)
3193 {
3194 struct device *dev = &ofdev->dev;
3195 struct device_node *np = ofdev->dev.of_node;
3196 struct talitos_private *priv = dev_get_drvdata(dev);
3197 int err;
3198 bool is_sec1 = has_ftr_sec1(priv);
3200 priv->irq[0] = irq_of_parse_and_map(np, 0);
3201 if (!priv->irq[0]) {
3202 dev_err(dev, "failed to map irq\n");
3203 return -EINVAL;
3204 }
3205 if (is_sec1) {
3206 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3207 dev_driver_string(dev), dev);
3208 goto primary_out;
3209 }
3211 priv->irq[1] = irq_of_parse_and_map(np, 1);
3213 /* get the primary irq line */
3214 if (!priv->irq[1]) {
3215 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3216 dev_driver_string(dev), dev);
3217 goto primary_out;
3218 }
3220 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3221 dev_driver_string(dev), dev);
3222 if (err)
3223 goto primary_out;
3225 /* get the secondary irq line */
3226 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3227 dev_driver_string(dev), dev);
3228 if (err) {
3229 dev_err(dev, "failed to request secondary irq\n");
3230 irq_dispose_mapping(priv->irq[1]);
3231 priv->irq[1] = 0;
3232 }
3234 return err;
3236 primary_out:
3237 if (err) {
3238 dev_err(dev, "failed to request primary irq\n");
3239 irq_dispose_mapping(priv->irq[0]);
3240 priv->irq[0] = 0;
3241 }
3243 return err;
3244 }
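/*
 * The map-request-unwind shape used repeatedly above, folded into one
 * helper (a sketch, not driver code; handler and out are caller-supplied):
 */
static int map_and_request_irq(struct device *dev, struct device_node *np,
			       int index, irq_handler_t handler,
			       unsigned int *out)
{
	unsigned int irq = irq_of_parse_and_map(np, index);
	int err;

	if (!irq)
		return -EINVAL;
	err = request_irq(irq, handler, 0, dev_driver_string(dev), dev);
	if (err) {
		irq_dispose_mapping(irq);	/* leave nothing half-done */
		return err;
	}
	*out = irq;
	return 0;
}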
3246 static int talitos_probe(struct platform_device *ofdev)
3247 {
3248 struct device *dev = &ofdev->dev;
3249 struct device_node *np = ofdev->dev.of_node;
3250 struct talitos_private *priv;
3251 int i, err;
3252 int stride;
3253 struct resource *res;
3255 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3256 if (!priv)
3257 return -ENOMEM;
3259 INIT_LIST_HEAD(&priv->alg_list);
3261 dev_set_drvdata(dev, priv);
3263 priv->ofdev = ofdev;
3265 spin_lock_init(&priv->reg_lock);
3267 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3268 if (!res)
3269 return -ENXIO;
3270 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3271 if (!priv->reg) {
3272 dev_err(dev, "failed to of_iomap\n");
3273 err = -ENOMEM;
3274 goto err_out;
3275 }
3277 /* get SEC version capabilities from device tree */
3278 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3279 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3280 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3281 of_property_read_u32(np, "fsl,descriptor-types-mask",
3282 &priv->desc_types);
3284 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3285 !priv->exec_units || !priv->desc_types) {
3286 dev_err(dev, "invalid property data in device tree node\n");
3287 err = -EINVAL;
3288 goto err_out;
3289 }
3291 if (of_device_is_compatible(np, "fsl,sec3.0"))
3292 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3294 if (of_device_is_compatible(np, "fsl,sec2.1"))
3295 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3296 TALITOS_FTR_SHA224_HWINIT |
3297 TALITOS_FTR_HMAC_OK;
3299 if (of_device_is_compatible(np, "fsl,sec1.0"))
3300 priv->features |= TALITOS_FTR_SEC1;
3302 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3303 priv->reg_deu = priv->reg + TALITOS12_DEU;
3304 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3305 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3306 stride = TALITOS1_CH_STRIDE;
3307 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3308 priv->reg_deu = priv->reg + TALITOS10_DEU;
3309 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3310 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3311 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3312 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3313 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3314 stride = TALITOS1_CH_STRIDE;
3315 } else {
3316 priv->reg_deu = priv->reg + TALITOS2_DEU;
3317 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3318 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3319 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3320 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3321 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3322 priv->reg_keu = priv->reg + TALITOS2_KEU;
3323 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3324 stride = TALITOS2_CH_STRIDE;
3325 }
3327 err = talitos_probe_irq(ofdev);
3328 if (err)
3329 goto err_out;
3331 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3332 if (priv->num_channels == 1)
3333 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3334 (unsigned long)dev);
3335 else
3336 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3337 (unsigned long)dev);
3338 } else {
3339 if (priv->irq[1]) {
3340 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3341 (unsigned long)dev);
3342 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3343 (unsigned long)dev);
3344 } else if (priv->num_channels == 1) {
3345 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3346 (unsigned long)dev);
3347 } else {
3348 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3349 (unsigned long)dev);
3350 }
3351 }
3353 priv->chan = devm_kcalloc(dev,
3354 priv->num_channels,
3355 sizeof(struct talitos_channel),
3356 GFP_KERNEL);
3357 if (!priv->chan) {
3358 dev_err(dev, "failed to allocate channel management space\n");
3359 err = -ENOMEM;
3360 goto err_out;
3361 }
3363 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3365 for (i = 0; i < priv->num_channels; i++) {
3366 priv->chan[i].reg = priv->reg + stride * (i + 1);
3367 if (!priv->irq[1] || !(i & 1))
3368 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3370 spin_lock_init(&priv->chan[i].head_lock);
3371 spin_lock_init(&priv->chan[i].tail_lock);
3373 priv->chan[i].fifo = devm_kcalloc(dev,
3374 priv->fifo_len,
3375 sizeof(struct talitos_request),
3376 GFP_KERNEL);
3377 if (!priv->chan[i].fifo) {
3378 dev_err(dev, "failed to allocate request fifo %d\n", i);
3379 err = -ENOMEM;
3380 goto err_out;
3381 }
3383 atomic_set(&priv->chan[i].submit_count,
3384 -(priv->chfifo_len - 1));
3385 }
3387 dma_set_mask(dev, DMA_BIT_MASK(36));
3389 /* reset and initialize the h/w */
3390 err = init_device(dev);
3391 if (err) {
3392 dev_err(dev, "failed to initialize device\n");
3393 goto err_out;
3394 }
3396 /* register the RNG, if available */
3397 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3398 err = talitos_register_rng(dev);
3399 if (err) {
3400 dev_err(dev, "failed to register hwrng: %d\n", err);
3401 goto err_out;
3402 } else
3403 dev_info(dev, "hwrng\n");
3404 }
3406 /* register crypto algorithms the device supports */
3407 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3408 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3409 struct talitos_crypto_alg *t_alg;
3410 struct crypto_alg *alg = NULL;
3412 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3413 if (IS_ERR(t_alg)) {
3414 err = PTR_ERR(t_alg);
3415 if (err == -ENOTSUPP)
3416 continue;
3417 goto err_out;
3418 }
3420 switch (t_alg->algt.type) {
3421 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3422 err = crypto_register_alg(
3423 &t_alg->algt.alg.crypto);
3424 alg = &t_alg->algt.alg.crypto;
3425 break;
3427 case CRYPTO_ALG_TYPE_AEAD:
3428 err = crypto_register_aead(
3429 &t_alg->algt.alg.aead);
3430 alg = &t_alg->algt.alg.aead.base;
3431 break;
3433 case CRYPTO_ALG_TYPE_AHASH:
3434 err = crypto_register_ahash(
3435 &t_alg->algt.alg.hash);
3436 alg = &t_alg->algt.alg.hash.halg.base;
3437 break;
3438 }
3439 if (err) {
3440 dev_err(dev, "%s alg registration failed\n",
3441 alg->cra_driver_name);
3442 devm_kfree(dev, t_alg);
3443 } else
3444 list_add_tail(&t_alg->entry, &priv->alg_list);
3445 }
3446 }
3447 if (!list_empty(&priv->alg_list))
3448 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3449 (char *)of_get_property(np, "compatible", NULL));
3451 return 0;
3453 err_out:
3454 talitos_remove(ofdev);
3456 return err;
3457 }
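/*
 * The device-tree handshake at the top of talitos_probe(), condensed into
 * one sketch (read_sec_caps is not a driver function): the four properties
 * describe the SEC instance, and probing fails fast when any is missing or
 * inconsistent.
 */
static int read_sec_caps(struct device_node *np, struct talitos_private *priv)
{
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types)
		return -EINVAL;
	return 0;
}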
3459 static const struct of_device_id talitos_match[] = {
3460 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3461 {
3462 .compatible = "fsl,sec1.0",
3463 },
3464 #endif
3465 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3466 {
3467 .compatible = "fsl,sec2.0",
3468 },
3469 #endif
3470 {},
3471 };
3472 MODULE_DEVICE_TABLE(of, talitos_match);
3474 static struct platform_driver talitos_driver = {
3475 .driver = {
3476 .name = "talitos",
3477 .of_match_table = talitos_match,
3478 },
3479 .probe = talitos_probe,
3480 .remove = talitos_remove,
3481 };
3483 module_platform_driver(talitos_driver);
3485 MODULE_LICENSE("GPL");
3486 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3487 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");