/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
                           unsigned int len, bool is_sec1)
{
        ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
        if (is_sec1) {
                ptr->len1 = cpu_to_be16(len);
        } else {
                ptr->len = cpu_to_be16(len);
                ptr->eptr = upper_32_bits(dma_addr);
        }
}
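/*
 * Note on the two pointer formats handled above: SEC1 descriptor pointers
 * carry a 16-bit length in len1 and a 32-bit address only, while SEC2+
 * pointers use len plus an eptr extension byte holding address bits 32-35,
 * giving 36-bit DMA addressing.
 */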
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
                             struct talitos_ptr *src_ptr, bool is_sec1)
{
        dst_ptr->ptr = src_ptr->ptr;
        if (is_sec1) {
                dst_ptr->len1 = src_ptr->len1;
        } else {
                dst_ptr->len = src_ptr->len;
                dst_ptr->eptr = src_ptr->eptr;
        }
}
static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
                                           bool is_sec1)
{
        if (is_sec1)
                return be16_to_cpu(ptr->len1);
        else
                return be16_to_cpu(ptr->len);
}
static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
                                   bool is_sec1)
{
        if (!is_sec1)
                ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
        if (!is_sec1)
                ptr->j_extent |= val;
}
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void __map_single_talitos_ptr(struct device *dev,
                                     struct talitos_ptr *ptr,
                                     unsigned int len, void *data,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        dma_addr_t dma_addr = dma_map_single_attrs(dev, data, len, dir, attrs);
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}
static void map_single_talitos_ptr(struct device *dev,
                                   struct talitos_ptr *ptr,
                                   unsigned int len, void *data,
                                   enum dma_data_direction dir)
{
        __map_single_talitos_ptr(dev, ptr, len, data, dir, 0);
}
static void map_single_talitos_ptr_nosync(struct device *dev,
                                          struct talitos_ptr *ptr,
                                          unsigned int len, void *data,
                                          enum dma_data_direction dir)
{
        __map_single_talitos_ptr(dev, ptr, len, data, dir,
                                 DMA_ATTR_SKIP_CPU_SYNC);
}
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
                                     struct talitos_ptr *ptr,
                                     enum dma_data_direction dir)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
                         from_talitos_ptr_len(ptr, is_sec1), dir);
}
static int reset_channel(struct device *dev, int ch)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;
        bool is_sec1 = has_ftr_sec1(priv);

        if (is_sec1) {
                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
                          TALITOS1_CCCR_LO_RESET);

                while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
                        TALITOS1_CCCR_LO_RESET) && --timeout)
                        cpu_relax();
        } else {
                setbits32(priv->chan[ch].reg + TALITOS_CCCR,
                          TALITOS2_CCCR_RESET);

                while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
                        TALITOS2_CCCR_RESET) && --timeout)
                        cpu_relax();
        }

        if (timeout == 0) {
                dev_err(dev, "failed to reset channel %d\n", ch);
                return -EIO;
        }

        /* set 36-bit addressing, done writeback enable and done IRQ enable */
        setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
                  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
        /* enable chaining descriptors */
        if (is_sec1)
                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
                          TALITOS_CCCR_LO_NE);

        /* and ICCR writeback, if available */
        if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
                setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
                          TALITOS_CCCR_LO_IWSE);

        return 0;
}
static int reset_device(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;
        bool is_sec1 = has_ftr_sec1(priv);
        u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

        setbits32(priv->reg + TALITOS_MCR, mcr);

        while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
               && --timeout)
                cpu_relax();

        if (priv->irq[1]) {
                mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
                setbits32(priv->reg + TALITOS_MCR, mcr);
        }

        if (timeout == 0) {
                dev_err(dev, "failed to reset device\n");
                return -EIO;
        }

        return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int ch, err;
        bool is_sec1 = has_ftr_sec1(priv);

        /*
         * Master reset
         * errata documentation: warning: certain SEC interrupts
         * are not fully cleared by writing the MCR:SWR bit,
         * set bit twice to completely reset
         */
        err = reset_device(dev);
        if (err)
                return err;

        err = reset_device(dev);
        if (err)
                return err;

        /* reset channels */
        for (ch = 0; ch < priv->num_channels; ch++) {
                err = reset_channel(dev, ch);
                if (err)
                        return err;
        }

        /* enable channel done and error interrupts */
        if (is_sec1) {
                clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
                clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
                /* disable parity error check in DEU (erroneous? test vect.) */
                setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
        } else {
                setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
                setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
        }

        /* disable integrity check error interrupts (use writeback instead) */
        if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
                setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
                          TALITOS_MDEUICR_LO_ICE);

        return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
                   void (*callback)(struct device *dev,
                                    struct talitos_desc *desc,
                                    void *context, int error),
                   void *context)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        struct talitos_request *request;
        unsigned long flags;
        int head;
        bool is_sec1 = has_ftr_sec1(priv);

        spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

        if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
                /* h/w fifo is full */
                spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
                return -EAGAIN;
        }

        head = priv->chan[ch].head;
        request = &priv->chan[ch].fifo[head];

        /* map descriptor and save caller data */
        if (is_sec1) {
                desc->hdr1 = desc->hdr;
                request->dma_desc = dma_map_single(dev, &desc->hdr1,
                                                   TALITOS_DESC_SIZE,
                                                   DMA_BIDIRECTIONAL);
        } else {
                request->dma_desc = dma_map_single(dev, desc,
                                                   TALITOS_DESC_SIZE,
                                                   DMA_BIDIRECTIONAL);
        }
        request->callback = callback;
        request->context = context;

        /* increment fifo head */
        priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

        smp_wmb();
        request->desc = desc;

        /* GO! */
        wmb();
        out_be32(priv->chan[ch].reg + TALITOS_FF,
                 upper_32_bits(request->dma_desc));
        out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
                 lower_32_bits(request->dma_desc));

        spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

        return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
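/*
 * Caller sketch (illustrative only, not code from this driver; my_done and
 * my_req are hypothetical names): a client builds a dma-mapped descriptor,
 * submits it, and finishes the crypto request from the completion callback:
 *
 *	ret = talitos_submit(dev, ctx->ch, &edesc->desc, my_done, my_req);
 *	if (ret != -EINPROGRESS)
 *		cleanup(my_req, ret);	// e.g. -EAGAIN when the fifo is full
 */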
static __be32 get_request_hdr(struct talitos_request *request, bool is_sec1)
{
        struct talitos_edesc *edesc;

        if (!is_sec1)
                return request->desc->hdr;

        if (!request->desc->next_desc)
                return request->desc->hdr1;

        edesc = container_of(request->desc, struct talitos_edesc, desc);

        return ((struct talitos_desc *)(edesc->buf + edesc->dma_len))->hdr1;
}
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        struct talitos_request *request, saved_req;
        unsigned long flags;
        int tail, status;
        bool is_sec1 = has_ftr_sec1(priv);

        spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

        tail = priv->chan[ch].tail;
        while (priv->chan[ch].fifo[tail].desc) {
                __be32 hdr;

                request = &priv->chan[ch].fifo[tail];

                /* descriptors with their done bits set don't get the error */
                rmb();
                hdr = get_request_hdr(request, is_sec1);

                if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
                        status = 0;
                else
                        if (!error)
                                break;
                        else
                                status = error;

                dma_unmap_single(dev, request->dma_desc,
                                 TALITOS_DESC_SIZE,
                                 DMA_BIDIRECTIONAL);

                /* copy entries so we can call callback outside lock */
                saved_req.desc = request->desc;
                saved_req.callback = request->callback;
                saved_req.context = request->context;

                /* release request entry in fifo */
                smp_wmb();
                request->desc = NULL;

                /* increment fifo tail */
                priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

                spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

                atomic_dec(&priv->chan[ch].submit_count);

                saved_req.callback(dev, saved_req.desc, saved_req.context,
                                   status);
                /* channel may resume processing in single desc error case */
                if (error && !reset_ch && status == error)
                        return;
                spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
                tail = priv->chan[ch].tail;
        }

        spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
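/*
 * flush_channel() walks the per-channel fifo from the tail, reaping every
 * descriptor whose DONE bits are set; the tail lock is dropped around each
 * callback so completion handlers are free to submit new requests.
 */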
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
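/*
 * The done tasklets above pair with the interrupt handlers defined further
 * below: the IRQ handler acknowledges and masks further done interrupts and
 * schedules the tasklet, which flushes the completed channels and then
 * unmasks done interrupts again on its way out.
 */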
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int tail, iter;
        dma_addr_t cur_desc;

        cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
        cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

        if (!cur_desc) {
                dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
                return 0;
        }

        tail = priv->chan[ch].tail;

        iter = tail;
        while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
               priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
                iter = (iter + 1) & (priv->fifo_len - 1);
                if (iter == tail) {
                        dev_err(dev, "couldn't locate current descriptor\n");
                        return 0;
                }
        }

        if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc) {
                struct talitos_edesc *edesc;

                edesc = container_of(priv->chan[ch].fifo[iter].desc,
                                     struct talitos_edesc, desc);
                return ((struct talitos_desc *)
                        (edesc->buf + edesc->dma_len))->hdr;
        }

        return priv->chan[ch].fifo[iter].desc->hdr;
}
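/*
 * current_desc_hdr() relies on the channel's Current Descriptor Pointer
 * Register (CDPR) matching either a request's dma_desc or a chained
 * next_desc, so the header of the descriptor that faulted can be recovered
 * for the execution-unit error report below.
 */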
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int i;

        if (!desc_hdr)
                desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

        switch (desc_hdr & DESC_HDR_SEL0_MASK) {
        case DESC_HDR_SEL0_AFEU:
                dev_err(dev, "AFEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_afeu + TALITOS_EUISR),
                        in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_DEU:
                dev_err(dev, "DEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_deu + TALITOS_EUISR),
                        in_be32(priv->reg_deu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_MDEUA:
        case DESC_HDR_SEL0_MDEUB:
                dev_err(dev, "MDEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_mdeu + TALITOS_EUISR),
                        in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_RNG:
                dev_err(dev, "RNGUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_rngu + TALITOS_ISR),
                        in_be32(priv->reg_rngu + TALITOS_ISR_LO));
                break;
        case DESC_HDR_SEL0_PKEU:
                dev_err(dev, "PKEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_pkeu + TALITOS_EUISR),
                        in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_AESU:
                dev_err(dev, "AESUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_aesu + TALITOS_EUISR),
                        in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_CRCU:
                dev_err(dev, "CRCUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_crcu + TALITOS_EUISR),
                        in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL0_KEU:
                dev_err(dev, "KEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_pkeu + TALITOS_EUISR),
                        in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
                break;
        }

        switch (desc_hdr & DESC_HDR_SEL1_MASK) {
        case DESC_HDR_SEL1_MDEUA:
        case DESC_HDR_SEL1_MDEUB:
                dev_err(dev, "MDEUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_mdeu + TALITOS_EUISR),
                        in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
                break;
        case DESC_HDR_SEL1_CRCU:
                dev_err(dev, "CRCUISR 0x%08x_%08x\n",
                        in_be32(priv->reg_crcu + TALITOS_EUISR),
                        in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
                break;
        }

        for (i = 0; i < 8; i++)
                dev_err(dev, "DESCBUF 0x%08x_%08x\n",
                        in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
                        in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;
        int ch, error, reset_dev = 0;
        u32 v_lo;
        bool is_sec1 = has_ftr_sec1(priv);
        int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

        for (ch = 0; ch < priv->num_channels; ch++) {
                /* skip channels without errors */
                if (is_sec1) {
                        /* bits 29, 31, 17, 19 */
                        if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
                                continue;
                } else {
                        if (!(isr & (1 << (ch * 2 + 1))))
                                continue;
                }

                error = -EINVAL;

                v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

                if (v_lo & TALITOS_CCPSR_LO_DOF) {
                        dev_err(dev, "double fetch fifo overflow error\n");
                        error = -EAGAIN;
                        reset_ch = 1;
                }
                if (v_lo & TALITOS_CCPSR_LO_SOF) {
                        /* h/w dropped descriptor */
                        dev_err(dev, "single fetch fifo overflow error\n");
                        error = -EAGAIN;
                }
                if (v_lo & TALITOS_CCPSR_LO_MDTE)
                        dev_err(dev, "master data transfer error\n");
                if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
                        dev_err(dev, is_sec1 ? "pointer not complete error\n"
                                             : "s/g data length zero error\n");
                if (v_lo & TALITOS_CCPSR_LO_FPZ)
                        dev_err(dev, is_sec1 ? "parity error\n"
                                             : "fetch pointer zero error\n");
                if (v_lo & TALITOS_CCPSR_LO_IDH)
                        dev_err(dev, "illegal descriptor header error\n");
                if (v_lo & TALITOS_CCPSR_LO_IEU)
                        dev_err(dev, is_sec1 ? "static assignment error\n"
                                             : "invalid exec unit error\n");
                if (v_lo & TALITOS_CCPSR_LO_EU)
                        report_eu_error(dev, ch, current_desc_hdr(dev, ch));
                if (!is_sec1) {
                        if (v_lo & TALITOS_CCPSR_LO_GB)
                                dev_err(dev, "gather boundary error\n");
                        if (v_lo & TALITOS_CCPSR_LO_GRL)
                                dev_err(dev, "gather return/length error\n");
                        if (v_lo & TALITOS_CCPSR_LO_SB)
                                dev_err(dev, "scatter boundary error\n");
                        if (v_lo & TALITOS_CCPSR_LO_SRL)
                                dev_err(dev, "scatter return/length error\n");
                }

                flush_channel(dev, ch, error, reset_ch);

                if (reset_ch) {
                        reset_channel(dev, ch);
                } else {
                        setbits32(priv->chan[ch].reg + TALITOS_CCCR,
                                  TALITOS2_CCCR_CONT);
                        setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
                        while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
                               TALITOS2_CCCR_CONT) && --timeout)
                                cpu_relax();
                        if (timeout == 0) {
                                dev_err(dev, "failed to restart channel %d\n",
                                        ch);
                                reset_dev = 1;
                        }
                }
        }
        if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
            (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
                if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
                        dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
                                isr, isr_lo);
                else
                        dev_err(dev, "done overflow, internal time out, or "
                                "rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

                /* purge request queues */
                for (ch = 0; ch < priv->num_channels; ch++)
                        flush_channel(dev, ch, -EIO, 1);

                /* reset and reinitialize the device */
                init_device(dev);
        }
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
        struct device *dev = (struct device *)rng->priv;
        struct talitos_private *priv = dev_get_drvdata(dev);
        u32 ofl;
        int i;

        for (i = 0; i < 20; i++) {
                ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
                      TALITOS_RNGUSR_LO_OFL;
                if (ofl || !wait)
                        break;
                udelay(10);
        }

        return !!ofl;
}
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
        struct device *dev = (struct device *)rng->priv;
        struct talitos_private *priv = dev_get_drvdata(dev);

        /* rng fifo requires 64-bit accesses */
        *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
        *data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

        return sizeof(u32);
}
static int talitos_rng_init(struct hwrng *rng)
{
        struct device *dev = (struct device *)rng->priv;
        struct talitos_private *priv = dev_get_drvdata(dev);
        unsigned int timeout = TALITOS_TIMEOUT;

        setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
        while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
                 & TALITOS_RNGUSR_LO_RD)
               && --timeout)
                cpu_relax();
        if (timeout == 0) {
                dev_err(dev, "failed to reset rng hw\n");
                return -ENODEV;
        }

        /* start generating */
        setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

        return 0;
}
static int talitos_register_rng(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        int err;

        priv->rng.name		= dev_driver_string(dev),
        priv->rng.init		= talitos_rng_init,
        priv->rng.data_present	= talitos_rng_data_present,
        priv->rng.data_read	= talitos_rng_data_read,
        priv->rng.priv		= (unsigned long)dev;

        err = hwrng_register(&priv->rng);
        if (!err)
                priv->rng_registered = true;

        return err;
}
static void talitos_unregister_rng(struct device *dev)
{
        struct talitos_private *priv = dev_get_drvdata(dev);

        if (!priv->rng_registered)
                return;

        hwrng_unregister(&priv->rng);
        priv->rng_registered = false;
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
        struct device *dev;
        int ch;
        __be32 desc_hdr_template;
        u8 key[TALITOS_MAX_KEY_SIZE];
        u8 iv[TALITOS_MAX_IV_LENGTH];
        dma_addr_t dma_key;
        unsigned int keylen;
        unsigned int enckeylen;
        unsigned int authkeylen;
};
#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
        u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
        unsigned int hw_context_size;
        u8 buf[2][HASH_MAX_BLOCK_SIZE];
        int buf_idx;
        unsigned int swinit;
        unsigned int first;
        unsigned int last;
        unsigned int to_hash_later;
        unsigned int nbuf;
        struct scatterlist bufsl[2];
        struct scatterlist *psrc;
};
struct talitos_export_state {
        u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
        u8 buf[HASH_MAX_BLOCK_SIZE];
        unsigned int swinit;
        unsigned int first;
        unsigned int last;
        unsigned int to_hash_later;
        unsigned int nbuf;
};
static int aead_setkey(struct crypto_aead *authenc,
                       const u8 *key, unsigned int keylen)
{
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = ctx->dev;
        struct crypto_authenc_keys keys;

        if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
                goto badkey;

        if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
                goto badkey;

        if (ctx->keylen)
                dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

        memcpy(ctx->key, keys.authkey, keys.authkeylen);
        memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

        ctx->keylen = keys.authkeylen + keys.enckeylen;
        ctx->enckeylen = keys.enckeylen;
        ctx->authkeylen = keys.authkeylen;
        ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
                                      DMA_TO_DEVICE);

        memzero_explicit(&keys, sizeof(keys));
        return 0;

badkey:
        crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
        memzero_explicit(&keys, sizeof(keys));
        return -EINVAL;
}
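/*
 * Resulting ctx->key layout for authenc AEAD transforms, as built above:
 *
 *	ctx->key:  | authkey (authkeylen) | enckey (enckeylen) |
 *	            ^ ctx->dma_key         ^ ctx->dma_key + ctx->authkeylen
 *
 * ipsec_esp() later points the descriptor's HMAC key pointer at the start of
 * this single DMA mapping and the cipher key pointer at the enckey offset.
 */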
static void talitos_sg_unmap(struct device *dev,
                             struct talitos_edesc *edesc,
                             struct scatterlist *src,
                             struct scatterlist *dst,
                             unsigned int len, unsigned int offset)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        unsigned int src_nents = edesc->src_nents ? : 1;
        unsigned int dst_nents = edesc->dst_nents ? : 1;

        if (is_sec1 && dst && dst_nents > 1) {
                dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
                                           len, DMA_FROM_DEVICE);
                sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
                                     offset);
        }
        if (src != dst) {
                if (src_nents == 1 || !is_sec1)
                        dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

                if (dst && (dst_nents == 1 || !is_sec1))
                        dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
        } else if (src_nents == 1 || !is_sec1) {
                dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
        }
}
static void ipsec_esp_unmap(struct device *dev,
                            struct talitos_edesc *edesc,
                            struct aead_request *areq, bool encrypt)
{
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        unsigned int authsize = crypto_aead_authsize(aead);
        unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
        bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
        struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

        if (is_ipsec_esp)
                unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
                                         DMA_FROM_DEVICE);
        unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

        talitos_sg_unmap(dev, edesc, areq->src, areq->dst,
                         cryptlen + authsize, areq->assoclen);

        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                                 DMA_BIDIRECTIONAL);

        if (!is_ipsec_esp) {
                unsigned int dst_nents = edesc->dst_nents ? : 1;

                sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
                                   areq->assoclen + cryptlen - ivsize);
        }
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
                                   struct talitos_desc *desc, void *context,
                                   int err)
{
        struct aead_request *areq = context;
        struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
        unsigned int ivsize = crypto_aead_ivsize(authenc);
        struct talitos_edesc *edesc;

        edesc = container_of(desc, struct talitos_edesc, desc);

        ipsec_esp_unmap(dev, edesc, areq, true);

        dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

        kfree(edesc);

        aead_request_complete(areq, err);
}
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
                                          struct talitos_desc *desc,
                                          void *context, int err)
{
        struct aead_request *req = context;
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_edesc *edesc;
        char *oicv, *icv;

        edesc = container_of(desc, struct talitos_edesc, desc);

        ipsec_esp_unmap(dev, edesc, req, false);

        if (!err) {
                /* auth check */
                oicv = edesc->buf + edesc->dma_len;
                icv = oicv - authsize;

                err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
        }

        kfree(edesc);

        aead_request_complete(req, err);
}
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
                                          struct talitos_desc *desc,
                                          void *context, int err)
{
        struct aead_request *req = context;
        struct talitos_edesc *edesc;

        edesc = container_of(desc, struct talitos_edesc, desc);

        ipsec_esp_unmap(dev, edesc, req, false);

        /* check ICV auth status */
        if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
                     DESC_HDR_LO_ICCR1_PASS))
                err = -EBADMSG;

        kfree(edesc);

        aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
                                 unsigned int offset, int datalen, int elen,
                                 struct talitos_ptr *link_tbl_ptr)
{
        int n_sg = elen ? sg_count + 1 : sg_count;
        int count = 0;
        int cryptlen = datalen + elen;

        while (cryptlen && sg && n_sg--) {
                unsigned int len = sg_dma_len(sg);

                if (offset >= len) {
                        offset -= len;
                        goto next;
                }

                len -= offset;

                if (len > cryptlen)
                        len = cryptlen;

                if (datalen > 0 && len > datalen) {
                        to_talitos_ptr(link_tbl_ptr + count,
                                       sg_dma_address(sg) + offset, datalen, 0);
                        to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
                        count++;
                        len -= datalen;
                        offset += datalen;
                }
                to_talitos_ptr(link_tbl_ptr + count,
                               sg_dma_address(sg) + offset, len, 0);
                to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
                count++;
                cryptlen -= len;
                datalen -= len;
                offset = 0;

next:
                sg = sg_next(sg);
        }

        /* tag end of link table */
        if (count > 0)
                to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
                                       DESC_PTR_LNKTBL_RET, 0);

        return count;
}
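/*
 * Each link table entry is itself a talitos_ptr; DESC_PTR_LNKTBL_RET on the
 * final entry tells the engine to return to the main descriptor. When elen
 * is nonzero the table covers datalen + elen bytes, so a trailing ICV can be
 * fetched through the same table as the payload.
 */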
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
                              unsigned int len, struct talitos_edesc *edesc,
                              struct talitos_ptr *ptr, int sg_count,
                              unsigned int offset, int tbl_off, int elen,
                              bool force)
{
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        if (!src) {
                to_talitos_ptr(ptr, 0, 0, is_sec1);
                return 1;
        }
        to_talitos_ptr_ext_set(ptr, elen, is_sec1);
        if (sg_count == 1 && !force) {
                to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
                return sg_count;
        }
        if (is_sec1) {
                to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
                return sg_count;
        }
        sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len, elen,
                                         &edesc->link_tbl[tbl_off]);
        if (sg_count == 1 && !force) {
                /* Only one segment now, so no link tbl needed*/
                copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
                return sg_count;
        }
        to_talitos_ptr(ptr, edesc->dma_link_tbl +
                            tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
        to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

        return sg_count;
}
static int talitos_sg_map(struct device *dev, struct scatterlist *src,
                          unsigned int len, struct talitos_edesc *edesc,
                          struct talitos_ptr *ptr, int sg_count,
                          unsigned int offset, int tbl_off)
{
        return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
                                  tbl_off, 0, false);
}
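/*
 * talitos_sg_map()/talitos_sg_map_ext() pick one of three encodings for a
 * h/w pointer: a null pointer (no data), a direct pointer for a single
 * dma segment, or a DESC_PTR_LNKTBL_JUMP into the link table built inside
 * the edesc for multi-segment scatterlists (SEC1 instead flattens the data
 * into the edesc->buf bounce buffer).
 */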
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
                     bool encrypt,
                     void (*callback)(struct device *dev,
                                      struct talitos_desc *desc,
                                      void *context, int error))
{
        struct crypto_aead *aead = crypto_aead_reqtfm(areq);
        unsigned int authsize = crypto_aead_authsize(aead);
        struct talitos_ctx *ctx = crypto_aead_ctx(aead);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
        unsigned int cryptlen = areq->cryptlen - (encrypt ? 0 : authsize);
        unsigned int ivsize = crypto_aead_ivsize(aead);
        int tbl_off = 0;
        int sg_count, ret;
        int elen = 0;
        bool sync_needed = false;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
        struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
        struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];
        dma_addr_t dma_icv = edesc->dma_link_tbl + edesc->dma_len - authsize;

        /* hmac key */
        to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
                sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
                                  areq->assoclen + cryptlen);
        else
                sg_count = dma_map_sg(dev, areq->src, sg_count,
                                      (areq->src == areq->dst) ?
                                      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

        /* hmac data */
        ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
                             &desc->ptr[1], sg_count, 0, tbl_off);

        if (ret > 1) {
                tbl_off += ret;
                sync_needed = true;
        }

        /* cipher iv */
        to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

        /* cipher key */
        to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
                       ctx->enckeylen, is_sec1);

        /*
         * cipher in
         * map and adjust cipher len to aead request cryptlen.
         * extent is bytes of HMAC postpended to ciphertext,
         * typically 12 for ipsec
         */
        if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
                elen = authsize;

        ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
                                 sg_count, areq->assoclen, tbl_off, elen,
                                 false);

        if (ret > 1) {
                tbl_off += ret;
                sync_needed = true;
        }

        /* cipher out */
        if (areq->src != areq->dst) {
                sg_count = edesc->dst_nents ? : 1;
                if (!is_sec1 || sg_count == 1)
                        dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
        }

        if (is_ipsec_esp && encrypt)
                elen = authsize;
        else
                elen = 0;
        ret = talitos_sg_map_ext(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
                                 sg_count, areq->assoclen, tbl_off, elen,
                                 is_ipsec_esp && !encrypt);
        tbl_off += ret;

        edesc->icv_ool = !encrypt;

        if (!encrypt && is_ipsec_esp) {
                struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

                /* Add an entry to the link table for ICV data */
                to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
                to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RET, is_sec1);

                /* icv data follows link tables */
                to_talitos_ptr(tbl_ptr, dma_icv, authsize, is_sec1);
                to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
                sync_needed = true;
        } else if (!encrypt) {
                to_talitos_ptr(&desc->ptr[6], dma_icv, authsize, is_sec1);
                sync_needed = true;
        } else if (!is_ipsec_esp) {
                talitos_sg_map(dev, areq->dst, authsize, edesc, &desc->ptr[6],
                               sg_count, areq->assoclen + cryptlen, tbl_off);
        }

        /* iv out */
        if (is_ipsec_esp)
                map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
                                       DMA_FROM_DEVICE);

        if (sync_needed)
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len,
                                           DMA_BIDIRECTIONAL);

        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
        if (ret != -EINPROGRESS) {
                ipsec_esp_unmap(dev, edesc, areq, encrypt);
                kfree(edesc);
        }
        return ret;
}
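/*
 * Descriptor pointer usage in ipsec_esp(), from the assignments above:
 *	ptr[0] HMAC key,  ptr[1] HMAC (associated) data,
 *	ptr[2]/ptr[3] cipher IV and cipher key (slots swap for non-ESP types),
 *	ptr[4] cipher in, ptr[5] cipher out, ptr[6] ICV or IV out.
 */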
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
                                                 struct scatterlist *src,
                                                 struct scatterlist *dst,
                                                 u8 *iv,
                                                 unsigned int assoclen,
                                                 unsigned int cryptlen,
                                                 unsigned int authsize,
                                                 unsigned int ivsize,
                                                 int icv_stashing,
                                                 u32 cryptoflags,
                                                 bool encrypt)
{
        struct talitos_edesc *edesc;
        int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
        dma_addr_t iv_dma = 0;
        gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                      GFP_ATOMIC;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;

        if (cryptlen + authsize > max_len) {
                dev_err(dev, "length exceeds h/w max limit\n");
                return ERR_PTR(-EINVAL);
        }

        if (!dst || dst == src) {
                src_len = assoclen + cryptlen + authsize;
                src_nents = sg_nents_for_len(src, src_len);
                if (src_nents < 0) {
                        dev_err(dev, "Invalid number of src SG.\n");
                        return ERR_PTR(-EINVAL);
                }
                src_nents = (src_nents == 1) ? 0 : src_nents;
                dst_nents = dst ? src_nents : 0;
                dst_len = 0;
        } else { /* dst && dst != src*/
                src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
                src_nents = sg_nents_for_len(src, src_len);
                if (src_nents < 0) {
                        dev_err(dev, "Invalid number of src SG.\n");
                        return ERR_PTR(-EINVAL);
                }
                src_nents = (src_nents == 1) ? 0 : src_nents;
                dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
                dst_nents = sg_nents_for_len(dst, dst_len);
                if (dst_nents < 0) {
                        dev_err(dev, "Invalid number of dst SG.\n");
                        return ERR_PTR(-EINVAL);
                }
                dst_nents = (dst_nents == 1) ? 0 : dst_nents;
        }

        /*
         * allocate space for base edesc plus the link tables,
         * allowing for two separate entries for AD and generated ICV (+ 2),
         * and space for two sets of ICVs (stashed and generated)
         */
        alloc_len = sizeof(struct talitos_edesc);
        if (src_nents || dst_nents || !encrypt) {
                if (is_sec1)
                        dma_len = (src_nents ? src_len : 0) +
                                  (dst_nents ? dst_len : 0) + authsize;
                else
                        dma_len = (src_nents + dst_nents + 2) *
                                  sizeof(struct talitos_ptr) + authsize;
                alloc_len += dma_len;
        } else {
                dma_len = 0;
        }
        alloc_len += icv_stashing ? authsize : 0;

        /* if its a ahash, add space for a second desc next to the first one */
        if (is_sec1 && !dst)
                alloc_len += sizeof(struct talitos_desc);
        alloc_len += ivsize;

        edesc = kmalloc(alloc_len, GFP_DMA | flags);
        if (!edesc)
                return ERR_PTR(-ENOMEM);
        if (ivsize) {
                iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
                iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
        }
        memset(&edesc->desc, 0, sizeof(edesc->desc));

        edesc->src_nents = src_nents;
        edesc->dst_nents = dst_nents;
        edesc->iv_dma = iv_dma;
        edesc->dma_len = dma_len;
        if (dma_len)
                edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
                                                     edesc->dma_len,
                                                     DMA_BIDIRECTIONAL);

        return edesc;
}
*aead_edesc_alloc(struct aead_request
*areq
, u8
*iv
,
1392 int icv_stashing
, bool encrypt
)
1394 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1395 unsigned int authsize
= crypto_aead_authsize(authenc
);
1396 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1397 unsigned int ivsize
= crypto_aead_ivsize(authenc
);
1398 unsigned int cryptlen
= areq
->cryptlen
- (encrypt
? 0 : authsize
);
1400 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1401 iv
, areq
->assoclen
, cryptlen
,
1402 authsize
, ivsize
, icv_stashing
,
1403 areq
->base
.flags
, encrypt
);
static int aead_encrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_edesc *edesc;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, req->iv, 0, true);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

        return ipsec_esp(edesc, req, true, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
        struct crypto_aead *authenc = crypto_aead_reqtfm(req);
        unsigned int authsize = crypto_aead_authsize(authenc);
        struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
        struct talitos_private *priv = dev_get_drvdata(ctx->dev);
        struct talitos_edesc *edesc;
        void *icvdata;

        /* allocate extended descriptor */
        edesc = aead_edesc_alloc(req, req->iv, 1, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        if ((edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
            (priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
            ((!edesc->src_nents && !edesc->dst_nents) ||
             priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

                /* decrypt and check the ICV */
                edesc->desc.hdr = ctx->desc_hdr_template |
                                  DESC_HDR_DIR_INBOUND |
                                  DESC_HDR_MODE1_MDEU_CICV;

                /* reset integrity check result bits */
                edesc->desc.hdr_lo = 0;

                return ipsec_esp(edesc, req, false,
                                 ipsec_esp_decrypt_hwauth_done);
        }

        /* Have to check the ICV with software */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

        /* stash incoming ICV for later cmp with ICV generated by the h/w */
        icvdata = edesc->buf + edesc->dma_len;

        sg_pcopy_to_buffer(req->src, edesc->src_nents ? : 1, icvdata, authsize,
                           req->assoclen + req->cryptlen - authsize);

        return ipsec_esp(edesc, req, false, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
                             const u8 *key, unsigned int keylen)
{
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct device *dev = ctx->dev;
        u32 tmp[DES_EXPKEY_WORDS];

        if (keylen > TALITOS_MAX_KEY_SIZE) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
                return -EINVAL;
        }

        if (unlikely(crypto_ablkcipher_get_flags(cipher) &
                     CRYPTO_TFM_REQ_WEAK_KEY) &&
            !des_ekey(tmp, key)) {
                crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
                return -EINVAL;
        }

        if (ctx->keylen)
                dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

        memcpy(&ctx->key, key, keylen);
        ctx->keylen = keylen;

        ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

        return 0;
}
static int ablkcipher_aes_setkey(struct crypto_ablkcipher *cipher,
                                 const u8 *key, unsigned int keylen)
{
        if (keylen == AES_KEYSIZE_128 || keylen == AES_KEYSIZE_192 ||
            keylen == AES_KEYSIZE_256)
                return ablkcipher_setkey(cipher, key, keylen);

        crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);

        return -EINVAL;
}
static void common_nonsnoop_unmap(struct device *dev,
                                  struct talitos_edesc *edesc,
                                  struct ablkcipher_request *areq)
{
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

        talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                                 DMA_BIDIRECTIONAL);
}
static void ablkcipher_done(struct device *dev,
                            struct talitos_desc *desc, void *context,
                            int err)
{
        struct ablkcipher_request *areq = context;
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
        struct talitos_edesc *edesc;

        edesc = container_of(desc, struct talitos_edesc, desc);

        common_nonsnoop_unmap(dev, edesc, areq);
        memcpy(areq->info, ctx->iv, ivsize);

        kfree(edesc);

        areq->base.complete(&areq->base, err);
}
static int common_nonsnoop(struct talitos_edesc *edesc,
                           struct ablkcipher_request *areq,
                           void (*callback) (struct device *dev,
                                             struct talitos_desc *desc,
                                             void *context, int error))
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
        unsigned int cryptlen = areq->nbytes;
        unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
        int sg_count, ret;
        bool sync_needed = false;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);

        /* first DWORD empty */

        /* cipher iv */
        to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

        /* cipher key */
        to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
                sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
                                  cryptlen);
        else
                sg_count = dma_map_sg(dev, areq->src, sg_count,
                                      (areq->src == areq->dst) ?
                                      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
        /*
         * cipher in
         */
        sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
                                  &desc->ptr[3], sg_count, 0, 0);
        if (sg_count > 1)
                sync_needed = true;

        /* cipher out */
        if (areq->src != areq->dst) {
                sg_count = edesc->dst_nents ? : 1;
                if (!is_sec1 || sg_count == 1)
                        dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
        }

        ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
                             sg_count, 0, (edesc->src_nents + 1));
        if (ret > 1)
                sync_needed = true;

        /* iv out */
        map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
                               DMA_FROM_DEVICE);

        /* last DWORD empty */

        if (sync_needed)
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);

        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
        if (ret != -EINPROGRESS) {
                common_nonsnoop_unmap(dev, edesc, areq);
                kfree(edesc);
        }
        return ret;
}
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1615 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1616 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1617 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1619 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1620 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1621 areq
->base
.flags
, encrypt
);
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
        unsigned int blocksize =
                        crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

        if (!areq->nbytes)
                return 0;

        if (areq->nbytes % blocksize)
                return -EINVAL;

        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, true);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        /* set encrypt */
        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

        return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
        struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
        struct talitos_edesc *edesc;
        unsigned int blocksize =
                        crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(cipher));

        if (!areq->nbytes)
                return 0;

        if (areq->nbytes % blocksize)
                return -EINVAL;

        /* allocate extended descriptor */
        edesc = ablkcipher_edesc_alloc(areq, false);
        if (IS_ERR(edesc))
                return PTR_ERR(edesc);

        edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

        return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
                                       struct talitos_edesc *edesc,
                                       struct ahash_request *areq)
{
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        struct talitos_desc *desc = &edesc->desc;
        struct talitos_desc *desc2 = (struct talitos_desc *)
                                     (edesc->buf + edesc->dma_len);

        unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
        if (desc->next_desc &&
            desc->ptr[5].ptr != desc2->ptr[5].ptr)
                unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

        if (req_ctx->psrc)
                talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

        /* When using hashctx-in, must unmap it. */
        if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
                unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
                                         DMA_TO_DEVICE);
        else if (desc->next_desc)
                unmap_single_talitos_ptr(dev, &desc2->ptr[1],
                                         DMA_TO_DEVICE);

        if (is_sec1 && req_ctx->nbuf)
                unmap_single_talitos_ptr(dev, &desc->ptr[3],
                                         DMA_TO_DEVICE);

        if (edesc->dma_len)
                dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
                                 DMA_BIDIRECTIONAL);

        if (edesc->desc.next_desc)
                dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
                                 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
                       struct talitos_desc *desc, void *context,
                       int err)
{
        struct ahash_request *areq = context;
        struct talitos_edesc *edesc =
                container_of(desc, struct talitos_edesc, desc);
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

        if (!req_ctx->last && req_ctx->to_hash_later) {
                /* Position any partial block for next update/final/finup */
                req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
                req_ctx->nbuf = req_ctx->to_hash_later;
        }
        common_nonsnoop_hash_unmap(dev, edesc, areq);

        kfree(edesc);

        areq->base.complete(&areq->base, err);
}
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
                                      struct talitos_edesc *edesc,
                                      struct talitos_ptr *ptr)
{
        static u8 padded_hash[64] = {
                0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        };

        pr_err_once("Bug in SEC1, padding ourself\n");
        edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
        map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
                               (char *)padded_hash, DMA_TO_DEVICE);
}
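/*
 * padded_hash[] above is the Merkle-Damgard padding of an empty message for
 * 64-byte-block digests (MD5/SHA-1/SHA-256): a single 0x80 bit terminator
 * followed by zeros, including the all-zero 64-bit message length field.
 */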
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
                                struct ahash_request *areq, unsigned int length,
                                void (*callback) (struct device *dev,
                                                  struct talitos_desc *desc,
                                                  void *context, int error))
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        struct device *dev = ctx->dev;
        struct talitos_desc *desc = &edesc->desc;
        int ret;
        bool sync_needed = false;
        struct talitos_private *priv = dev_get_drvdata(dev);
        bool is_sec1 = has_ftr_sec1(priv);
        int sg_count;

        /* first DWORD empty */

        /* hash context in */
        if (!req_ctx->first || req_ctx->swinit) {
                map_single_talitos_ptr_nosync(dev, &desc->ptr[1],
                                              req_ctx->hw_context_size,
                                              req_ctx->hw_context,
                                              DMA_TO_DEVICE);
                req_ctx->swinit = 0;
        }
        /* Indicate next op is not the first. */
        req_ctx->first = 0;

        /* HMAC key */
        if (ctx->keylen)
                to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
                               is_sec1);

        if (is_sec1 && req_ctx->nbuf)
                length -= req_ctx->nbuf;

        sg_count = edesc->src_nents ?: 1;
        if (is_sec1 && sg_count > 1)
                sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
        else if (length)
                sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
                                      DMA_TO_DEVICE);
        /*
         * data in
         */
        if (is_sec1 && req_ctx->nbuf) {
                map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
                                       req_ctx->buf[req_ctx->buf_idx],
                                       DMA_TO_DEVICE);
        } else {
                sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
                                          &desc->ptr[3], sg_count, 0, 0);
                if (sg_count > 1)
                        sync_needed = true;
        }

        /* fifth DWORD empty */

        /* hash/HMAC out -or- hash context out */
        if (req_ctx->last)
                map_single_talitos_ptr(dev, &desc->ptr[5],
                                       crypto_ahash_digestsize(tfm),
                                       areq->result, DMA_FROM_DEVICE);
        else
                map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
                                              req_ctx->hw_context_size,
                                              req_ctx->hw_context,
                                              DMA_FROM_DEVICE);

        /* last DWORD empty */

        if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
                talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

        if (is_sec1 && req_ctx->nbuf && length) {
                struct talitos_desc *desc2 = (struct talitos_desc *)
                                             (edesc->buf + edesc->dma_len);
                dma_addr_t next_desc;

                memset(desc2, 0, sizeof(*desc2));
                desc2->hdr = desc->hdr;
                desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
                desc2->hdr1 = desc2->hdr;
                desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
                desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
                desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

                if (desc->ptr[1].ptr)
                        copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
                                         is_sec1);
                else
                        map_single_talitos_ptr_nosync(dev, &desc2->ptr[1],
                                                      req_ctx->hw_context_size,
                                                      req_ctx->hw_context,
                                                      DMA_TO_DEVICE);
                copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
                sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
                                          &desc2->ptr[3], sg_count, 0, 0);
                if (sg_count > 1)
                        sync_needed = true;
                copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
                if (req_ctx->last)
                        map_single_talitos_ptr_nosync(dev, &desc->ptr[5],
                                                      req_ctx->hw_context_size,
                                                      req_ctx->hw_context,
                                                      DMA_FROM_DEVICE);

                next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
                                           DMA_BIDIRECTIONAL);
                desc->next_desc = cpu_to_be32(next_desc);
        }

        if (sync_needed)
                dma_sync_single_for_device(dev, edesc->dma_link_tbl,
                                           edesc->dma_len, DMA_BIDIRECTIONAL);

        ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
        if (ret != -EINPROGRESS) {
                common_nonsnoop_hash_unmap(dev, edesc, areq);
                kfree(edesc);
        }
        return ret;
}
*ahash_edesc_alloc(struct ahash_request
*areq
,
1882 unsigned int nbytes
)
1884 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1885 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1886 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1887 struct talitos_private
*priv
= dev_get_drvdata(ctx
->dev
);
1888 bool is_sec1
= has_ftr_sec1(priv
);
1891 nbytes
-= req_ctx
->nbuf
;
1893 return talitos_edesc_alloc(ctx
->dev
, req_ctx
->psrc
, NULL
, NULL
, 0,
1894 nbytes
, 0, 0, 0, areq
->base
.flags
, false);
static int ahash_init(struct ahash_request *areq)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
        struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
        struct device *dev = ctx->dev;
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
        unsigned int size;
        dma_addr_t dma;

        /* Initialize the context */
        req_ctx->buf_idx = 0;
        req_ctx->nbuf = 0;
        req_ctx->first = 1; /* first indicates h/w must init its context */
        req_ctx->swinit = 0; /* assume h/w init of context */
        size =	(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
                        ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
                        : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
        req_ctx->hw_context_size = size;

        dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
                             DMA_TO_DEVICE);
        dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

        return 0;
}
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
        struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

        req_ctx->hw_context[0] = SHA224_H0;
        req_ctx->hw_context[1] = SHA224_H1;
        req_ctx->hw_context[2] = SHA224_H2;
        req_ctx->hw_context[3] = SHA224_H3;
        req_ctx->hw_context[4] = SHA224_H4;
        req_ctx->hw_context[5] = SHA224_H5;
        req_ctx->hw_context[6] = SHA224_H6;
        req_ctx->hw_context[7] = SHA224_H7;

        /* init 64-bit count */
        req_ctx->hw_context[8] = 0;
        req_ctx->hw_context[9] = 0;

        ahash_init(areq);
        req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

        return 0;
}
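/*
 * SHA-224 is SHA-256 with a different initial value and a truncated output,
 * so loading the SHA224_H0..H7 constants and running the MDEU in sha256 mode
 * (with swinit preventing a h/w re-init) yields correct SHA-224 digests.
 */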
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;
	struct device *dev = ctx->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (!is_sec1 && req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
		int offset;

		if (nbytes_to_hash > blocksize)
			offset = blocksize - req_ctx->nbuf;
		else
			offset = nbytes_to_hash - req_ctx->nbuf;
		nents = sg_nents_for_len(areq->src, offset);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  ctx_buf + req_ctx->nbuf, offset);
		req_ctx->nbuf += offset;
		req_ctx->psrc = scatterwalk_ffwd(req_ctx->bufsl, areq->src,
						 offset);
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, ahash_done);
}
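/*
 * Worked example of the buffering arithmetic above (illustrative only):
 * with blocksize = 64 (sha256), req_ctx->nbuf = 10 buffered bytes and a
 * non-final update of nbytes = 100, nbytes_to_hash = 110 and
 * to_hash_later = 110 & 63 = 46, so 64 bytes go to the engine now and 46
 * are carried over in the spare buffer.  Had the total been an exact
 * multiple of the block size, one full block would still be held back,
 * because only the final request may let the SEC pad and close the hash.
 */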
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	dma_addr_t dma;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_FROM_DEVICE);

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}
static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct device *dev = ctx->dev;
	const struct talitos_export_state *export = in;
	unsigned int size;
	dma_addr_t dma;

	memset(req_ctx, 0, sizeof(*req_ctx));
	size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
	       ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
	       : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	req_ctx->hw_context_size = size;
	memcpy(req_ctx->hw_context, export->hw_context, size);
	memcpy(req_ctx->buf[0], export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	dma = dma_map_single(dev, req_ctx->hw_context, req_ctx->hw_context_size,
			     DMA_TO_DEVICE);
	dma_unmap_single(dev, dma, req_ctx->hw_context_size, DMA_TO_DEVICE);

	return 0;
}
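#if 0
/*
 * Illustrative sketch (not part of the driver): how a caller might reach
 * ahash_export()/ahash_import() above through the generic crypto API to
 * suspend a partial hash and resume it on another request.  The function
 * name is hypothetical and error handling is minimal; the state buffer
 * relies on .halg.statesize below being sizeof(struct talitos_export_state).
 */
static int example_export_import(struct ahash_request *req1,
				 struct ahash_request *req2)
{
	u8 state[sizeof(struct talitos_export_state)];
	int err;

	err = crypto_ahash_export(req1, state);	/* snapshot hw_context + buf */
	if (err)
		return err;
	return crypto_ahash_import(req2, state);	/* resume on req2 */
}
#endif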
static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct crypto_wait wait;
	int ret;

	crypto_init_wait(&wait);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	struct device *dev = ctx->dev;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	ctx->keylen = keysize;
	ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);

	return 0;
}
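/*
 * Example of the RFC 2104 key preparation implemented above: for
 * hmac(sha256) (blocksize 64), a 100-byte key exceeds the block size, so
 * keyhash() digests it once through the engine and the 32-byte result
 * becomes the working key, while a 20-byte key is copied into ctx->key
 * unchanged.  Either way the prepared key is DMA-mapped here, any previous
 * mapping is released first, and the last one goes away in
 * talitos_cra_exit().
 */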
struct talitos_alg_template {
	u32 type;
	u32 priority;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};

static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos-hsna",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos-hsna",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = ablkcipher_aes_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = 1,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
				.setkey = ablkcipher_aes_setkey,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
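#if 0
/*
 * Illustrative sketch (not part of the driver): kernel users reach the
 * entries above through the generic crypto API by cra_name; the talitos
 * variants win arbitration via their cra_priority.  All of them are
 * CRYPTO_ALG_ASYNC, so completions normally arrive via callback.  The
 * function name is hypothetical.
 */
static int example_request_offloaded_sha256(void)
{
	struct crypto_ahash *tfm;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* ... build an ahash_request and wait with crypto_wait_req() ... */
	crypto_free_ahash(tfm);
	return 0;
}
#endif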
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
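/*
 * Example of the round-robin assignment above: with num_channels = 4 the
 * mask is 3, so successive tfm initializations take channels 1, 2, 3, 0,
 * 1, ... as atomic_inc_return() advances priv->last_chan.  This is also
 * why talitos_probe() insists that fsl,num-channels be a power of two.
 */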
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
static void talitos_cra_exit(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct device *dev = ctx->dev;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
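/*
 * Example of the capability check above: a template such as
 * DESC_HDR_TYPE_IPSEC_ESP | DESC_HDR_SEL0_AESU | DESC_HDR_SEL1_MDEUA is
 * accepted only if the matching descriptor-type bit is set in the node's
 * fsl,descriptor-types-mask and both the AESU and MDEU bits are set in
 * fsl,exec-units-mask, which talitos_probe() reads into priv->desc_types
 * and priv->exec_units respectively.
 */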
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
							   *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
			     GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_exit = talitos_cra_exit;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = alg->cra_ablkcipher.setkey ?:
					     ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_exit = talitos_cra_exit;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		if (!strncmp(alg->cra_name, "hmac", 4))
			t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			devm_kfree(dev, t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		devm_kfree(dev, t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	if (t_alg->algt.priority)
		alg->cra_priority = t_alg->algt.priority;
	else
		alg->cra_priority = TALITOS_CRA_PRIORITY;
	if (has_ftr_sec1(priv))
		alg->cra_alignmask = 3;
	else
		alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	int i, err;
	int stride;
	struct resource *res;

	priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;
	priv->reg = devm_ioremap(dev, res->start, resource_size(res));
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
	of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
	of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
	of_property_read_u32(np, "fsl,descriptor-types-mask",
			     &priv->desc_types);

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		if (priv->num_channels == 1)
			tasklet_init(&priv->done_task[0], talitos1_done_ch0,
				     (unsigned long)dev);
		else
			tasklet_init(&priv->done_task[0], talitos1_done_4ch,
				     (unsigned long)dev);
	} else {
		if (priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		} else if (priv->num_channels == 1) {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		}
	}

	priv->chan = devm_kcalloc(dev,
				  priv->num_channels,
				  sizeof(struct talitos_channel),
				  GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = devm_kcalloc(dev,
						  priv->fifo_len,
						  sizeof(struct talitos_request),
						  GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
					&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				devm_kfree(dev, t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
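/*
 * Illustrative device tree node (values are board-specific examples, not
 * authoritative) showing the properties talitos_probe() consumes:
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <45 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */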
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");