/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
58 static void to_talitos_ptr(struct talitos_ptr
*ptr
, dma_addr_t dma_addr
,
61 ptr
->ptr
= cpu_to_be32(lower_32_bits(dma_addr
));
63 ptr
->eptr
= upper_32_bits(dma_addr
);
66 static void copy_talitos_ptr(struct talitos_ptr
*dst_ptr
,
67 struct talitos_ptr
*src_ptr
, bool is_sec1
)
69 dst_ptr
->ptr
= src_ptr
->ptr
;
71 dst_ptr
->eptr
= src_ptr
->eptr
;
74 static void to_talitos_ptr_len(struct talitos_ptr
*ptr
, unsigned int len
,
79 ptr
->len1
= cpu_to_be16(len
);
81 ptr
->len
= cpu_to_be16(len
);
85 static unsigned short from_talitos_ptr_len(struct talitos_ptr
*ptr
,
89 return be16_to_cpu(ptr
->len1
);
91 return be16_to_cpu(ptr
->len
);
94 static void to_talitos_ptr_ext_set(struct talitos_ptr
*ptr
, u8 val
,
101 static void to_talitos_ptr_ext_or(struct talitos_ptr
*ptr
, u8 val
, bool is_sec1
)
104 ptr
->j_extent
|= val
;
108 * map virtual single (contiguous) pointer to h/w descriptor pointer
110 static void map_single_talitos_ptr(struct device
*dev
,
111 struct talitos_ptr
*ptr
,
112 unsigned int len
, void *data
,
113 enum dma_data_direction dir
)
115 dma_addr_t dma_addr
= dma_map_single(dev
, data
, len
, dir
);
116 struct talitos_private
*priv
= dev_get_drvdata(dev
);
117 bool is_sec1
= has_ftr_sec1(priv
);
119 to_talitos_ptr_len(ptr
, len
, is_sec1
);
120 to_talitos_ptr(ptr
, dma_addr
, is_sec1
);
121 to_talitos_ptr_ext_set(ptr
, 0, is_sec1
);
125 * unmap bus single (contiguous) h/w descriptor pointer
127 static void unmap_single_talitos_ptr(struct device
*dev
,
128 struct talitos_ptr
*ptr
,
129 enum dma_data_direction dir
)
131 struct talitos_private
*priv
= dev_get_drvdata(dev
);
132 bool is_sec1
= has_ftr_sec1(priv
);
134 dma_unmap_single(dev
, be32_to_cpu(ptr
->ptr
),
135 from_talitos_ptr_len(ptr
, is_sec1
), dir
);
138 static int reset_channel(struct device
*dev
, int ch
)
140 struct talitos_private
*priv
= dev_get_drvdata(dev
);
141 unsigned int timeout
= TALITOS_TIMEOUT
;
142 bool is_sec1
= has_ftr_sec1(priv
);
145 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
146 TALITOS1_CCCR_LO_RESET
);
148 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
) &
149 TALITOS1_CCCR_LO_RESET
) && --timeout
)
152 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
153 TALITOS2_CCCR_RESET
);
155 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
156 TALITOS2_CCCR_RESET
) && --timeout
)
161 dev_err(dev
, "failed to reset channel %d\n", ch
);
165 /* set 36-bit addressing, done writeback enable and done IRQ enable */
166 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, TALITOS_CCCR_LO_EAE
|
167 TALITOS_CCCR_LO_CDWE
| TALITOS_CCCR_LO_CDIE
);
169 /* and ICCR writeback, if available */
170 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
171 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
172 TALITOS_CCCR_LO_IWSE
);
177 static int reset_device(struct device
*dev
)
179 struct talitos_private
*priv
= dev_get_drvdata(dev
);
180 unsigned int timeout
= TALITOS_TIMEOUT
;
181 bool is_sec1
= has_ftr_sec1(priv
);
182 u32 mcr
= is_sec1
? TALITOS1_MCR_SWR
: TALITOS2_MCR_SWR
;
184 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
186 while ((in_be32(priv
->reg
+ TALITOS_MCR
) & mcr
)
191 mcr
= TALITOS_MCR_RCA1
| TALITOS_MCR_RCA3
;
192 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
196 dev_err(dev
, "failed to reset device\n");
204 * Reset and initialize the device
206 static int init_device(struct device
*dev
)
208 struct talitos_private
*priv
= dev_get_drvdata(dev
);
210 bool is_sec1
= has_ftr_sec1(priv
);
214 * errata documentation: warning: certain SEC interrupts
215 * are not fully cleared by writing the MCR:SWR bit,
216 * set bit twice to completely reset
218 err
= reset_device(dev
);
222 err
= reset_device(dev
);
227 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
228 err
= reset_channel(dev
, ch
);
233 /* enable channel done and error interrupts */
235 clrbits32(priv
->reg
+ TALITOS_IMR
, TALITOS1_IMR_INIT
);
236 clrbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS1_IMR_LO_INIT
);
237 /* disable parity error check in DEU (erroneous? test vect.) */
238 setbits32(priv
->reg_deu
+ TALITOS_EUICR
, TALITOS1_DEUICR_KPE
);
240 setbits32(priv
->reg
+ TALITOS_IMR
, TALITOS2_IMR_INIT
);
241 setbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS2_IMR_LO_INIT
);
244 /* disable integrity check error interrupts (use writeback instead) */
245 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
246 setbits32(priv
->reg_mdeu
+ TALITOS_EUICR_LO
,
247 TALITOS_MDEUICR_LO_ICE
);
253 * talitos_submit - submits a descriptor to the device for processing
254 * @dev: the SEC device to be used
255 * @ch: the SEC device channel to be used
256 * @desc: the descriptor to be processed by the device
257 * @callback: whom to call when processing is complete
258 * @context: a handle for use by caller (optional)
260 * desc must contain valid dma-mapped (bus physical) address pointers.
261 * callback must check err and feedback in descriptor header
262 * for device processing status.
264 int talitos_submit(struct device
*dev
, int ch
, struct talitos_desc
*desc
,
265 void (*callback
)(struct device
*dev
,
266 struct talitos_desc
*desc
,
267 void *context
, int error
),
270 struct talitos_private
*priv
= dev_get_drvdata(dev
);
271 struct talitos_request
*request
;
274 bool is_sec1
= has_ftr_sec1(priv
);
276 spin_lock_irqsave(&priv
->chan
[ch
].head_lock
, flags
);
278 if (!atomic_inc_not_zero(&priv
->chan
[ch
].submit_count
)) {
279 /* h/w fifo is full */
280 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
284 head
= priv
->chan
[ch
].head
;
285 request
= &priv
->chan
[ch
].fifo
[head
];
287 /* map descriptor and save caller data */
289 desc
->hdr1
= desc
->hdr
;
291 request
->dma_desc
= dma_map_single(dev
, &desc
->hdr1
,
295 request
->dma_desc
= dma_map_single(dev
, desc
,
299 request
->callback
= callback
;
300 request
->context
= context
;
302 /* increment fifo head */
303 priv
->chan
[ch
].head
= (priv
->chan
[ch
].head
+ 1) & (priv
->fifo_len
- 1);
306 request
->desc
= desc
;
310 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF
,
311 upper_32_bits(request
->dma_desc
));
312 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF_LO
,
313 lower_32_bits(request
->dma_desc
));
315 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
319 EXPORT_SYMBOL(talitos_submit
);
322 * process what was done, notify callback of error if not
324 static void flush_channel(struct device
*dev
, int ch
, int error
, int reset_ch
)
326 struct talitos_private
*priv
= dev_get_drvdata(dev
);
327 struct talitos_request
*request
, saved_req
;
330 bool is_sec1
= has_ftr_sec1(priv
);
332 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
334 tail
= priv
->chan
[ch
].tail
;
335 while (priv
->chan
[ch
].fifo
[tail
].desc
) {
338 request
= &priv
->chan
[ch
].fifo
[tail
];
340 /* descriptors with their done bits set don't get the error */
342 hdr
= is_sec1
? request
->desc
->hdr1
: request
->desc
->hdr
;
344 if ((hdr
& DESC_HDR_DONE
) == DESC_HDR_DONE
)
352 dma_unmap_single(dev
, request
->dma_desc
,
356 /* copy entries so we can call callback outside lock */
357 saved_req
.desc
= request
->desc
;
358 saved_req
.callback
= request
->callback
;
359 saved_req
.context
= request
->context
;
361 /* release request entry in fifo */
363 request
->desc
= NULL
;
365 /* increment fifo tail */
366 priv
->chan
[ch
].tail
= (tail
+ 1) & (priv
->fifo_len
- 1);
368 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
370 atomic_dec(&priv
->chan
[ch
].submit_count
);
372 saved_req
.callback(dev
, saved_req
.desc
, saved_req
.context
,
374 /* channel may resume processing in single desc error case */
375 if (error
&& !reset_ch
&& status
== error
)
377 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
378 tail
= priv
->chan
[ch
].tail
;
381 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
385 * process completed requests for channels that have done status
387 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
388 static void talitos1_done_##name(unsigned long data) \
390 struct device *dev = (struct device *)data; \
391 struct talitos_private *priv = dev_get_drvdata(dev); \
392 unsigned long flags; \
394 if (ch_done_mask & 0x10000000) \
395 flush_channel(dev, 0, 0, 0); \
396 if (priv->num_channels == 1) \
398 if (ch_done_mask & 0x40000000) \
399 flush_channel(dev, 1, 0, 0); \
400 if (ch_done_mask & 0x00010000) \
401 flush_channel(dev, 2, 0, 0); \
402 if (ch_done_mask & 0x00040000) \
403 flush_channel(dev, 3, 0, 0); \
406 /* At this point, all completed channels have been processed */ \
407 /* Unmask done interrupts for channels completed later on. */ \
408 spin_lock_irqsave(&priv->reg_lock, flags); \
409 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
410 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
411 spin_unlock_irqrestore(&priv->reg_lock, flags); \
414 DEF_TALITOS1_DONE(4ch
, TALITOS1_ISR_4CHDONE
)
416 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
417 static void talitos2_done_##name(unsigned long data) \
419 struct device *dev = (struct device *)data; \
420 struct talitos_private *priv = dev_get_drvdata(dev); \
421 unsigned long flags; \
423 if (ch_done_mask & 1) \
424 flush_channel(dev, 0, 0, 0); \
425 if (priv->num_channels == 1) \
427 if (ch_done_mask & (1 << 2)) \
428 flush_channel(dev, 1, 0, 0); \
429 if (ch_done_mask & (1 << 4)) \
430 flush_channel(dev, 2, 0, 0); \
431 if (ch_done_mask & (1 << 6)) \
432 flush_channel(dev, 3, 0, 0); \
435 /* At this point, all completed channels have been processed */ \
436 /* Unmask done interrupts for channels completed later on. */ \
437 spin_lock_irqsave(&priv->reg_lock, flags); \
438 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
439 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
440 spin_unlock_irqrestore(&priv->reg_lock, flags); \
443 DEF_TALITOS2_DONE(4ch
, TALITOS2_ISR_4CHDONE
)
444 DEF_TALITOS2_DONE(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
)
445 DEF_TALITOS2_DONE(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
)
448 * locate current (offending) descriptor
450 static u32
current_desc_hdr(struct device
*dev
, int ch
)
452 struct talitos_private
*priv
= dev_get_drvdata(dev
);
456 cur_desc
= ((u64
)in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR
)) << 32;
457 cur_desc
|= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR_LO
);
460 dev_err(dev
, "CDPR is NULL, giving up search for offending descriptor\n");
464 tail
= priv
->chan
[ch
].tail
;
467 while (priv
->chan
[ch
].fifo
[iter
].dma_desc
!= cur_desc
) {
468 iter
= (iter
+ 1) & (priv
->fifo_len
- 1);
470 dev_err(dev
, "couldn't locate current descriptor\n");
475 return priv
->chan
[ch
].fifo
[iter
].desc
->hdr
;
479 * user diagnostics; report root cause of error based on execution unit status
481 static void report_eu_error(struct device
*dev
, int ch
, u32 desc_hdr
)
483 struct talitos_private
*priv
= dev_get_drvdata(dev
);
487 desc_hdr
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
);
489 switch (desc_hdr
& DESC_HDR_SEL0_MASK
) {
490 case DESC_HDR_SEL0_AFEU
:
491 dev_err(dev
, "AFEUISR 0x%08x_%08x\n",
492 in_be32(priv
->reg_afeu
+ TALITOS_EUISR
),
493 in_be32(priv
->reg_afeu
+ TALITOS_EUISR_LO
));
495 case DESC_HDR_SEL0_DEU
:
496 dev_err(dev
, "DEUISR 0x%08x_%08x\n",
497 in_be32(priv
->reg_deu
+ TALITOS_EUISR
),
498 in_be32(priv
->reg_deu
+ TALITOS_EUISR_LO
));
500 case DESC_HDR_SEL0_MDEUA
:
501 case DESC_HDR_SEL0_MDEUB
:
502 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
503 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
504 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
506 case DESC_HDR_SEL0_RNG
:
507 dev_err(dev
, "RNGUISR 0x%08x_%08x\n",
508 in_be32(priv
->reg_rngu
+ TALITOS_ISR
),
509 in_be32(priv
->reg_rngu
+ TALITOS_ISR_LO
));
511 case DESC_HDR_SEL0_PKEU
:
512 dev_err(dev
, "PKEUISR 0x%08x_%08x\n",
513 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
514 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
516 case DESC_HDR_SEL0_AESU
:
517 dev_err(dev
, "AESUISR 0x%08x_%08x\n",
518 in_be32(priv
->reg_aesu
+ TALITOS_EUISR
),
519 in_be32(priv
->reg_aesu
+ TALITOS_EUISR_LO
));
521 case DESC_HDR_SEL0_CRCU
:
522 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
523 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
524 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
526 case DESC_HDR_SEL0_KEU
:
527 dev_err(dev
, "KEUISR 0x%08x_%08x\n",
528 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
529 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
533 switch (desc_hdr
& DESC_HDR_SEL1_MASK
) {
534 case DESC_HDR_SEL1_MDEUA
:
535 case DESC_HDR_SEL1_MDEUB
:
536 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
537 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
538 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
540 case DESC_HDR_SEL1_CRCU
:
541 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
542 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
543 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
547 for (i
= 0; i
< 8; i
++)
548 dev_err(dev
, "DESCBUF 0x%08x_%08x\n",
549 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
+ 8*i
),
550 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF_LO
+ 8*i
));
554 * recover from error interrupts
556 static void talitos_error(struct device
*dev
, u32 isr
, u32 isr_lo
)
558 struct talitos_private
*priv
= dev_get_drvdata(dev
);
559 unsigned int timeout
= TALITOS_TIMEOUT
;
560 int ch
, error
, reset_dev
= 0;
562 bool is_sec1
= has_ftr_sec1(priv
);
563 int reset_ch
= is_sec1
? 1 : 0; /* only SEC2 supports continuation */
565 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
566 /* skip channels without errors */
568 /* bits 29, 31, 17, 19 */
569 if (!(isr
& (1 << (29 + (ch
& 1) * 2 - (ch
& 2) * 6))))
572 if (!(isr
& (1 << (ch
* 2 + 1))))
578 v_lo
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCPSR_LO
);
580 if (v_lo
& TALITOS_CCPSR_LO_DOF
) {
581 dev_err(dev
, "double fetch fifo overflow error\n");
585 if (v_lo
& TALITOS_CCPSR_LO_SOF
) {
586 /* h/w dropped descriptor */
587 dev_err(dev
, "single fetch fifo overflow error\n");
590 if (v_lo
& TALITOS_CCPSR_LO_MDTE
)
591 dev_err(dev
, "master data transfer error\n");
592 if (v_lo
& TALITOS_CCPSR_LO_SGDLZ
)
593 dev_err(dev
, is_sec1
? "pointeur not complete error\n"
594 : "s/g data length zero error\n");
595 if (v_lo
& TALITOS_CCPSR_LO_FPZ
)
596 dev_err(dev
, is_sec1
? "parity error\n"
597 : "fetch pointer zero error\n");
598 if (v_lo
& TALITOS_CCPSR_LO_IDH
)
599 dev_err(dev
, "illegal descriptor header error\n");
600 if (v_lo
& TALITOS_CCPSR_LO_IEU
)
601 dev_err(dev
, is_sec1
? "static assignment error\n"
602 : "invalid exec unit error\n");
603 if (v_lo
& TALITOS_CCPSR_LO_EU
)
604 report_eu_error(dev
, ch
, current_desc_hdr(dev
, ch
));
606 if (v_lo
& TALITOS_CCPSR_LO_GB
)
607 dev_err(dev
, "gather boundary error\n");
608 if (v_lo
& TALITOS_CCPSR_LO_GRL
)
609 dev_err(dev
, "gather return/length error\n");
610 if (v_lo
& TALITOS_CCPSR_LO_SB
)
611 dev_err(dev
, "scatter boundary error\n");
612 if (v_lo
& TALITOS_CCPSR_LO_SRL
)
613 dev_err(dev
, "scatter return/length error\n");
616 flush_channel(dev
, ch
, error
, reset_ch
);
619 reset_channel(dev
, ch
);
621 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
623 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, 0);
624 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
625 TALITOS2_CCCR_CONT
) && --timeout
)
628 dev_err(dev
, "failed to restart channel %d\n",
634 if (reset_dev
|| (is_sec1
&& isr
& ~TALITOS1_ISR_4CHERR
) ||
635 (!is_sec1
&& isr
& ~TALITOS2_ISR_4CHERR
) || isr_lo
) {
636 if (is_sec1
&& (isr_lo
& TALITOS1_ISR_TEA_ERR
))
637 dev_err(dev
, "TEA error: ISR 0x%08x_%08x\n",
640 dev_err(dev
, "done overflow, internal time out, or "
641 "rngu error: ISR 0x%08x_%08x\n", isr
, isr_lo
);
643 /* purge request queues */
644 for (ch
= 0; ch
< priv
->num_channels
; ch
++)
645 flush_channel(dev
, ch
, -EIO
, 1);
647 /* reset and reinitialize the device */
652 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
653 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
655 struct device *dev = data; \
656 struct talitos_private *priv = dev_get_drvdata(dev); \
658 unsigned long flags; \
660 spin_lock_irqsave(&priv->reg_lock, flags); \
661 isr = in_be32(priv->reg + TALITOS_ISR); \
662 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
663 /* Acknowledge interrupt */ \
664 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
665 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
667 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
668 spin_unlock_irqrestore(&priv->reg_lock, flags); \
669 talitos_error(dev, isr & ch_err_mask, isr_lo); \
672 if (likely(isr & ch_done_mask)) { \
673 /* mask further done interrupts. */ \
674 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
675 /* done_task will unmask done interrupts at exit */ \
676 tasklet_schedule(&priv->done_task[tlet]); \
678 spin_unlock_irqrestore(&priv->reg_lock, flags); \
681 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
685 DEF_TALITOS1_INTERRUPT(4ch
, TALITOS1_ISR_4CHDONE
, TALITOS1_ISR_4CHERR
, 0)
687 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
688 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
690 struct device *dev = data; \
691 struct talitos_private *priv = dev_get_drvdata(dev); \
693 unsigned long flags; \
695 spin_lock_irqsave(&priv->reg_lock, flags); \
696 isr = in_be32(priv->reg + TALITOS_ISR); \
697 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
698 /* Acknowledge interrupt */ \
699 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
700 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
702 if (unlikely(isr & ch_err_mask || isr_lo)) { \
703 spin_unlock_irqrestore(&priv->reg_lock, flags); \
704 talitos_error(dev, isr & ch_err_mask, isr_lo); \
707 if (likely(isr & ch_done_mask)) { \
708 /* mask further done interrupts. */ \
709 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
710 /* done_task will unmask done interrupts at exit */ \
711 tasklet_schedule(&priv->done_task[tlet]); \
713 spin_unlock_irqrestore(&priv->reg_lock, flags); \
716 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
720 DEF_TALITOS2_INTERRUPT(4ch
, TALITOS2_ISR_4CHDONE
, TALITOS2_ISR_4CHERR
, 0)
721 DEF_TALITOS2_INTERRUPT(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
, TALITOS2_ISR_CH_0_2_ERR
,
723 DEF_TALITOS2_INTERRUPT(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
, TALITOS2_ISR_CH_1_3_ERR
,
729 static int talitos_rng_data_present(struct hwrng
*rng
, int wait
)
731 struct device
*dev
= (struct device
*)rng
->priv
;
732 struct talitos_private
*priv
= dev_get_drvdata(dev
);
736 for (i
= 0; i
< 20; i
++) {
737 ofl
= in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
) &
738 TALITOS_RNGUSR_LO_OFL
;
747 static int talitos_rng_data_read(struct hwrng
*rng
, u32
*data
)
749 struct device
*dev
= (struct device
*)rng
->priv
;
750 struct talitos_private
*priv
= dev_get_drvdata(dev
);
752 /* rng fifo requires 64-bit accesses */
753 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO
);
754 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO_LO
);
759 static int talitos_rng_init(struct hwrng
*rng
)
761 struct device
*dev
= (struct device
*)rng
->priv
;
762 struct talitos_private
*priv
= dev_get_drvdata(dev
);
763 unsigned int timeout
= TALITOS_TIMEOUT
;
765 setbits32(priv
->reg_rngu
+ TALITOS_EURCR_LO
, TALITOS_RNGURCR_LO_SR
);
766 while (!(in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
)
767 & TALITOS_RNGUSR_LO_RD
)
771 dev_err(dev
, "failed to reset rng hw\n");
775 /* start generating */
776 setbits32(priv
->reg_rngu
+ TALITOS_EUDSR_LO
, 0);
781 static int talitos_register_rng(struct device
*dev
)
783 struct talitos_private
*priv
= dev_get_drvdata(dev
);
786 priv
->rng
.name
= dev_driver_string(dev
),
787 priv
->rng
.init
= talitos_rng_init
,
788 priv
->rng
.data_present
= talitos_rng_data_present
,
789 priv
->rng
.data_read
= talitos_rng_data_read
,
790 priv
->rng
.priv
= (unsigned long)dev
;
792 err
= hwrng_register(&priv
->rng
);
794 priv
->rng_registered
= true;
799 static void talitos_unregister_rng(struct device
*dev
)
801 struct talitos_private
*priv
= dev_get_drvdata(dev
);
803 if (!priv
->rng_registered
)
806 hwrng_unregister(&priv
->rng
);
807 priv
->rng_registered
= false;
813 #define TALITOS_CRA_PRIORITY 3000
815 * Defines a priority for doing AEAD with descriptors type
816 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
818 #define TALITOS_CRA_PRIORITY_AEAD_HSNA (TALITOS_CRA_PRIORITY - 1)
819 #define TALITOS_MAX_KEY_SIZE 96
820 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
825 __be32 desc_hdr_template
;
826 u8 key
[TALITOS_MAX_KEY_SIZE
];
827 u8 iv
[TALITOS_MAX_IV_LENGTH
];
829 unsigned int enckeylen
;
830 unsigned int authkeylen
;
833 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
834 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
836 struct talitos_ahash_req_ctx
{
837 u32 hw_context
[TALITOS_MDEU_MAX_CONTEXT_SIZE
/ sizeof(u32
)];
838 unsigned int hw_context_size
;
839 u8 buf
[HASH_MAX_BLOCK_SIZE
];
840 u8 bufnext
[HASH_MAX_BLOCK_SIZE
];
844 unsigned int to_hash_later
;
846 struct scatterlist bufsl
[2];
847 struct scatterlist
*psrc
;
850 struct talitos_export_state
{
851 u32 hw_context
[TALITOS_MDEU_MAX_CONTEXT_SIZE
/ sizeof(u32
)];
852 u8 buf
[HASH_MAX_BLOCK_SIZE
];
856 unsigned int to_hash_later
;
860 static int aead_setkey(struct crypto_aead
*authenc
,
861 const u8
*key
, unsigned int keylen
)
863 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
864 struct crypto_authenc_keys keys
;
866 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0)
869 if (keys
.authkeylen
+ keys
.enckeylen
> TALITOS_MAX_KEY_SIZE
)
872 memcpy(ctx
->key
, keys
.authkey
, keys
.authkeylen
);
873 memcpy(&ctx
->key
[keys
.authkeylen
], keys
.enckey
, keys
.enckeylen
);
875 ctx
->keylen
= keys
.authkeylen
+ keys
.enckeylen
;
876 ctx
->enckeylen
= keys
.enckeylen
;
877 ctx
->authkeylen
= keys
.authkeylen
;
882 crypto_aead_set_flags(authenc
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
887 * talitos_edesc - s/w-extended descriptor
888 * @src_nents: number of segments in input scatterlist
889 * @dst_nents: number of segments in output scatterlist
890 * @icv_ool: whether ICV is out-of-line
891 * @iv_dma: dma address of iv for checking continuity and link table
892 * @dma_len: length of dma mapped link_tbl space
893 * @dma_link_tbl: bus physical address of link_tbl/buf
894 * @desc: h/w descriptor
895 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
896 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
898 * if decrypting (with authcheck), or either one of src_nents or dst_nents
899 * is greater than 1, an integrity check value is concatenated to the end
902 struct talitos_edesc
{
908 dma_addr_t dma_link_tbl
;
909 struct talitos_desc desc
;
911 struct talitos_ptr link_tbl
[0];
916 static void talitos_sg_unmap(struct device
*dev
,
917 struct talitos_edesc
*edesc
,
918 struct scatterlist
*src
,
919 struct scatterlist
*dst
,
920 unsigned int len
, unsigned int offset
)
922 struct talitos_private
*priv
= dev_get_drvdata(dev
);
923 bool is_sec1
= has_ftr_sec1(priv
);
924 unsigned int src_nents
= edesc
->src_nents
? : 1;
925 unsigned int dst_nents
= edesc
->dst_nents
? : 1;
927 if (is_sec1
&& dst
&& dst_nents
> 1) {
928 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
+ offset
,
929 len
, DMA_FROM_DEVICE
);
930 sg_pcopy_from_buffer(dst
, dst_nents
, edesc
->buf
+ offset
, len
,
934 if (src_nents
== 1 || !is_sec1
)
935 dma_unmap_sg(dev
, src
, src_nents
, DMA_TO_DEVICE
);
937 if (dst
&& (dst_nents
== 1 || !is_sec1
))
938 dma_unmap_sg(dev
, dst
, dst_nents
, DMA_FROM_DEVICE
);
939 } else if (src_nents
== 1 || !is_sec1
) {
940 dma_unmap_sg(dev
, src
, src_nents
, DMA_BIDIRECTIONAL
);
944 static void ipsec_esp_unmap(struct device
*dev
,
945 struct talitos_edesc
*edesc
,
946 struct aead_request
*areq
)
948 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
949 struct talitos_ctx
*ctx
= crypto_aead_ctx(aead
);
950 unsigned int ivsize
= crypto_aead_ivsize(aead
);
952 if (edesc
->desc
.hdr
& DESC_HDR_TYPE_IPSEC_ESP
)
953 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[6],
955 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[3], DMA_TO_DEVICE
);
956 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
957 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[0], DMA_TO_DEVICE
);
959 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
, areq
->cryptlen
,
963 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
966 if (!(edesc
->desc
.hdr
& DESC_HDR_TYPE_IPSEC_ESP
)) {
967 unsigned int dst_nents
= edesc
->dst_nents
? : 1;
969 sg_pcopy_to_buffer(areq
->dst
, dst_nents
, ctx
->iv
, ivsize
,
970 areq
->assoclen
+ areq
->cryptlen
- ivsize
);
975 * ipsec_esp descriptor callbacks
977 static void ipsec_esp_encrypt_done(struct device
*dev
,
978 struct talitos_desc
*desc
, void *context
,
981 struct talitos_private
*priv
= dev_get_drvdata(dev
);
982 bool is_sec1
= has_ftr_sec1(priv
);
983 struct aead_request
*areq
= context
;
984 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
985 unsigned int authsize
= crypto_aead_authsize(authenc
);
986 struct talitos_edesc
*edesc
;
987 struct scatterlist
*sg
;
990 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
992 ipsec_esp_unmap(dev
, edesc
, areq
);
994 /* copy the generated ICV to dst */
995 if (edesc
->icv_ool
) {
997 icvdata
= edesc
->buf
+ areq
->assoclen
+ areq
->cryptlen
;
999 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
1000 edesc
->dst_nents
+ 2];
1001 sg
= sg_last(areq
->dst
, edesc
->dst_nents
);
1002 memcpy((char *)sg_virt(sg
) + sg
->length
- authsize
,
1008 aead_request_complete(areq
, err
);
1011 static void ipsec_esp_decrypt_swauth_done(struct device
*dev
,
1012 struct talitos_desc
*desc
,
1013 void *context
, int err
)
1015 struct aead_request
*req
= context
;
1016 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1017 unsigned int authsize
= crypto_aead_authsize(authenc
);
1018 struct talitos_edesc
*edesc
;
1019 struct scatterlist
*sg
;
1021 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1022 bool is_sec1
= has_ftr_sec1(priv
);
1024 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1026 ipsec_esp_unmap(dev
, edesc
, req
);
1030 sg
= sg_last(req
->dst
, edesc
->dst_nents
? : 1);
1031 icv
= (char *)sg_virt(sg
) + sg
->length
- authsize
;
1033 if (edesc
->dma_len
) {
1035 oicv
= (char *)&edesc
->dma_link_tbl
+
1036 req
->assoclen
+ req
->cryptlen
;
1039 &edesc
->link_tbl
[edesc
->src_nents
+
1040 edesc
->dst_nents
+ 2];
1042 icv
= oicv
+ authsize
;
1044 oicv
= (char *)&edesc
->link_tbl
[0];
1046 err
= crypto_memneq(oicv
, icv
, authsize
) ? -EBADMSG
: 0;
1051 aead_request_complete(req
, err
);
1054 static void ipsec_esp_decrypt_hwauth_done(struct device
*dev
,
1055 struct talitos_desc
*desc
,
1056 void *context
, int err
)
1058 struct aead_request
*req
= context
;
1059 struct talitos_edesc
*edesc
;
1061 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1063 ipsec_esp_unmap(dev
, edesc
, req
);
1065 /* check ICV auth status */
1066 if (!err
&& ((desc
->hdr_lo
& DESC_HDR_LO_ICCR1_MASK
) !=
1067 DESC_HDR_LO_ICCR1_PASS
))
1072 aead_request_complete(req
, err
);
1076 * convert scatterlist to SEC h/w link table format
1077 * stop at cryptlen bytes
1079 static int sg_to_link_tbl_offset(struct scatterlist
*sg
, int sg_count
,
1080 unsigned int offset
, int cryptlen
,
1081 struct talitos_ptr
*link_tbl_ptr
)
1083 int n_sg
= sg_count
;
1086 while (cryptlen
&& sg
&& n_sg
--) {
1087 unsigned int len
= sg_dma_len(sg
);
1089 if (offset
>= len
) {
1099 to_talitos_ptr(link_tbl_ptr
+ count
,
1100 sg_dma_address(sg
) + offset
, 0);
1101 to_talitos_ptr_len(link_tbl_ptr
+ count
, len
, 0);
1102 to_talitos_ptr_ext_set(link_tbl_ptr
+ count
, 0, 0);
1111 /* tag end of link table */
1113 to_talitos_ptr_ext_set(link_tbl_ptr
+ count
- 1,
1114 DESC_PTR_LNKTBL_RETURN
, 0);
1119 int talitos_sg_map(struct device
*dev
, struct scatterlist
*src
,
1120 unsigned int len
, struct talitos_edesc
*edesc
,
1121 struct talitos_ptr
*ptr
,
1122 int sg_count
, unsigned int offset
, int tbl_off
)
1124 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1125 bool is_sec1
= has_ftr_sec1(priv
);
1127 to_talitos_ptr_len(ptr
, len
, is_sec1
);
1128 to_talitos_ptr_ext_set(ptr
, 0, is_sec1
);
1130 if (sg_count
== 1) {
1131 to_talitos_ptr(ptr
, sg_dma_address(src
) + offset
, is_sec1
);
1135 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+ offset
, is_sec1
);
1138 sg_count
= sg_to_link_tbl_offset(src
, sg_count
, offset
, len
,
1139 &edesc
->link_tbl
[tbl_off
]);
1140 if (sg_count
== 1) {
1141 /* Only one segment now, so no link tbl needed*/
1142 copy_talitos_ptr(ptr
, &edesc
->link_tbl
[tbl_off
], is_sec1
);
1145 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+
1146 tbl_off
* sizeof(struct talitos_ptr
), is_sec1
);
1147 to_talitos_ptr_ext_or(ptr
, DESC_PTR_LNKTBL_JUMP
, is_sec1
);
1153 * fill in and submit ipsec_esp descriptor
1155 static int ipsec_esp(struct talitos_edesc
*edesc
, struct aead_request
*areq
,
1156 void (*callback
)(struct device
*dev
,
1157 struct talitos_desc
*desc
,
1158 void *context
, int error
))
1160 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
1161 unsigned int authsize
= crypto_aead_authsize(aead
);
1162 struct talitos_ctx
*ctx
= crypto_aead_ctx(aead
);
1163 struct device
*dev
= ctx
->dev
;
1164 struct talitos_desc
*desc
= &edesc
->desc
;
1165 unsigned int cryptlen
= areq
->cryptlen
;
1166 unsigned int ivsize
= crypto_aead_ivsize(aead
);
1169 int sg_link_tbl_len
;
1170 bool sync_needed
= false;
1171 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1172 bool is_sec1
= has_ftr_sec1(priv
);
1175 map_single_talitos_ptr(dev
, &desc
->ptr
[0], ctx
->authkeylen
, &ctx
->key
,
1178 sg_count
= edesc
->src_nents
?: 1;
1179 if (is_sec1
&& sg_count
> 1)
1180 sg_copy_to_buffer(areq
->src
, sg_count
, edesc
->buf
,
1181 areq
->assoclen
+ cryptlen
);
1183 sg_count
= dma_map_sg(dev
, areq
->src
, sg_count
,
1184 (areq
->src
== areq
->dst
) ?
1185 DMA_BIDIRECTIONAL
: DMA_TO_DEVICE
);
1188 ret
= talitos_sg_map(dev
, areq
->src
, areq
->assoclen
, edesc
,
1189 &desc
->ptr
[1], sg_count
, 0, tbl_off
);
1197 if (desc
->hdr
& DESC_HDR_TYPE_IPSEC_ESP
) {
1198 to_talitos_ptr(&desc
->ptr
[2], edesc
->iv_dma
, is_sec1
);
1199 to_talitos_ptr_len(&desc
->ptr
[2], ivsize
, is_sec1
);
1200 to_talitos_ptr_ext_set(&desc
->ptr
[2], 0, is_sec1
);
1202 to_talitos_ptr(&desc
->ptr
[3], edesc
->iv_dma
, is_sec1
);
1203 to_talitos_ptr_len(&desc
->ptr
[3], ivsize
, is_sec1
);
1204 to_talitos_ptr_ext_set(&desc
->ptr
[3], 0, is_sec1
);
1208 if (desc
->hdr
& DESC_HDR_TYPE_IPSEC_ESP
)
1209 map_single_talitos_ptr(dev
, &desc
->ptr
[3], ctx
->enckeylen
,
1210 (char *)&ctx
->key
+ ctx
->authkeylen
,
1213 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->enckeylen
,
1214 (char *)&ctx
->key
+ ctx
->authkeylen
,
1219 * map and adjust cipher len to aead request cryptlen.
1220 * extent is bytes of HMAC postpended to ciphertext,
1221 * typically 12 for ipsec
1223 to_talitos_ptr_len(&desc
->ptr
[4], cryptlen
, is_sec1
);
1224 to_talitos_ptr_ext_set(&desc
->ptr
[4], 0, is_sec1
);
1226 sg_link_tbl_len
= cryptlen
;
1228 if (desc
->hdr
& DESC_HDR_TYPE_IPSEC_ESP
) {
1229 to_talitos_ptr_ext_set(&desc
->ptr
[4], authsize
, is_sec1
);
1231 if (edesc
->desc
.hdr
& DESC_HDR_MODE1_MDEU_CICV
)
1232 sg_link_tbl_len
+= authsize
;
1235 sg_count
= talitos_sg_map(dev
, areq
->src
, cryptlen
, edesc
,
1236 &desc
->ptr
[4], sg_count
, areq
->assoclen
,
1240 tbl_off
+= sg_count
;
1245 if (areq
->src
!= areq
->dst
) {
1246 sg_count
= edesc
->dst_nents
? : 1;
1247 if (!is_sec1
|| sg_count
== 1)
1248 dma_map_sg(dev
, areq
->dst
, sg_count
, DMA_FROM_DEVICE
);
1251 sg_count
= talitos_sg_map(dev
, areq
->dst
, cryptlen
, edesc
,
1252 &desc
->ptr
[5], sg_count
, areq
->assoclen
,
1255 if (desc
->hdr
& DESC_HDR_TYPE_IPSEC_ESP
)
1256 to_talitos_ptr_ext_or(&desc
->ptr
[5], authsize
, is_sec1
);
1259 edesc
->icv_ool
= true;
1262 if (desc
->hdr
& DESC_HDR_TYPE_IPSEC_ESP
) {
1263 struct talitos_ptr
*tbl_ptr
= &edesc
->link_tbl
[tbl_off
];
1264 int offset
= (edesc
->src_nents
+ edesc
->dst_nents
+ 2) *
1265 sizeof(struct talitos_ptr
) + authsize
;
1267 /* Add an entry to the link table for ICV data */
1268 tbl_ptr
+= sg_count
- 1;
1269 to_talitos_ptr_ext_set(tbl_ptr
, 0, is_sec1
);
1271 to_talitos_ptr_ext_set(tbl_ptr
, DESC_PTR_LNKTBL_RETURN
,
1273 to_talitos_ptr_len(tbl_ptr
, authsize
, is_sec1
);
1275 /* icv data follows link tables */
1276 to_talitos_ptr(tbl_ptr
, edesc
->dma_link_tbl
+ offset
,
1280 edesc
->icv_ool
= false;
1284 if (!(desc
->hdr
& DESC_HDR_TYPE_IPSEC_ESP
)) {
1285 to_talitos_ptr_len(&desc
->ptr
[6], authsize
, is_sec1
);
1286 to_talitos_ptr(&desc
->ptr
[6], edesc
->dma_link_tbl
+
1287 areq
->assoclen
+ cryptlen
, is_sec1
);
1291 if (desc
->hdr
& DESC_HDR_TYPE_IPSEC_ESP
)
1292 map_single_talitos_ptr(dev
, &desc
->ptr
[6], ivsize
, ctx
->iv
,
1296 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1300 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1301 if (ret
!= -EINPROGRESS
) {
1302 ipsec_esp_unmap(dev
, edesc
, areq
);
1309 * allocate and map the extended descriptor
1311 static struct talitos_edesc
*talitos_edesc_alloc(struct device
*dev
,
1312 struct scatterlist
*src
,
1313 struct scatterlist
*dst
,
1315 unsigned int assoclen
,
1316 unsigned int cryptlen
,
1317 unsigned int authsize
,
1318 unsigned int ivsize
,
1323 struct talitos_edesc
*edesc
;
1324 int src_nents
, dst_nents
, alloc_len
, dma_len
, src_len
, dst_len
;
1325 dma_addr_t iv_dma
= 0;
1326 gfp_t flags
= cryptoflags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
1328 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1329 bool is_sec1
= has_ftr_sec1(priv
);
1330 int max_len
= is_sec1
? TALITOS1_MAX_DATA_LEN
: TALITOS2_MAX_DATA_LEN
;
1333 if (cryptlen
+ authsize
> max_len
) {
1334 dev_err(dev
, "length exceeds h/w max limit\n");
1335 return ERR_PTR(-EINVAL
);
1339 iv_dma
= dma_map_single(dev
, iv
, ivsize
, DMA_TO_DEVICE
);
1341 if (!dst
|| dst
== src
) {
1342 src_len
= assoclen
+ cryptlen
+ authsize
;
1343 src_nents
= sg_nents_for_len(src
, src_len
);
1344 if (src_nents
< 0) {
1345 dev_err(dev
, "Invalid number of src SG.\n");
1346 err
= ERR_PTR(-EINVAL
);
1349 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1350 dst_nents
= dst
? src_nents
: 0;
1352 } else { /* dst && dst != src*/
1353 src_len
= assoclen
+ cryptlen
+ (encrypt
? 0 : authsize
);
1354 src_nents
= sg_nents_for_len(src
, src_len
);
1355 if (src_nents
< 0) {
1356 dev_err(dev
, "Invalid number of src SG.\n");
1357 err
= ERR_PTR(-EINVAL
);
1360 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1361 dst_len
= assoclen
+ cryptlen
+ (encrypt
? authsize
: 0);
1362 dst_nents
= sg_nents_for_len(dst
, dst_len
);
1363 if (dst_nents
< 0) {
1364 dev_err(dev
, "Invalid number of dst SG.\n");
1365 err
= ERR_PTR(-EINVAL
);
1368 dst_nents
= (dst_nents
== 1) ? 0 : dst_nents
;
1372 * allocate space for base edesc plus the link tables,
1373 * allowing for two separate entries for AD and generated ICV (+ 2),
1374 * and space for two sets of ICVs (stashed and generated)
1376 alloc_len
= sizeof(struct talitos_edesc
);
1377 if (src_nents
|| dst_nents
) {
1379 dma_len
= (src_nents
? src_len
: 0) +
1380 (dst_nents
? dst_len
: 0);
1382 dma_len
= (src_nents
+ dst_nents
+ 2) *
1383 sizeof(struct talitos_ptr
) + authsize
* 2;
1384 alloc_len
+= dma_len
;
1387 alloc_len
+= icv_stashing
? authsize
: 0;
1390 edesc
= kmalloc(alloc_len
, GFP_DMA
| flags
);
1392 dev_err(dev
, "could not allocate edescriptor\n");
1393 err
= ERR_PTR(-ENOMEM
);
1397 edesc
->src_nents
= src_nents
;
1398 edesc
->dst_nents
= dst_nents
;
1399 edesc
->iv_dma
= iv_dma
;
1400 edesc
->dma_len
= dma_len
;
1402 edesc
->dma_link_tbl
= dma_map_single(dev
, &edesc
->link_tbl
[0],
1409 dma_unmap_single(dev
, iv_dma
, ivsize
, DMA_TO_DEVICE
);
1413 static struct talitos_edesc
*aead_edesc_alloc(struct aead_request
*areq
, u8
*iv
,
1414 int icv_stashing
, bool encrypt
)
1416 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1417 unsigned int authsize
= crypto_aead_authsize(authenc
);
1418 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1419 unsigned int ivsize
= crypto_aead_ivsize(authenc
);
1421 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1422 iv
, areq
->assoclen
, areq
->cryptlen
,
1423 authsize
, ivsize
, icv_stashing
,
1424 areq
->base
.flags
, encrypt
);
1427 static int aead_encrypt(struct aead_request
*req
)
1429 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1430 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1431 struct talitos_edesc
*edesc
;
1433 /* allocate extended descriptor */
1434 edesc
= aead_edesc_alloc(req
, req
->iv
, 0, true);
1436 return PTR_ERR(edesc
);
1439 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1441 return ipsec_esp(edesc
, req
, ipsec_esp_encrypt_done
);
1444 static int aead_decrypt(struct aead_request
*req
)
1446 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1447 unsigned int authsize
= crypto_aead_authsize(authenc
);
1448 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1449 struct talitos_private
*priv
= dev_get_drvdata(ctx
->dev
);
1450 struct talitos_edesc
*edesc
;
1451 struct scatterlist
*sg
;
1454 req
->cryptlen
-= authsize
;
1456 /* allocate extended descriptor */
1457 edesc
= aead_edesc_alloc(req
, req
->iv
, 1, false);
1459 return PTR_ERR(edesc
);
1461 if ((priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
) &&
1462 ((!edesc
->src_nents
&& !edesc
->dst_nents
) ||
1463 priv
->features
& TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
)) {
1465 /* decrypt and check the ICV */
1466 edesc
->desc
.hdr
= ctx
->desc_hdr_template
|
1467 DESC_HDR_DIR_INBOUND
|
1468 DESC_HDR_MODE1_MDEU_CICV
;
1470 /* reset integrity check result bits */
1471 edesc
->desc
.hdr_lo
= 0;
1473 return ipsec_esp(edesc
, req
, ipsec_esp_decrypt_hwauth_done
);
1476 /* Have to check the ICV with software */
1477 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1479 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1481 icvdata
= (char *)&edesc
->link_tbl
[edesc
->src_nents
+
1482 edesc
->dst_nents
+ 2];
1484 icvdata
= &edesc
->link_tbl
[0];
1486 sg
= sg_last(req
->src
, edesc
->src_nents
? : 1);
1488 memcpy(icvdata
, (char *)sg_virt(sg
) + sg
->length
- authsize
, authsize
);
1490 return ipsec_esp(edesc
, req
, ipsec_esp_decrypt_swauth_done
);
1493 static int ablkcipher_setkey(struct crypto_ablkcipher
*cipher
,
1494 const u8
*key
, unsigned int keylen
)
1496 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1498 memcpy(&ctx
->key
, key
, keylen
);
1499 ctx
->keylen
= keylen
;
1504 static void common_nonsnoop_unmap(struct device
*dev
,
1505 struct talitos_edesc
*edesc
,
1506 struct ablkcipher_request
*areq
)
1508 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1510 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
, areq
->nbytes
, 0);
1511 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
1512 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1], DMA_TO_DEVICE
);
1515 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1519 static void ablkcipher_done(struct device
*dev
,
1520 struct talitos_desc
*desc
, void *context
,
1523 struct ablkcipher_request
*areq
= context
;
1524 struct talitos_edesc
*edesc
;
1526 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1528 common_nonsnoop_unmap(dev
, edesc
, areq
);
1532 areq
->base
.complete(&areq
->base
, err
);
1535 static int common_nonsnoop(struct talitos_edesc
*edesc
,
1536 struct ablkcipher_request
*areq
,
1537 void (*callback
) (struct device
*dev
,
1538 struct talitos_desc
*desc
,
1539 void *context
, int error
))
1541 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1542 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1543 struct device
*dev
= ctx
->dev
;
1544 struct talitos_desc
*desc
= &edesc
->desc
;
1545 unsigned int cryptlen
= areq
->nbytes
;
1546 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1548 bool sync_needed
= false;
1549 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1550 bool is_sec1
= has_ftr_sec1(priv
);
1552 /* first DWORD empty */
1553 desc
->ptr
[0] = zero_entry
;
1556 to_talitos_ptr(&desc
->ptr
[1], edesc
->iv_dma
, is_sec1
);
1557 to_talitos_ptr_len(&desc
->ptr
[1], ivsize
, is_sec1
);
1558 to_talitos_ptr_ext_set(&desc
->ptr
[1], 0, is_sec1
);
1561 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1562 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1564 sg_count
= edesc
->src_nents
?: 1;
1565 if (is_sec1
&& sg_count
> 1)
1566 sg_copy_to_buffer(areq
->src
, sg_count
, edesc
->buf
,
1569 sg_count
= dma_map_sg(dev
, areq
->src
, sg_count
,
1570 (areq
->src
== areq
->dst
) ?
1571 DMA_BIDIRECTIONAL
: DMA_TO_DEVICE
);
1575 sg_count
= talitos_sg_map(dev
, areq
->src
, cryptlen
, edesc
,
1576 &desc
->ptr
[3], sg_count
, 0, 0);
1581 if (areq
->src
!= areq
->dst
) {
1582 sg_count
= edesc
->dst_nents
? : 1;
1583 if (!is_sec1
|| sg_count
== 1)
1584 dma_map_sg(dev
, areq
->dst
, sg_count
, DMA_FROM_DEVICE
);
1587 ret
= talitos_sg_map(dev
, areq
->dst
, cryptlen
, edesc
, &desc
->ptr
[4],
1588 sg_count
, 0, (edesc
->src_nents
+ 1));
1593 map_single_talitos_ptr(dev
, &desc
->ptr
[5], ivsize
, ctx
->iv
,
1596 /* last DWORD empty */
1597 desc
->ptr
[6] = zero_entry
;
1600 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1601 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1603 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1604 if (ret
!= -EINPROGRESS
) {
1605 common_nonsnoop_unmap(dev
, edesc
, areq
);
1611 static struct talitos_edesc
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1614 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1615 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1616 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1618 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1619 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1620 areq
->base
.flags
, encrypt
);
1623 static int ablkcipher_encrypt(struct ablkcipher_request
*areq
)
1625 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1626 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1627 struct talitos_edesc
*edesc
;
1629 /* allocate extended descriptor */
1630 edesc
= ablkcipher_edesc_alloc(areq
, true);
1632 return PTR_ERR(edesc
);
1635 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1637 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1640 static int ablkcipher_decrypt(struct ablkcipher_request
*areq
)
1642 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1643 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1644 struct talitos_edesc
*edesc
;
1646 /* allocate extended descriptor */
1647 edesc
= ablkcipher_edesc_alloc(areq
, false);
1649 return PTR_ERR(edesc
);
1651 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1653 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1656 static void common_nonsnoop_hash_unmap(struct device
*dev
,
1657 struct talitos_edesc
*edesc
,
1658 struct ahash_request
*areq
)
1660 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1661 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1662 bool is_sec1
= has_ftr_sec1(priv
);
1664 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1666 talitos_sg_unmap(dev
, edesc
, req_ctx
->psrc
, NULL
, 0, 0);
1668 /* When using hashctx-in, must unmap it. */
1669 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[1], is_sec1
))
1670 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1],
1673 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[2], is_sec1
))
1674 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2],
1678 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1683 static void ahash_done(struct device
*dev
,
1684 struct talitos_desc
*desc
, void *context
,
1687 struct ahash_request
*areq
= context
;
1688 struct talitos_edesc
*edesc
=
1689 container_of(desc
, struct talitos_edesc
, desc
);
1690 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1692 if (!req_ctx
->last
&& req_ctx
->to_hash_later
) {
1693 /* Position any partial block for next update/final/finup */
1694 memcpy(req_ctx
->buf
, req_ctx
->bufnext
, req_ctx
->to_hash_later
);
1695 req_ctx
->nbuf
= req_ctx
->to_hash_later
;
1697 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1701 areq
->base
.complete(&areq
->base
, err
);
1705 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1706 * ourself and submit a padded block
1708 void talitos_handle_buggy_hash(struct talitos_ctx
*ctx
,
1709 struct talitos_edesc
*edesc
,
1710 struct talitos_ptr
*ptr
)
1712 static u8 padded_hash
[64] = {
1713 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1714 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1715 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1716 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1719 pr_err_once("Bug in SEC1, padding ourself\n");
1720 edesc
->desc
.hdr
&= ~DESC_HDR_MODE0_MDEU_PAD
;
1721 map_single_talitos_ptr(ctx
->dev
, ptr
, sizeof(padded_hash
),
1722 (char *)padded_hash
, DMA_TO_DEVICE
);
1725 static int common_nonsnoop_hash(struct talitos_edesc
*edesc
,
1726 struct ahash_request
*areq
, unsigned int length
,
1727 void (*callback
) (struct device
*dev
,
1728 struct talitos_desc
*desc
,
1729 void *context
, int error
))
1731 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1732 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1733 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1734 struct device
*dev
= ctx
->dev
;
1735 struct talitos_desc
*desc
= &edesc
->desc
;
1737 bool sync_needed
= false;
1738 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1739 bool is_sec1
= has_ftr_sec1(priv
);
1742 /* first DWORD empty */
1743 desc
->ptr
[0] = zero_entry
;
1745 /* hash context in */
1746 if (!req_ctx
->first
|| req_ctx
->swinit
) {
1747 map_single_talitos_ptr(dev
, &desc
->ptr
[1],
1748 req_ctx
->hw_context_size
,
1749 (char *)req_ctx
->hw_context
,
1751 req_ctx
->swinit
= 0;
1753 desc
->ptr
[1] = zero_entry
;
1754 /* Indicate next op is not the first. */
1760 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1761 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1763 desc
->ptr
[2] = zero_entry
;
1765 sg_count
= edesc
->src_nents
?: 1;
1766 if (is_sec1
&& sg_count
> 1)
1767 sg_copy_to_buffer(areq
->src
, sg_count
, edesc
->buf
, length
);
1769 sg_count
= dma_map_sg(dev
, req_ctx
->psrc
, sg_count
,
1774 sg_count
= talitos_sg_map(dev
, req_ctx
->psrc
, length
, edesc
,
1775 &desc
->ptr
[3], sg_count
, 0, 0);
1779 /* fifth DWORD empty */
1780 desc
->ptr
[4] = zero_entry
;
1782 /* hash/HMAC out -or- hash context out */
1784 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1785 crypto_ahash_digestsize(tfm
),
1786 areq
->result
, DMA_FROM_DEVICE
);
1788 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1789 req_ctx
->hw_context_size
,
1790 req_ctx
->hw_context
, DMA_FROM_DEVICE
);
1792 /* last DWORD empty */
1793 desc
->ptr
[6] = zero_entry
;
1795 if (is_sec1
&& from_talitos_ptr_len(&desc
->ptr
[3], true) == 0)
1796 talitos_handle_buggy_hash(ctx
, edesc
, &desc
->ptr
[3]);
1799 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1800 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1802 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1803 if (ret
!= -EINPROGRESS
) {
1804 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1810 static struct talitos_edesc
*ahash_edesc_alloc(struct ahash_request
*areq
,
1811 unsigned int nbytes
)
1813 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1814 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1815 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1817 return talitos_edesc_alloc(ctx
->dev
, req_ctx
->psrc
, NULL
, NULL
, 0,
1818 nbytes
, 0, 0, 0, areq
->base
.flags
, false);
1821 static int ahash_init(struct ahash_request
*areq
)
1823 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1824 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1826 /* Initialize the context */
1828 req_ctx
->first
= 1; /* first indicates h/w must init its context */
1829 req_ctx
->swinit
= 0; /* assume h/w init of context */
1830 req_ctx
->hw_context_size
=
1831 (crypto_ahash_digestsize(tfm
) <= SHA256_DIGEST_SIZE
)
1832 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1833 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
;
1839 * on h/w without explicit sha224 support, we initialize h/w context
1840 * manually with sha224 constants, and tell it to run sha256.
1842 static int ahash_init_sha224_swinit(struct ahash_request
*areq
)
1844 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1847 req_ctx
->swinit
= 1;/* prevent h/w initting context with sha256 values*/
1849 req_ctx
->hw_context
[0] = SHA224_H0
;
1850 req_ctx
->hw_context
[1] = SHA224_H1
;
1851 req_ctx
->hw_context
[2] = SHA224_H2
;
1852 req_ctx
->hw_context
[3] = SHA224_H3
;
1853 req_ctx
->hw_context
[4] = SHA224_H4
;
1854 req_ctx
->hw_context
[5] = SHA224_H5
;
1855 req_ctx
->hw_context
[6] = SHA224_H6
;
1856 req_ctx
->hw_context
[7] = SHA224_H7
;
1858 /* init 64-bit count */
1859 req_ctx
->hw_context
[8] = 0;
1860 req_ctx
->hw_context
[9] = 0;
1865 static int ahash_process_req(struct ahash_request
*areq
, unsigned int nbytes
)
1867 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1868 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1869 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1870 struct talitos_edesc
*edesc
;
1871 unsigned int blocksize
=
1872 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1873 unsigned int nbytes_to_hash
;
1874 unsigned int to_hash_later
;
1878 if (!req_ctx
->last
&& (nbytes
+ req_ctx
->nbuf
<= blocksize
)) {
1879 /* Buffer up to one whole block */
1880 nents
= sg_nents_for_len(areq
->src
, nbytes
);
1882 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
1885 sg_copy_to_buffer(areq
->src
, nents
,
1886 req_ctx
->buf
+ req_ctx
->nbuf
, nbytes
);
1887 req_ctx
->nbuf
+= nbytes
;
1891 /* At least (blocksize + 1) bytes are available to hash */
1892 nbytes_to_hash
= nbytes
+ req_ctx
->nbuf
;
1893 to_hash_later
= nbytes_to_hash
& (blocksize
- 1);
1897 else if (to_hash_later
)
1898 /* There is a partial block. Hash the full block(s) now */
1899 nbytes_to_hash
-= to_hash_later
;
1901 /* Keep one block buffered */
1902 nbytes_to_hash
-= blocksize
;
1903 to_hash_later
= blocksize
;
1906 /* Chain in any previously buffered data */
1907 if (req_ctx
->nbuf
) {
1908 nsg
= (req_ctx
->nbuf
< nbytes_to_hash
) ? 2 : 1;
1909 sg_init_table(req_ctx
->bufsl
, nsg
);
1910 sg_set_buf(req_ctx
->bufsl
, req_ctx
->buf
, req_ctx
->nbuf
);
1912 sg_chain(req_ctx
->bufsl
, 2, areq
->src
);
1913 req_ctx
->psrc
= req_ctx
->bufsl
;
1915 req_ctx
->psrc
= areq
->src
;
1917 if (to_hash_later
) {
1918 nents
= sg_nents_for_len(areq
->src
, nbytes
);
1920 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
1923 sg_pcopy_to_buffer(areq
->src
, nents
,
1926 nbytes
- to_hash_later
);
1928 req_ctx
->to_hash_later
= to_hash_later
;
1930 /* Allocate extended descriptor */
1931 edesc
= ahash_edesc_alloc(areq
, nbytes_to_hash
);
1933 return PTR_ERR(edesc
);
1935 edesc
->desc
.hdr
= ctx
->desc_hdr_template
;
1937 /* On last one, request SEC to pad; otherwise continue */
1939 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_PAD
;
1941 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_CONT
;
1943 /* request SEC to INIT hash. */
1944 if (req_ctx
->first
&& !req_ctx
->swinit
)
1945 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_INIT
;
1947 /* When the tfm context has a keylen, it's an HMAC.
1948 * A first or last (ie. not middle) descriptor must request HMAC.
1950 if (ctx
->keylen
&& (req_ctx
->first
|| req_ctx
->last
))
1951 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_HMAC
;
1953 return common_nonsnoop_hash(edesc
, areq
, nbytes_to_hash
,
1957 static int ahash_update(struct ahash_request
*areq
)
1959 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1963 return ahash_process_req(areq
, areq
->nbytes
);
1966 static int ahash_final(struct ahash_request
*areq
)
1968 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1972 return ahash_process_req(areq
, 0);
1975 static int ahash_finup(struct ahash_request
*areq
)
1977 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1981 return ahash_process_req(areq
, areq
->nbytes
);
1984 static int ahash_digest(struct ahash_request
*areq
)
1986 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1987 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(areq
);
1992 return ahash_process_req(areq
, areq
->nbytes
);
1995 static int ahash_export(struct ahash_request
*areq
, void *out
)
1997 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1998 struct talitos_export_state
*export
= out
;
2000 memcpy(export
->hw_context
, req_ctx
->hw_context
,
2001 req_ctx
->hw_context_size
);
2002 memcpy(export
->buf
, req_ctx
->buf
, req_ctx
->nbuf
);
2003 export
->swinit
= req_ctx
->swinit
;
2004 export
->first
= req_ctx
->first
;
2005 export
->last
= req_ctx
->last
;
2006 export
->to_hash_later
= req_ctx
->to_hash_later
;
2007 export
->nbuf
= req_ctx
->nbuf
;
2012 static int ahash_import(struct ahash_request
*areq
, const void *in
)
2014 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
2015 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
2016 const struct talitos_export_state
*export
= in
;
2018 memset(req_ctx
, 0, sizeof(*req_ctx
));
2019 req_ctx
->hw_context_size
=
2020 (crypto_ahash_digestsize(tfm
) <= SHA256_DIGEST_SIZE
)
2021 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2022 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
;
2023 memcpy(req_ctx
->hw_context
, export
->hw_context
,
2024 req_ctx
->hw_context_size
);
2025 memcpy(req_ctx
->buf
, export
->buf
, export
->nbuf
);
2026 req_ctx
->swinit
= export
->swinit
;
2027 req_ctx
->first
= export
->first
;
2028 req_ctx
->last
= export
->last
;
2029 req_ctx
->to_hash_later
= export
->to_hash_later
;
2030 req_ctx
->nbuf
= export
->nbuf
;
2035 struct keyhash_result
{
2036 struct completion completion
;
2040 static void keyhash_complete(struct crypto_async_request
*req
, int err
)
2042 struct keyhash_result
*res
= req
->data
;
2044 if (err
== -EINPROGRESS
)
2048 complete(&res
->completion
);
2051 static int keyhash(struct crypto_ahash
*tfm
, const u8
*key
, unsigned int keylen
,
2054 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
2056 struct scatterlist sg
[1];
2057 struct ahash_request
*req
;
2058 struct keyhash_result hresult
;
2061 init_completion(&hresult
.completion
);
2063 req
= ahash_request_alloc(tfm
, GFP_KERNEL
);
2067 /* Keep tfm keylen == 0 during hash of the long key */
2069 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
2070 keyhash_complete
, &hresult
);
2072 sg_init_one(&sg
[0], key
, keylen
);
2074 ahash_request_set_crypt(req
, sg
, hash
, keylen
);
2075 ret
= crypto_ahash_digest(req
);
2081 ret
= wait_for_completion_interruptible(
2082 &hresult
.completion
);
2089 ahash_request_free(req
);
2094 static int ahash_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
2095 unsigned int keylen
)
2097 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
2098 unsigned int blocksize
=
2099 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
2100 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
2101 unsigned int keysize
= keylen
;
2102 u8 hash
[SHA512_DIGEST_SIZE
];
2105 if (keylen
<= blocksize
)
2106 memcpy(ctx
->key
, key
, keysize
);
2108 /* Must get the hash of the long key */
2109 ret
= keyhash(tfm
, key
, keylen
, hash
);
2112 crypto_ahash_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
2116 keysize
= digestsize
;
2117 memcpy(ctx
->key
, hash
, digestsize
);
2120 ctx
->keylen
= keysize
;
2126 struct talitos_alg_template
{
2130 struct crypto_alg crypto
;
2131 struct ahash_alg hash
;
2132 struct aead_alg aead
;
2134 __be32 desc_hdr_template
;
2137 static struct talitos_alg_template driver_algs
[] = {
2138 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2139 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2142 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
2143 .cra_driver_name
= "authenc-hmac-sha1-"
2145 .cra_blocksize
= AES_BLOCK_SIZE
,
2146 .cra_flags
= CRYPTO_ALG_ASYNC
,
2148 .ivsize
= AES_BLOCK_SIZE
,
2149 .maxauthsize
= SHA1_DIGEST_SIZE
,
2151 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2152 DESC_HDR_SEL0_AESU
|
2153 DESC_HDR_MODE0_AESU_CBC
|
2154 DESC_HDR_SEL1_MDEUA
|
2155 DESC_HDR_MODE1_MDEU_INIT
|
2156 DESC_HDR_MODE1_MDEU_PAD
|
2157 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2159 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2160 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2163 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
2164 .cra_driver_name
= "authenc-hmac-sha1-"
2166 .cra_blocksize
= AES_BLOCK_SIZE
,
2167 .cra_flags
= CRYPTO_ALG_ASYNC
,
2169 .ivsize
= AES_BLOCK_SIZE
,
2170 .maxauthsize
= SHA1_DIGEST_SIZE
,
2172 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2173 DESC_HDR_SEL0_AESU
|
2174 DESC_HDR_MODE0_AESU_CBC
|
2175 DESC_HDR_SEL1_MDEUA
|
2176 DESC_HDR_MODE1_MDEU_INIT
|
2177 DESC_HDR_MODE1_MDEU_PAD
|
2178 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2180 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2183 .cra_name
= "authenc(hmac(sha1),"
2185 .cra_driver_name
= "authenc-hmac-sha1-"
2187 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2188 .cra_flags
= CRYPTO_ALG_ASYNC
,
2190 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2191 .maxauthsize
= SHA1_DIGEST_SIZE
,
2193 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2195 DESC_HDR_MODE0_DEU_CBC
|
2196 DESC_HDR_MODE0_DEU_3DES
|
2197 DESC_HDR_SEL1_MDEUA
|
2198 DESC_HDR_MODE1_MDEU_INIT
|
2199 DESC_HDR_MODE1_MDEU_PAD
|
2200 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2202 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2203 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2206 .cra_name
= "authenc(hmac(sha1),"
2208 .cra_driver_name
= "authenc-hmac-sha1-"
2210 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2211 .cra_flags
= CRYPTO_ALG_ASYNC
,
2213 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2214 .maxauthsize
= SHA1_DIGEST_SIZE
,
2216 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2218 DESC_HDR_MODE0_DEU_CBC
|
2219 DESC_HDR_MODE0_DEU_3DES
|
2220 DESC_HDR_SEL1_MDEUA
|
2221 DESC_HDR_MODE1_MDEU_INIT
|
2222 DESC_HDR_MODE1_MDEU_PAD
|
2223 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2225 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2228 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
2229 .cra_driver_name
= "authenc-hmac-sha224-"
2231 .cra_blocksize
= AES_BLOCK_SIZE
,
2232 .cra_flags
= CRYPTO_ALG_ASYNC
,
2234 .ivsize
= AES_BLOCK_SIZE
,
2235 .maxauthsize
= SHA224_DIGEST_SIZE
,
2237 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2238 DESC_HDR_SEL0_AESU
|
2239 DESC_HDR_MODE0_AESU_CBC
|
2240 DESC_HDR_SEL1_MDEUA
|
2241 DESC_HDR_MODE1_MDEU_INIT
|
2242 DESC_HDR_MODE1_MDEU_PAD
|
2243 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2245 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2246 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2249 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
2250 .cra_driver_name
= "authenc-hmac-sha224-"
2252 .cra_blocksize
= AES_BLOCK_SIZE
,
2253 .cra_flags
= CRYPTO_ALG_ASYNC
,
2255 .ivsize
= AES_BLOCK_SIZE
,
2256 .maxauthsize
= SHA224_DIGEST_SIZE
,
2258 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2259 DESC_HDR_SEL0_AESU
|
2260 DESC_HDR_MODE0_AESU_CBC
|
2261 DESC_HDR_SEL1_MDEUA
|
2262 DESC_HDR_MODE1_MDEU_INIT
|
2263 DESC_HDR_MODE1_MDEU_PAD
|
2264 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2266 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2269 .cra_name
= "authenc(hmac(sha224),"
2271 .cra_driver_name
= "authenc-hmac-sha224-"
2273 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2274 .cra_flags
= CRYPTO_ALG_ASYNC
,
2276 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2277 .maxauthsize
= SHA224_DIGEST_SIZE
,
2279 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2281 DESC_HDR_MODE0_DEU_CBC
|
2282 DESC_HDR_MODE0_DEU_3DES
|
2283 DESC_HDR_SEL1_MDEUA
|
2284 DESC_HDR_MODE1_MDEU_INIT
|
2285 DESC_HDR_MODE1_MDEU_PAD
|
2286 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2288 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2289 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2292 .cra_name
= "authenc(hmac(sha224),"
2294 .cra_driver_name
= "authenc-hmac-sha224-"
2296 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2297 .cra_flags
= CRYPTO_ALG_ASYNC
,
2299 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2300 .maxauthsize
= SHA224_DIGEST_SIZE
,
2302 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2304 DESC_HDR_MODE0_DEU_CBC
|
2305 DESC_HDR_MODE0_DEU_3DES
|
2306 DESC_HDR_SEL1_MDEUA
|
2307 DESC_HDR_MODE1_MDEU_INIT
|
2308 DESC_HDR_MODE1_MDEU_PAD
|
2309 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2311 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2314 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
2315 .cra_driver_name
= "authenc-hmac-sha256-"
2317 .cra_blocksize
= AES_BLOCK_SIZE
,
2318 .cra_flags
= CRYPTO_ALG_ASYNC
,
2320 .ivsize
= AES_BLOCK_SIZE
,
2321 .maxauthsize
= SHA256_DIGEST_SIZE
,
2323 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2324 DESC_HDR_SEL0_AESU
|
2325 DESC_HDR_MODE0_AESU_CBC
|
2326 DESC_HDR_SEL1_MDEUA
|
2327 DESC_HDR_MODE1_MDEU_INIT
|
2328 DESC_HDR_MODE1_MDEU_PAD
|
2329 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2331 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2332 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2335 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
2336 .cra_driver_name
= "authenc-hmac-sha256-"
2338 .cra_blocksize
= AES_BLOCK_SIZE
,
2339 .cra_flags
= CRYPTO_ALG_ASYNC
,
2341 .ivsize
= AES_BLOCK_SIZE
,
2342 .maxauthsize
= SHA256_DIGEST_SIZE
,
2344 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2345 DESC_HDR_SEL0_AESU
|
2346 DESC_HDR_MODE0_AESU_CBC
|
2347 DESC_HDR_SEL1_MDEUA
|
2348 DESC_HDR_MODE1_MDEU_INIT
|
2349 DESC_HDR_MODE1_MDEU_PAD
|
2350 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2352 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2355 .cra_name
= "authenc(hmac(sha256),"
2357 .cra_driver_name
= "authenc-hmac-sha256-"
2359 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2360 .cra_flags
= CRYPTO_ALG_ASYNC
,
2362 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2363 .maxauthsize
= SHA256_DIGEST_SIZE
,
2365 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2367 DESC_HDR_MODE0_DEU_CBC
|
2368 DESC_HDR_MODE0_DEU_3DES
|
2369 DESC_HDR_SEL1_MDEUA
|
2370 DESC_HDR_MODE1_MDEU_INIT
|
2371 DESC_HDR_MODE1_MDEU_PAD
|
2372 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2374 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2375 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2378 .cra_name
= "authenc(hmac(sha256),"
2380 .cra_driver_name
= "authenc-hmac-sha256-"
2382 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2383 .cra_flags
= CRYPTO_ALG_ASYNC
,
2385 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2386 .maxauthsize
= SHA256_DIGEST_SIZE
,
2388 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2390 DESC_HDR_MODE0_DEU_CBC
|
2391 DESC_HDR_MODE0_DEU_3DES
|
2392 DESC_HDR_SEL1_MDEUA
|
2393 DESC_HDR_MODE1_MDEU_INIT
|
2394 DESC_HDR_MODE1_MDEU_PAD
|
2395 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2397 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2400 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
2401 .cra_driver_name
= "authenc-hmac-sha384-"
2403 .cra_blocksize
= AES_BLOCK_SIZE
,
2404 .cra_flags
= CRYPTO_ALG_ASYNC
,
2406 .ivsize
= AES_BLOCK_SIZE
,
2407 .maxauthsize
= SHA384_DIGEST_SIZE
,
2409 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2410 DESC_HDR_SEL0_AESU
|
2411 DESC_HDR_MODE0_AESU_CBC
|
2412 DESC_HDR_SEL1_MDEUB
|
2413 DESC_HDR_MODE1_MDEU_INIT
|
2414 DESC_HDR_MODE1_MDEU_PAD
|
2415 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2417 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2420 .cra_name
= "authenc(hmac(sha384),"
2422 .cra_driver_name
= "authenc-hmac-sha384-"
2424 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2425 .cra_flags
= CRYPTO_ALG_ASYNC
,
2427 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2428 .maxauthsize
= SHA384_DIGEST_SIZE
,
2430 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2432 DESC_HDR_MODE0_DEU_CBC
|
2433 DESC_HDR_MODE0_DEU_3DES
|
2434 DESC_HDR_SEL1_MDEUB
|
2435 DESC_HDR_MODE1_MDEU_INIT
|
2436 DESC_HDR_MODE1_MDEU_PAD
|
2437 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2439 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2442 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
2443 .cra_driver_name
= "authenc-hmac-sha512-"
2445 .cra_blocksize
= AES_BLOCK_SIZE
,
2446 .cra_flags
= CRYPTO_ALG_ASYNC
,
2448 .ivsize
= AES_BLOCK_SIZE
,
2449 .maxauthsize
= SHA512_DIGEST_SIZE
,
2451 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2452 DESC_HDR_SEL0_AESU
|
2453 DESC_HDR_MODE0_AESU_CBC
|
2454 DESC_HDR_SEL1_MDEUB
|
2455 DESC_HDR_MODE1_MDEU_INIT
|
2456 DESC_HDR_MODE1_MDEU_PAD
|
2457 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2459 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2462 .cra_name
= "authenc(hmac(sha512),"
2464 .cra_driver_name
= "authenc-hmac-sha512-"
2466 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2467 .cra_flags
= CRYPTO_ALG_ASYNC
,
2469 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2470 .maxauthsize
= SHA512_DIGEST_SIZE
,
2472 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2474 DESC_HDR_MODE0_DEU_CBC
|
2475 DESC_HDR_MODE0_DEU_3DES
|
2476 DESC_HDR_SEL1_MDEUB
|
2477 DESC_HDR_MODE1_MDEU_INIT
|
2478 DESC_HDR_MODE1_MDEU_PAD
|
2479 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2481 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2484 .cra_name
= "authenc(hmac(md5),cbc(aes))",
2485 .cra_driver_name
= "authenc-hmac-md5-"
2487 .cra_blocksize
= AES_BLOCK_SIZE
,
2488 .cra_flags
= CRYPTO_ALG_ASYNC
,
2490 .ivsize
= AES_BLOCK_SIZE
,
2491 .maxauthsize
= MD5_DIGEST_SIZE
,
2493 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2494 DESC_HDR_SEL0_AESU
|
2495 DESC_HDR_MODE0_AESU_CBC
|
2496 DESC_HDR_SEL1_MDEUA
|
2497 DESC_HDR_MODE1_MDEU_INIT
|
2498 DESC_HDR_MODE1_MDEU_PAD
|
2499 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2501 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2502 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2505 .cra_name
= "authenc(hmac(md5),cbc(aes))",
2506 .cra_driver_name
= "authenc-hmac-md5-"
2508 .cra_blocksize
= AES_BLOCK_SIZE
,
2509 .cra_flags
= CRYPTO_ALG_ASYNC
,
2511 .ivsize
= AES_BLOCK_SIZE
,
2512 .maxauthsize
= MD5_DIGEST_SIZE
,
2514 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2515 DESC_HDR_SEL0_AESU
|
2516 DESC_HDR_MODE0_AESU_CBC
|
2517 DESC_HDR_SEL1_MDEUA
|
2518 DESC_HDR_MODE1_MDEU_INIT
|
2519 DESC_HDR_MODE1_MDEU_PAD
|
2520 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2522 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2525 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
2526 .cra_driver_name
= "authenc-hmac-md5-"
2528 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2529 .cra_flags
= CRYPTO_ALG_ASYNC
,
2531 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2532 .maxauthsize
= MD5_DIGEST_SIZE
,
2534 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2536 DESC_HDR_MODE0_DEU_CBC
|
2537 DESC_HDR_MODE0_DEU_3DES
|
2538 DESC_HDR_SEL1_MDEUA
|
2539 DESC_HDR_MODE1_MDEU_INIT
|
2540 DESC_HDR_MODE1_MDEU_PAD
|
2541 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2543 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2544 .priority
= TALITOS_CRA_PRIORITY_AEAD_HSNA
,
2547 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
2548 .cra_driver_name
= "authenc-hmac-md5-"
2550 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2551 .cra_flags
= CRYPTO_ALG_ASYNC
,
2553 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2554 .maxauthsize
= MD5_DIGEST_SIZE
,
2556 .desc_hdr_template
= DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU
|
2558 DESC_HDR_MODE0_DEU_CBC
|
2559 DESC_HDR_MODE0_DEU_3DES
|
2560 DESC_HDR_SEL1_MDEUA
|
2561 DESC_HDR_MODE1_MDEU_INIT
|
2562 DESC_HDR_MODE1_MDEU_PAD
|
2563 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2565 /* ABLKCIPHER algorithms. */
2566 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2568 .cra_name
= "ecb(aes)",
2569 .cra_driver_name
= "ecb-aes-talitos",
2570 .cra_blocksize
= AES_BLOCK_SIZE
,
2571 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2574 .min_keysize
= AES_MIN_KEY_SIZE
,
2575 .max_keysize
= AES_MAX_KEY_SIZE
,
2576 .ivsize
= AES_BLOCK_SIZE
,
2579 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2582 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2584 .cra_name
= "cbc(aes)",
2585 .cra_driver_name
= "cbc-aes-talitos",
2586 .cra_blocksize
= AES_BLOCK_SIZE
,
2587 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2590 .min_keysize
= AES_MIN_KEY_SIZE
,
2591 .max_keysize
= AES_MAX_KEY_SIZE
,
2592 .ivsize
= AES_BLOCK_SIZE
,
2595 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2596 DESC_HDR_SEL0_AESU
|
2597 DESC_HDR_MODE0_AESU_CBC
,
2599 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2601 .cra_name
= "ctr(aes)",
2602 .cra_driver_name
= "ctr-aes-talitos",
2603 .cra_blocksize
= AES_BLOCK_SIZE
,
2604 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2607 .min_keysize
= AES_MIN_KEY_SIZE
,
2608 .max_keysize
= AES_MAX_KEY_SIZE
,
2609 .ivsize
= AES_BLOCK_SIZE
,
2612 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2613 DESC_HDR_SEL0_AESU
|
2614 DESC_HDR_MODE0_AESU_CTR
,
2616 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2618 .cra_name
= "ecb(des)",
2619 .cra_driver_name
= "ecb-des-talitos",
2620 .cra_blocksize
= DES_BLOCK_SIZE
,
2621 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2624 .min_keysize
= DES_KEY_SIZE
,
2625 .max_keysize
= DES_KEY_SIZE
,
2626 .ivsize
= DES_BLOCK_SIZE
,
2629 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2632 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2634 .cra_name
= "cbc(des)",
2635 .cra_driver_name
= "cbc-des-talitos",
2636 .cra_blocksize
= DES_BLOCK_SIZE
,
2637 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2640 .min_keysize
= DES_KEY_SIZE
,
2641 .max_keysize
= DES_KEY_SIZE
,
2642 .ivsize
= DES_BLOCK_SIZE
,
2645 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2647 DESC_HDR_MODE0_DEU_CBC
,
2649 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2651 .cra_name
= "ecb(des3_ede)",
2652 .cra_driver_name
= "ecb-3des-talitos",
2653 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2654 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2657 .min_keysize
= DES3_EDE_KEY_SIZE
,
2658 .max_keysize
= DES3_EDE_KEY_SIZE
,
2659 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2662 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2664 DESC_HDR_MODE0_DEU_3DES
,
2666 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2668 .cra_name
= "cbc(des3_ede)",
2669 .cra_driver_name
= "cbc-3des-talitos",
2670 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2671 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2674 .min_keysize
= DES3_EDE_KEY_SIZE
,
2675 .max_keysize
= DES3_EDE_KEY_SIZE
,
2676 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2679 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2681 DESC_HDR_MODE0_DEU_CBC
|
2682 DESC_HDR_MODE0_DEU_3DES
,
2684 /* AHASH algorithms. */
2685 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2687 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2688 .halg
.statesize
= sizeof(struct talitos_export_state
),
2691 .cra_driver_name
= "md5-talitos",
2692 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2693 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2697 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2698 DESC_HDR_SEL0_MDEUA
|
2699 DESC_HDR_MODE0_MDEU_MD5
,
2701 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2703 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2704 .halg
.statesize
= sizeof(struct talitos_export_state
),
2707 .cra_driver_name
= "sha1-talitos",
2708 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2709 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2713 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2714 DESC_HDR_SEL0_MDEUA
|
2715 DESC_HDR_MODE0_MDEU_SHA1
,
2717 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2719 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2720 .halg
.statesize
= sizeof(struct talitos_export_state
),
2722 .cra_name
= "sha224",
2723 .cra_driver_name
= "sha224-talitos",
2724 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2725 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2729 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2730 DESC_HDR_SEL0_MDEUA
|
2731 DESC_HDR_MODE0_MDEU_SHA224
,
2733 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2735 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2736 .halg
.statesize
= sizeof(struct talitos_export_state
),
2738 .cra_name
= "sha256",
2739 .cra_driver_name
= "sha256-talitos",
2740 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2741 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2745 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2746 DESC_HDR_SEL0_MDEUA
|
2747 DESC_HDR_MODE0_MDEU_SHA256
,
2749 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2751 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2752 .halg
.statesize
= sizeof(struct talitos_export_state
),
2754 .cra_name
= "sha384",
2755 .cra_driver_name
= "sha384-talitos",
2756 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2757 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2761 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2762 DESC_HDR_SEL0_MDEUB
|
2763 DESC_HDR_MODE0_MDEUB_SHA384
,
2765 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2767 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2768 .halg
.statesize
= sizeof(struct talitos_export_state
),
2770 .cra_name
= "sha512",
2771 .cra_driver_name
= "sha512-talitos",
2772 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2773 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2777 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2778 DESC_HDR_SEL0_MDEUB
|
2779 DESC_HDR_MODE0_MDEUB_SHA512
,
2781 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2783 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2784 .halg
.statesize
= sizeof(struct talitos_export_state
),
2786 .cra_name
= "hmac(md5)",
2787 .cra_driver_name
= "hmac-md5-talitos",
2788 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2789 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2793 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2794 DESC_HDR_SEL0_MDEUA
|
2795 DESC_HDR_MODE0_MDEU_MD5
,
2797 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2799 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2800 .halg
.statesize
= sizeof(struct talitos_export_state
),
2802 .cra_name
= "hmac(sha1)",
2803 .cra_driver_name
= "hmac-sha1-talitos",
2804 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2805 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2809 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2810 DESC_HDR_SEL0_MDEUA
|
2811 DESC_HDR_MODE0_MDEU_SHA1
,
2813 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2815 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2816 .halg
.statesize
= sizeof(struct talitos_export_state
),
2818 .cra_name
= "hmac(sha224)",
2819 .cra_driver_name
= "hmac-sha224-talitos",
2820 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2821 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2825 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2826 DESC_HDR_SEL0_MDEUA
|
2827 DESC_HDR_MODE0_MDEU_SHA224
,
2829 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2831 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2832 .halg
.statesize
= sizeof(struct talitos_export_state
),
2834 .cra_name
= "hmac(sha256)",
2835 .cra_driver_name
= "hmac-sha256-talitos",
2836 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2837 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2841 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2842 DESC_HDR_SEL0_MDEUA
|
2843 DESC_HDR_MODE0_MDEU_SHA256
,
2845 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2847 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2848 .halg
.statesize
= sizeof(struct talitos_export_state
),
2850 .cra_name
= "hmac(sha384)",
2851 .cra_driver_name
= "hmac-sha384-talitos",
2852 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2853 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2857 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2858 DESC_HDR_SEL0_MDEUB
|
2859 DESC_HDR_MODE0_MDEUB_SHA384
,
2861 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2863 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2864 .halg
.statesize
= sizeof(struct talitos_export_state
),
2866 .cra_name
= "hmac(sha512)",
2867 .cra_driver_name
= "hmac-sha512-talitos",
2868 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2869 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2873 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2874 DESC_HDR_SEL0_MDEUB
|
2875 DESC_HDR_MODE0_MDEUB_SHA512
,
2879 struct talitos_crypto_alg
{
2880 struct list_head entry
;
2882 struct talitos_alg_template algt
;
2885 static int talitos_init_common(struct talitos_ctx
*ctx
,
2886 struct talitos_crypto_alg
*talitos_alg
)
2888 struct talitos_private
*priv
;
2890 /* update context with ptr to dev */
2891 ctx
->dev
= talitos_alg
->dev
;
2893 /* assign SEC channel to tfm in round-robin fashion */
2894 priv
= dev_get_drvdata(ctx
->dev
);
2895 ctx
->ch
= atomic_inc_return(&priv
->last_chan
) &
2896 (priv
->num_channels
- 1);
2898 /* copy descriptor header template value */
2899 ctx
->desc_hdr_template
= talitos_alg
->algt
.desc_hdr_template
;
2901 /* select done notification */
2902 ctx
->desc_hdr_template
|= DESC_HDR_DONE_NOTIFY
;
2907 static int talitos_cra_init(struct crypto_tfm
*tfm
)
2909 struct crypto_alg
*alg
= tfm
->__crt_alg
;
2910 struct talitos_crypto_alg
*talitos_alg
;
2911 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2913 if ((alg
->cra_flags
& CRYPTO_ALG_TYPE_MASK
) == CRYPTO_ALG_TYPE_AHASH
)
2914 talitos_alg
= container_of(__crypto_ahash_alg(alg
),
2915 struct talitos_crypto_alg
,
2918 talitos_alg
= container_of(alg
, struct talitos_crypto_alg
,
2921 return talitos_init_common(ctx
, talitos_alg
);
2924 static int talitos_cra_init_aead(struct crypto_aead
*tfm
)
2926 struct aead_alg
*alg
= crypto_aead_alg(tfm
);
2927 struct talitos_crypto_alg
*talitos_alg
;
2928 struct talitos_ctx
*ctx
= crypto_aead_ctx(tfm
);
2930 talitos_alg
= container_of(alg
, struct talitos_crypto_alg
,
2933 return talitos_init_common(ctx
, talitos_alg
);
2936 static int talitos_cra_init_ahash(struct crypto_tfm
*tfm
)
2938 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2940 talitos_cra_init(tfm
);
2943 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
2944 sizeof(struct talitos_ahash_req_ctx
));
2950 * given the alg's descriptor header template, determine whether descriptor
2951 * type and primary/secondary execution units required match the hw
2952 * capabilities description provided in the device tree node.
2954 static int hw_supports(struct device
*dev
, __be32 desc_hdr_template
)
2956 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2959 ret
= (1 << DESC_TYPE(desc_hdr_template
) & priv
->desc_types
) &&
2960 (1 << PRIMARY_EU(desc_hdr_template
) & priv
->exec_units
);
2962 if (SECONDARY_EU(desc_hdr_template
))
2963 ret
= ret
&& (1 << SECONDARY_EU(desc_hdr_template
)
2964 & priv
->exec_units
);
2969 static int talitos_remove(struct platform_device
*ofdev
)
2971 struct device
*dev
= &ofdev
->dev
;
2972 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2973 struct talitos_crypto_alg
*t_alg
, *n
;
2976 list_for_each_entry_safe(t_alg
, n
, &priv
->alg_list
, entry
) {
2977 switch (t_alg
->algt
.type
) {
2978 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
2980 case CRYPTO_ALG_TYPE_AEAD
:
2981 crypto_unregister_aead(&t_alg
->algt
.alg
.aead
);
2982 case CRYPTO_ALG_TYPE_AHASH
:
2983 crypto_unregister_ahash(&t_alg
->algt
.alg
.hash
);
2986 list_del(&t_alg
->entry
);
2990 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
))
2991 talitos_unregister_rng(dev
);
2993 for (i
= 0; priv
->chan
&& i
< priv
->num_channels
; i
++)
2994 kfree(priv
->chan
[i
].fifo
);
2998 for (i
= 0; i
< 2; i
++)
3000 free_irq(priv
->irq
[i
], dev
);
3001 irq_dispose_mapping(priv
->irq
[i
]);
3004 tasklet_kill(&priv
->done_task
[0]);
3006 tasklet_kill(&priv
->done_task
[1]);
3015 static struct talitos_crypto_alg
*talitos_alg_alloc(struct device
*dev
,
3016 struct talitos_alg_template
3019 struct talitos_private
*priv
= dev_get_drvdata(dev
);
3020 struct talitos_crypto_alg
*t_alg
;
3021 struct crypto_alg
*alg
;
3023 t_alg
= kzalloc(sizeof(struct talitos_crypto_alg
), GFP_KERNEL
);
3025 return ERR_PTR(-ENOMEM
);
3027 t_alg
->algt
= *template;
3029 switch (t_alg
->algt
.type
) {
3030 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
3031 alg
= &t_alg
->algt
.alg
.crypto
;
3032 alg
->cra_init
= talitos_cra_init
;
3033 alg
->cra_type
= &crypto_ablkcipher_type
;
3034 alg
->cra_ablkcipher
.setkey
= ablkcipher_setkey
;
3035 alg
->cra_ablkcipher
.encrypt
= ablkcipher_encrypt
;
3036 alg
->cra_ablkcipher
.decrypt
= ablkcipher_decrypt
;
3037 alg
->cra_ablkcipher
.geniv
= "eseqiv";
3039 case CRYPTO_ALG_TYPE_AEAD
:
3040 alg
= &t_alg
->algt
.alg
.aead
.base
;
3041 t_alg
->algt
.alg
.aead
.init
= talitos_cra_init_aead
;
3042 t_alg
->algt
.alg
.aead
.setkey
= aead_setkey
;
3043 t_alg
->algt
.alg
.aead
.encrypt
= aead_encrypt
;
3044 t_alg
->algt
.alg
.aead
.decrypt
= aead_decrypt
;
3046 case CRYPTO_ALG_TYPE_AHASH
:
3047 alg
= &t_alg
->algt
.alg
.hash
.halg
.base
;
3048 alg
->cra_init
= talitos_cra_init_ahash
;
3049 alg
->cra_type
= &crypto_ahash_type
;
3050 t_alg
->algt
.alg
.hash
.init
= ahash_init
;
3051 t_alg
->algt
.alg
.hash
.update
= ahash_update
;
3052 t_alg
->algt
.alg
.hash
.final
= ahash_final
;
3053 t_alg
->algt
.alg
.hash
.finup
= ahash_finup
;
3054 t_alg
->algt
.alg
.hash
.digest
= ahash_digest
;
3055 t_alg
->algt
.alg
.hash
.setkey
= ahash_setkey
;
3056 t_alg
->algt
.alg
.hash
.import
= ahash_import
;
3057 t_alg
->algt
.alg
.hash
.export
= ahash_export
;
3059 if (!(priv
->features
& TALITOS_FTR_HMAC_OK
) &&
3060 !strncmp(alg
->cra_name
, "hmac", 4)) {
3062 return ERR_PTR(-ENOTSUPP
);
3064 if (!(priv
->features
& TALITOS_FTR_SHA224_HWINIT
) &&
3065 (!strcmp(alg
->cra_name
, "sha224") ||
3066 !strcmp(alg
->cra_name
, "hmac(sha224)"))) {
3067 t_alg
->algt
.alg
.hash
.init
= ahash_init_sha224_swinit
;
3068 t_alg
->algt
.desc_hdr_template
=
3069 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
3070 DESC_HDR_SEL0_MDEUA
|
3071 DESC_HDR_MODE0_MDEU_SHA256
;
3075 dev_err(dev
, "unknown algorithm type %d\n", t_alg
->algt
.type
);
3077 return ERR_PTR(-EINVAL
);
3080 alg
->cra_module
= THIS_MODULE
;
3081 if (t_alg
->algt
.priority
)
3082 alg
->cra_priority
= t_alg
->algt
.priority
;
3084 alg
->cra_priority
= TALITOS_CRA_PRIORITY
;
3085 alg
->cra_alignmask
= 0;
3086 alg
->cra_ctxsize
= sizeof(struct talitos_ctx
);
3087 alg
->cra_flags
|= CRYPTO_ALG_KERN_DRIVER_ONLY
;
3094 static int talitos_probe_irq(struct platform_device
*ofdev
)
3096 struct device
*dev
= &ofdev
->dev
;
3097 struct device_node
*np
= ofdev
->dev
.of_node
;
3098 struct talitos_private
*priv
= dev_get_drvdata(dev
);
3100 bool is_sec1
= has_ftr_sec1(priv
);
3102 priv
->irq
[0] = irq_of_parse_and_map(np
, 0);
3103 if (!priv
->irq
[0]) {
3104 dev_err(dev
, "failed to map irq\n");
3108 err
= request_irq(priv
->irq
[0], talitos1_interrupt_4ch
, 0,
3109 dev_driver_string(dev
), dev
);
3113 priv
->irq
[1] = irq_of_parse_and_map(np
, 1);
3115 /* get the primary irq line */
3116 if (!priv
->irq
[1]) {
3117 err
= request_irq(priv
->irq
[0], talitos2_interrupt_4ch
, 0,
3118 dev_driver_string(dev
), dev
);
3122 err
= request_irq(priv
->irq
[0], talitos2_interrupt_ch0_2
, 0,
3123 dev_driver_string(dev
), dev
);
3127 /* get the secondary irq line */
3128 err
= request_irq(priv
->irq
[1], talitos2_interrupt_ch1_3
, 0,
3129 dev_driver_string(dev
), dev
);
3131 dev_err(dev
, "failed to request secondary irq\n");
3132 irq_dispose_mapping(priv
->irq
[1]);
3140 dev_err(dev
, "failed to request primary irq\n");
3141 irq_dispose_mapping(priv
->irq
[0]);
3148 static int talitos_probe(struct platform_device
*ofdev
)
3150 struct device
*dev
= &ofdev
->dev
;
3151 struct device_node
*np
= ofdev
->dev
.of_node
;
3152 struct talitos_private
*priv
;
3153 const unsigned int *prop
;
3157 priv
= kzalloc(sizeof(struct talitos_private
), GFP_KERNEL
);
3161 INIT_LIST_HEAD(&priv
->alg_list
);
3163 dev_set_drvdata(dev
, priv
);
3165 priv
->ofdev
= ofdev
;
3167 spin_lock_init(&priv
->reg_lock
);
3169 priv
->reg
= of_iomap(np
, 0);
3171 dev_err(dev
, "failed to of_iomap\n");
3176 /* get SEC version capabilities from device tree */
3177 prop
= of_get_property(np
, "fsl,num-channels", NULL
);
3179 priv
->num_channels
= *prop
;
3181 prop
= of_get_property(np
, "fsl,channel-fifo-len", NULL
);
3183 priv
->chfifo_len
= *prop
;
3185 prop
= of_get_property(np
, "fsl,exec-units-mask", NULL
);
3187 priv
->exec_units
= *prop
;
3189 prop
= of_get_property(np
, "fsl,descriptor-types-mask", NULL
);
3191 priv
->desc_types
= *prop
;
3193 if (!is_power_of_2(priv
->num_channels
) || !priv
->chfifo_len
||
3194 !priv
->exec_units
|| !priv
->desc_types
) {
3195 dev_err(dev
, "invalid property data in device tree node\n");
3200 if (of_device_is_compatible(np
, "fsl,sec3.0"))
3201 priv
->features
|= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
;
3203 if (of_device_is_compatible(np
, "fsl,sec2.1"))
3204 priv
->features
|= TALITOS_FTR_HW_AUTH_CHECK
|
3205 TALITOS_FTR_SHA224_HWINIT
|
3206 TALITOS_FTR_HMAC_OK
;
3208 if (of_device_is_compatible(np
, "fsl,sec1.0"))
3209 priv
->features
|= TALITOS_FTR_SEC1
;
3211 if (of_device_is_compatible(np
, "fsl,sec1.2")) {
3212 priv
->reg_deu
= priv
->reg
+ TALITOS12_DEU
;
3213 priv
->reg_aesu
= priv
->reg
+ TALITOS12_AESU
;
3214 priv
->reg_mdeu
= priv
->reg
+ TALITOS12_MDEU
;
3215 stride
= TALITOS1_CH_STRIDE
;
3216 } else if (of_device_is_compatible(np
, "fsl,sec1.0")) {
3217 priv
->reg_deu
= priv
->reg
+ TALITOS10_DEU
;
3218 priv
->reg_aesu
= priv
->reg
+ TALITOS10_AESU
;
3219 priv
->reg_mdeu
= priv
->reg
+ TALITOS10_MDEU
;
3220 priv
->reg_afeu
= priv
->reg
+ TALITOS10_AFEU
;
3221 priv
->reg_rngu
= priv
->reg
+ TALITOS10_RNGU
;
3222 priv
->reg_pkeu
= priv
->reg
+ TALITOS10_PKEU
;
3223 stride
= TALITOS1_CH_STRIDE
;
3225 priv
->reg_deu
= priv
->reg
+ TALITOS2_DEU
;
3226 priv
->reg_aesu
= priv
->reg
+ TALITOS2_AESU
;
3227 priv
->reg_mdeu
= priv
->reg
+ TALITOS2_MDEU
;
3228 priv
->reg_afeu
= priv
->reg
+ TALITOS2_AFEU
;
3229 priv
->reg_rngu
= priv
->reg
+ TALITOS2_RNGU
;
3230 priv
->reg_pkeu
= priv
->reg
+ TALITOS2_PKEU
;
3231 priv
->reg_keu
= priv
->reg
+ TALITOS2_KEU
;
3232 priv
->reg_crcu
= priv
->reg
+ TALITOS2_CRCU
;
3233 stride
= TALITOS2_CH_STRIDE
;
3236 err
= talitos_probe_irq(ofdev
);
3240 if (of_device_is_compatible(np
, "fsl,sec1.0")) {
3241 tasklet_init(&priv
->done_task
[0], talitos1_done_4ch
,
3242 (unsigned long)dev
);
3244 if (!priv
->irq
[1]) {
3245 tasklet_init(&priv
->done_task
[0], talitos2_done_4ch
,
3246 (unsigned long)dev
);
3248 tasklet_init(&priv
->done_task
[0], talitos2_done_ch0_2
,
3249 (unsigned long)dev
);
3250 tasklet_init(&priv
->done_task
[1], talitos2_done_ch1_3
,
3251 (unsigned long)dev
);
3255 priv
->chan
= kzalloc(sizeof(struct talitos_channel
) *
3256 priv
->num_channels
, GFP_KERNEL
);
3258 dev_err(dev
, "failed to allocate channel management space\n");
3263 priv
->fifo_len
= roundup_pow_of_two(priv
->chfifo_len
);
3265 for (i
= 0; i
< priv
->num_channels
; i
++) {
3266 priv
->chan
[i
].reg
= priv
->reg
+ stride
* (i
+ 1);
3267 if (!priv
->irq
[1] || !(i
& 1))
3268 priv
->chan
[i
].reg
+= TALITOS_CH_BASE_OFFSET
;
3270 spin_lock_init(&priv
->chan
[i
].head_lock
);
3271 spin_lock_init(&priv
->chan
[i
].tail_lock
);
3273 priv
->chan
[i
].fifo
= kzalloc(sizeof(struct talitos_request
) *
3274 priv
->fifo_len
, GFP_KERNEL
);
3275 if (!priv
->chan
[i
].fifo
) {
3276 dev_err(dev
, "failed to allocate request fifo %d\n", i
);
3281 atomic_set(&priv
->chan
[i
].submit_count
,
3282 -(priv
->chfifo_len
- 1));
3285 dma_set_mask(dev
, DMA_BIT_MASK(36));
3287 /* reset and initialize the h/w */
3288 err
= init_device(dev
);
3290 dev_err(dev
, "failed to initialize device\n");
3294 /* register the RNG, if available */
3295 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
)) {
3296 err
= talitos_register_rng(dev
);
3298 dev_err(dev
, "failed to register hwrng: %d\n", err
);
3301 dev_info(dev
, "hwrng\n");
3304 /* register crypto algorithms the device supports */
3305 for (i
= 0; i
< ARRAY_SIZE(driver_algs
); i
++) {
3306 if (hw_supports(dev
, driver_algs
[i
].desc_hdr_template
)) {
3307 struct talitos_crypto_alg
*t_alg
;
3308 struct crypto_alg
*alg
= NULL
;
3310 t_alg
= talitos_alg_alloc(dev
, &driver_algs
[i
]);
3311 if (IS_ERR(t_alg
)) {
3312 err
= PTR_ERR(t_alg
);
3313 if (err
== -ENOTSUPP
)
3318 switch (t_alg
->algt
.type
) {
3319 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
3320 err
= crypto_register_alg(
3321 &t_alg
->algt
.alg
.crypto
);
3322 alg
= &t_alg
->algt
.alg
.crypto
;
3325 case CRYPTO_ALG_TYPE_AEAD
:
3326 err
= crypto_register_aead(
3327 &t_alg
->algt
.alg
.aead
);
3328 alg
= &t_alg
->algt
.alg
.aead
.base
;
3331 case CRYPTO_ALG_TYPE_AHASH
:
3332 err
= crypto_register_ahash(
3333 &t_alg
->algt
.alg
.hash
);
3334 alg
= &t_alg
->algt
.alg
.hash
.halg
.base
;
3338 dev_err(dev
, "%s alg registration failed\n",
3339 alg
->cra_driver_name
);
3342 list_add_tail(&t_alg
->entry
, &priv
->alg_list
);
3345 if (!list_empty(&priv
->alg_list
))
3346 dev_info(dev
, "%s algorithms registered in /proc/crypto\n",
3347 (char *)of_get_property(np
, "compatible", NULL
));
3352 talitos_remove(ofdev
);
3357 static const struct of_device_id talitos_match
[] = {
3358 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3360 .compatible
= "fsl,sec1.0",
3363 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3365 .compatible
= "fsl,sec2.0",
3370 MODULE_DEVICE_TABLE(of
, talitos_match
);
3372 static struct platform_driver talitos_driver
= {
3375 .of_match_table
= talitos_match
,
3377 .probe
= talitos_probe
,
3378 .remove
= talitos_remove
,
3381 module_platform_driver(talitos_driver
);
3383 MODULE_LICENSE("GPL");
3384 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3385 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");