/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (!is_sec1)
		dst_ptr->eptr = src_ptr->eptr;
}
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}
static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}
static void to_talitos_ptr_extent_clear(struct talitos_ptr *ptr, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = 0;
}
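
/*
 * Worked example (illustrative only, not from the original source): with
 * 36-bit addressing on SEC2+, a DMA address such as 0xf_8000_0000 is split
 * by to_talitos_ptr() into ptr = cpu_to_be32(0x80000000) (low 32 bits) and
 * eptr = 0xf (upper 4 bits).  SEC1 parts have no eptr/extent fields, which
 * is why every helper above takes is_sec1 and skips those members.
 */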
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_extent_clear(ptr, is_sec1);
}
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
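
/*
 * Usage sketch (illustrative only; "my_done" and "my_context" are
 * hypothetical caller names, not part of this driver): a caller builds a
 * descriptor with dma-mapped pointers and submits it with a completion
 * callback, treating anything other than -EINPROGRESS as a failure:
 *
 *	static void my_done(struct device *dev, struct talitos_desc *desc,
 *			    void *context, int error)
 *	{
 *		// inspect error and the DONE/feedback bits in desc->hdr,
 *		// then complete the request carried in context
 *	}
 *
 *	err = talitos_submit(dev, ch, desc, my_done, my_context);
 *	if (err != -EINPROGRESS)
 *		goto unmap;	// e.g. -EAGAIN when the channel fifo is full
 */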
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
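
/*
 * Note on the fifo arithmetic above (and in talitos_submit()): the
 * expression "(tail + 1) & (priv->fifo_len - 1)" is a cheap modulo that
 * assumes fifo_len is a power of two, which the driver arranges at setup.
 * For example, with fifo_len = 8, tail 7 wraps to (7 + 1) & 7 = 0.
 */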
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
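
/*
 * For reference (sketch, not generated code): DEF_TALITOS2_DONE(4ch,
 * TALITOS2_ISR_4CHDONE) above expands to a tasklet body of the form
 *
 *	static void talitos2_done_4ch(unsigned long data)
 *	{
 *		struct device *dev = (struct device *)data;
 *		... flush each done channel, then unmask done IRQs ...
 *	}
 *
 * which is later registered with tasklet_init() at probe time.
 */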
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {    \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};
#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
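
/*
 * Resulting ctx->key layout (worked example, illustrative only): the
 * authentication key is stored first, immediately followed by the
 * encryption key.  For authenc(hmac(sha1),cbc(aes)) with a 20-byte auth
 * key and a 16-byte enc key:
 *
 *	ctx->key:  [ authkey (20 bytes) | enckey (16 bytes) ]
 *	ctx->keylen = 36, ctx->authkeylen = 20, ctx->enckeylen = 16
 *
 * ipsec_esp() later points descriptor ptr[0] at the auth part and
 * ptr[3] at &ctx->key[authkeylen].
 */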
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
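
/*
 * Layout sketch of one SEC2 allocation (see talitos_edesc_alloc() below;
 * illustrative only):
 *
 *	[ struct talitos_edesc | link_tbl: (src_nents + dst_nents + 2)
 *	  talitos_ptr entries  | 2 * authsize bytes of ICV space ]
 *
 * On SEC1 the trailing area is instead a bounce buffer of up to
 * 2 * cryptlen bytes, addressed through the "buf" union member.
 */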
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
		}
	} else
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			oicv = (char *)&edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		link_tbl_ptr[count].len = cpu_to_be16(len);
		link_tbl_ptr[count].j_extent = 0;
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		link_tbl_ptr[count - 1].j_extent = DESC_PTR_LNKTBL_RETURN;

	return count;
}

static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
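
/*
 * Worked example (illustrative only): a 3-segment scatterlist of
 * 64 + 64 + 32 bytes converted with offset 0 and cryptlen 128 yields
 *
 *	link_tbl[0] = { addr0, len 64 }
 *	link_tbl[1] = { addr1, len 64 }   <- cryptlen exhausted here
 *	link_tbl[1].j_extent = DESC_PTR_LNKTBL_RETURN (end-of-table tag)
 *
 * and the function returns 2; the third segment is never walked.
 */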
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
			      (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
						       : DMA_TO_DEVICE);
	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen);
	if (sg_count > 1 &&
	    (ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
					 areq->assoclen,
					 &edesc->link_tbl[tbl_off])) > 1) {
		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr), 0);
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		tbl_off += ret;
	} else {
		to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, 0);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_link_tbl_len = cryptlen;
	if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
		sg_link_tbl_len += authsize;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
			       areq->assoclen, 0);
	} else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
						areq->assoclen, sg_link_tbl_len,
						&edesc->link_tbl[tbl_off])) >
		   1) {
		desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
		to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
					      tbl_off *
					      sizeof(struct talitos_ptr), 0);
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);
		tbl_off += ret;
	} else {
		copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = dma_map_sg(dev, areq->dst, edesc->dst_nents ? : 1,
				      DMA_FROM_DEVICE);

	edesc->icv_ool = false;

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
			       areq->assoclen, 0);
	} else if ((sg_count =
			sg_to_link_tbl_offset(areq->dst, sg_count,
					      areq->assoclen, cryptlen,
					      &edesc->link_tbl[tbl_off])) > 1) {
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr), 0);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
					(edesc->src_nents + edesc->dst_nents +
					 2) * sizeof(struct talitos_ptr) +
					authsize, 0);
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

		edesc->icv_ool = true;
	} else {
		copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
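
/*
 * Summary of the ipsec_esp descriptor pointer map built above:
 *
 *	ptr[0] hmac key         ptr[4] cipher in  (+ ICV extent)
 *	ptr[1] hmac data (AD)   ptr[5] cipher out (+ ICV extent)
 *	ptr[2] cipher iv        ptr[6] iv out
 *	ptr[3] cipher key
 */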
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_nents = sg_nents_for_len(src,
					     assoclen + cryptlen + authsize);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_nents_for_len(src, assoclen + cryptlen +
						  (encrypt ? 0 : authsize));
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_nents_for_len(dst, assoclen + cryptlen +
						  (encrypt ? authsize : 0));
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? cryptlen : 0) +
				  (dst_nents ? cryptlen : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}
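
/*
 * Worked example (SEC2 path, illustrative only): src_nents = 3,
 * dst_nents = 2, authsize = 12 gives
 *
 *	dma_len   = (3 + 2 + 2) * sizeof(struct talitos_ptr) + 2 * 12
 *	alloc_len = sizeof(struct talitos_edesc) + dma_len
 *
 * matching the "+ 2 link table entries, two ICV copies" comment above.
 */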
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */
		edesc->desc.hdr_lo = 0;

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}
static void unmap_sg_talitos_ptr(struct device *dev, struct scatterlist *src,
				 struct scatterlist *dst, unsigned int len,
				 struct talitos_edesc *edesc)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		if (!edesc->src_nents) {
			dma_unmap_sg(dev, src, 1,
				     dst != src ? DMA_TO_DEVICE
						: DMA_BIDIRECTIONAL);
		}
		if (dst && edesc->dst_nents) {
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
			sg_copy_from_buffer(dst, edesc->dst_nents ? : 1,
					    edesc->buf + len, len);
		} else if (dst && dst != src) {
			dma_unmap_sg(dev, dst, 1, DMA_FROM_DEVICE);
		}
	} else
		talitos_sg_unmap(dev, edesc, src, dst);
}
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, areq->src, areq->dst, areq->nbytes, edesc);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
int map_sg_in_talitos_ptr(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  enum dma_data_direction dir, struct talitos_ptr *ptr)
{
	int sg_count;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		sg_count = edesc->src_nents ? : 1;

		if (sg_count == 1) {
			dma_map_sg(dev, src, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_copy_to_buffer(src, sg_count, edesc->buf, len);
			to_talitos_ptr(ptr, edesc->dma_link_tbl, is_sec1);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   len, DMA_TO_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		sg_count = dma_map_sg(dev, src, edesc->src_nents ? : 1, dir);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(src), is_sec1);
		} else {
			sg_count = sg_to_link_tbl(src, sg_count, len,
						  &edesc->link_tbl[0]);
			if (sg_count > 1) {
				to_talitos_ptr(ptr, edesc->dma_link_tbl, 0);
				ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
				dma_sync_single_for_device(dev,
							   edesc->dma_link_tbl,
							   edesc->dma_len,
							   DMA_BIDIRECTIONAL);
			} else {
				/* Only one segment now, so no link tbl needed*/
				to_talitos_ptr(ptr, sg_dma_address(src),
					       is_sec1);
			}
		}
	}
	return sg_count;
}
void map_sg_out_talitos_ptr(struct device *dev, struct scatterlist *dst,
			    unsigned int len, struct talitos_edesc *edesc,
			    enum dma_data_direction dir,
			    struct talitos_ptr *ptr, int sg_count)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (dir != DMA_NONE)
		sg_count = dma_map_sg(dev, dst, edesc->dst_nents ? : 1, dir);

	to_talitos_ptr_len(ptr, len, is_sec1);

	if (is_sec1) {
		if (sg_count == 1) {
			if (dir != DMA_NONE)
				dma_map_sg(dev, dst, 1, dir);
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			to_talitos_ptr(ptr, edesc->dma_link_tbl + len, is_sec1);
			dma_sync_single_for_device(dev,
						   edesc->dma_link_tbl + len,
						   len, DMA_FROM_DEVICE);
		}
	} else {
		to_talitos_ptr_extent_clear(ptr, is_sec1);

		if (sg_count == 1) {
			to_talitos_ptr(ptr, sg_dma_address(dst), is_sec1);
		} else {
			struct talitos_ptr *link_tbl_ptr =
				&edesc->link_tbl[edesc->src_nents + 1];

			to_talitos_ptr(ptr, edesc->dma_link_tbl +
					    (edesc->src_nents + 1) *
					     sizeof(struct talitos_ptr), 0);
			ptr->j_extent |= DESC_PTR_LNKTBL_JUMP;
			sg_to_link_tbl(dst, sg_count, len, link_tbl_ptr);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		}
	}
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_extent_clear(&desc->ptr[1], is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	/*
	 * cipher in
	 */
	sg_count = map_sg_in_talitos_ptr(dev, areq->src, cryptlen, edesc,
					 (areq->src == areq->dst) ?
					  DMA_BIDIRECTIONAL : DMA_TO_DEVICE,
					  &desc->ptr[3]);

	/* cipher out */
	map_sg_out_talitos_ptr(dev, areq->dst, cryptlen, edesc,
			       (areq->src == areq->dst) ? DMA_NONE
							: DMA_FROM_DEVICE,
			       &desc->ptr[4], sg_count);

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1627 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1628 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1629 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1631 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1632 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1633 areq
->base
.flags
, encrypt
);
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	unmap_sg_talitos_ptr(dev, req_ctx->psrc, NULL, 0, edesc);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
		/* Indicate next op is not the first. */
		req_ctx->first = 0;
	}

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	/*
	 * data in
	 */
	map_sg_in_talitos_ptr(dev, req_ctx->psrc, length, edesc,
			      DMA_TO_DEVICE, &desc->ptr[3]);

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
*ahash_edesc_alloc(struct ahash_request
*areq
,
1810 unsigned int nbytes
)
1812 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1813 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1814 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1816 return talitos_edesc_alloc(ctx
->dev
, req_ctx
->psrc
, NULL
, NULL
, 0,
1817 nbytes
, 0, 0, 0, areq
->base
.flags
, false);
static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_edesc *edesc;
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int nbytes_to_hash;
	unsigned int to_hash_later;
	unsigned int nsg;
	int nents;

	if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
		/* Buffer up to one whole block */
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_copy_to_buffer(areq->src, nents,
				  req_ctx->buf + req_ctx->nbuf, nbytes);
		req_ctx->nbuf += nbytes;
		return 0;
	}

	/* At least (blocksize + 1) bytes are available to hash */
	nbytes_to_hash = nbytes + req_ctx->nbuf;
	to_hash_later = nbytes_to_hash & (blocksize - 1);

	if (req_ctx->last)
		to_hash_later = 0;
	else if (to_hash_later)
		/* There is a partial block. Hash the full block(s) now */
		nbytes_to_hash -= to_hash_later;
	else {
		/* Keep one block buffered */
		nbytes_to_hash -= blocksize;
		to_hash_later = blocksize;
	}

	/* Chain in any previously buffered data */
	if (req_ctx->nbuf) {
		nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
		sg_init_table(req_ctx->bufsl, nsg);
		sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
		if (nsg > 1)
			sg_chain(req_ctx->bufsl, 2, areq->src);
		req_ctx->psrc = req_ctx->bufsl;
	} else
		req_ctx->psrc = areq->src;

	if (to_hash_later) {
		nents = sg_nents_for_len(areq->src, nbytes);
		if (nents < 0) {
			dev_err(ctx->dev, "Invalid number of src SG.\n");
			return nents;
		}
		sg_pcopy_to_buffer(areq->src, nents,
				   req_ctx->bufnext,
				   to_hash_later,
				   nbytes - to_hash_later);
	}
	req_ctx->to_hash_later = to_hash_later;

	/* Allocate extended descriptor */
	edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template;

	/* On last one, request SEC to pad; otherwise continue */
	if (req_ctx->last)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
	else
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;

	/* request SEC to INIT hash. */
	if (req_ctx->first && !req_ctx->swinit)
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;

	/* When the tfm context has a keylen, it's an HMAC.
	 * A first or last (ie. not middle) descriptor must request HMAC.
	 */
	if (ctx->keylen && (req_ctx->first || req_ctx->last))
		edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;

	return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
				    ahash_done);
}
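
/*
 * Worked example of the buffering math above (illustrative only):
 * blocksize = 64, 10 bytes already buffered, update of 100 bytes:
 *
 *	nbytes_to_hash = 110, to_hash_later = 110 & 63 = 46
 *	=> hash 64 bytes now, stash 46 in bufnext for the next request
 *
 * With an exact multiple (to_hash_later == 0) one full block is kept
 * back instead, so a trailing final/finup always has data to pad.
 */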
static int ahash_update(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 0;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_final(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, 0);
}

static int ahash_finup(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}

static int ahash_digest(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);

	ahash->init(areq);
	req_ctx->last = 1;

	return ahash_process_req(areq, areq->nbytes);
}
static int ahash_export(struct ahash_request *areq, void *out)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_export_state *export = out;

	memcpy(export->hw_context, req_ctx->hw_context,
	       req_ctx->hw_context_size);
	memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
	export->swinit = req_ctx->swinit;
	export->first = req_ctx->first;
	export->last = req_ctx->last;
	export->to_hash_later = req_ctx->to_hash_later;
	export->nbuf = req_ctx->nbuf;

	return 0;
}

static int ahash_import(struct ahash_request *areq, const void *in)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	const struct talitos_export_state *export = in;

	memset(req_ctx, 0, sizeof(*req_ctx));
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
	memcpy(req_ctx->hw_context, export->hw_context,
	       req_ctx->hw_context_size);
	memcpy(req_ctx->buf, export->buf, export->nbuf);
	req_ctx->swinit = export->swinit;
	req_ctx->first = export->first;
	req_ctx->last = export->last;
	req_ctx->to_hash_later = export->to_hash_later;
	req_ctx->nbuf = export->nbuf;

	return 0;
}
struct keyhash_result {
	struct completion completion;
	int err;
};

static void keyhash_complete(struct crypto_async_request *req, int err)
{
	struct keyhash_result *res = req->data;

	if (err == -EINPROGRESS)
		return;

	res->err = err;
	complete(&res->completion);
}

static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
		   u8 *hash)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));

	struct scatterlist sg[1];
	struct ahash_request *req;
	struct keyhash_result hresult;
	int ret;

	init_completion(&hresult.completion);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Keep tfm keylen == 0 during hash of the long key */
	ctx->keylen = 0;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   keyhash_complete, &hresult);

	sg_init_one(&sg[0], key, keylen);

	ahash_request_set_crypt(req, sg, hash, keylen);
	ret = crypto_ahash_digest(req);
	switch (ret) {
	case 0:
		break;
	case -EINPROGRESS:
	case -EBUSY:
		ret = wait_for_completion_interruptible(
			&hresult.completion);
		if (!ret)
			ret = hresult.err;
		break;
	default:
		break;
	}
	ahash_request_free(req);

	return ret;
}
static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
			unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
	unsigned int blocksize =
			crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
	unsigned int digestsize = crypto_ahash_digestsize(tfm);
	unsigned int keysize = keylen;
	u8 hash[SHA512_DIGEST_SIZE];
	int ret;

	if (keylen <= blocksize)
		memcpy(ctx->key, key, keysize);
	else {
		/* Must get the hash of the long key */
		ret = keyhash(tfm, key, keylen, hash);

		if (ret) {
			crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
			return -EINVAL;
		}

		keysize = digestsize;
		memcpy(ctx->key, hash, digestsize);
	}

	ctx->keylen = keysize;

	return 0;
}
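
/*
 * The folding above is the usual HMAC convention (RFC 2104): a key longer
 * than the algorithm block size is first replaced by its digest, so
 * ctx->key never needs more than SHA512_DIGEST_SIZE bytes of storage.
 * For example, a 100-byte key handed to hmac(sha1) (64-byte block) is
 * hashed down to the 20-byte SHA-1 digest before use, while a 10-byte
 * key is stored as-is.
 */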
struct talitos_alg_template {
	u32 type;
	union {
		struct crypto_alg crypto;
		struct ahash_alg hash;
		struct aead_alg aead;
	} alg;
	__be32 desc_hdr_template;
};
static struct talitos_alg_template driver_algs[] = {
	/* AEAD algorithms.  These use a single-pass ipsec_esp descriptor */
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha1),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha1-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA1_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha224),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha224-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA224_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha256),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha256-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha384),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha384-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA384_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),cbc(aes))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(sha512),"
					    "cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-sha512-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(aes))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-aes-talitos",
				.cra_blocksize = AES_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.aead = {
			.base = {
				.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
				.cra_driver_name = "authenc-hmac-md5-"
						   "cbc-3des-talitos",
				.cra_blocksize = DES3_EDE_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_ASYNC,
			},
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = MD5_DIGEST_SIZE,
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CTR,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des)",
			.cra_driver_name = "ecb-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des)",
			.cra_driver_name = "cbc-des-talitos",
			.cra_blocksize = DES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES_KEY_SIZE,
				.max_keysize = DES_KEY_SIZE,
				.ivsize = DES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "ecb(des3_ede)",
			.cra_driver_name = "ecb-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.statesize = sizeof(struct talitos_export_state),
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};
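
/*
 * Each template above that the hardware supports ends up as an entry in
 * /proc/crypto.  A minimal kernel-side sketch (illustrative only): the
 * crypto core prefers this driver over the generic software versions
 * because of its higher cra_priority:
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *
 *	if (!IS_ERR(tfm)) {
 *		// "sha256-talitos" when this driver services the tfm
 *		pr_info("backed by %s\n",
 *			crypto_tfm_alg_driver_name(crypto_ahash_tfm(tfm)));
 *	}
 */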
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};
static int talitos_init_common(struct talitos_ctx *ctx,
			       struct talitos_crypto_alg *talitos_alg)
{
	struct talitos_private *priv;

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}
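
/*
 * The channel assignment above relies on num_channels being a power of
 * two (validated in talitos_probe()), which turns the bitwise AND into a
 * cheap modulo: with four channels, successive tfms land on channels
 * 1, 2, 3, 0, 1, ... as last_chan increments.
 */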
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_aead_ctx(tfm);

	talitos_alg = container_of(alg, struct talitos_crypto_alg,
				   algt.alg.aead);

	return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}
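
/*
 * Worked example: the "sha256-talitos" template names the common non-snoop
 * descriptor type and MDEU-A as primary execution unit with no secondary
 * EU, so hw_supports() only demands that the (1 << DESC_TYPE(...)) bit is
 * set in fsl,descriptor-types-mask and the (1 << PRIMARY_EU(...)) bit in
 * fsl,exec-units-mask.  An IPSEC_ESP AEAD template additionally requires
 * the bit for its secondary (MDEU) unit.
 */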
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&t_alg->algt.alg.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; priv->chan && i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	kfree(priv);

	return 0;
}
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
						    struct talitos_alg_template
						           *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.aead.base;
		t_alg->algt.alg.aead.init = talitos_cra_init_aead;
		t_alg->algt.alg.aead.setkey = aead_setkey;
		t_alg->algt.alg.aead.encrypt = aead_encrypt;
		t_alg->algt.alg.aead.decrypt = aead_decrypt;
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;
		t_alg->algt.alg.hash.import = ahash_import;
		t_alg->algt.alg.hash.export = ahash_export;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;
	bool is_sec1 = has_ftr_sec1(priv);

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}
	if (is_sec1) {
		err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}
static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;
	int stride;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	INIT_LIST_HEAD(&priv->alg_list);

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	if (of_device_is_compatible(np, "fsl,sec1.0"))
		priv->features |= TALITOS_FTR_SEC1;

	if (of_device_is_compatible(np, "fsl,sec1.2")) {
		priv->reg_deu = priv->reg + TALITOS12_DEU;
		priv->reg_aesu = priv->reg + TALITOS12_AESU;
		priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
		stride = TALITOS1_CH_STRIDE;
	} else if (of_device_is_compatible(np, "fsl,sec1.0")) {
		priv->reg_deu = priv->reg + TALITOS10_DEU;
		priv->reg_aesu = priv->reg + TALITOS10_AESU;
		priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
		priv->reg_afeu = priv->reg + TALITOS10_AFEU;
		priv->reg_rngu = priv->reg + TALITOS10_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
		stride = TALITOS1_CH_STRIDE;
	} else {
		priv->reg_deu = priv->reg + TALITOS2_DEU;
		priv->reg_aesu = priv->reg + TALITOS2_AESU;
		priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
		priv->reg_afeu = priv->reg + TALITOS2_AFEU;
		priv->reg_rngu = priv->reg + TALITOS2_RNGU;
		priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
		priv->reg_keu = priv->reg + TALITOS2_KEU;
		priv->reg_crcu = priv->reg + TALITOS2_CRCU;
		stride = TALITOS2_CH_STRIDE;
	}

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (of_device_is_compatible(np, "fsl,sec1.0")) {
		tasklet_init(&priv->done_task[0], talitos1_done_4ch,
			     (unsigned long)dev);
	} else {
		if (!priv->irq[1]) {
			tasklet_init(&priv->done_task[0], talitos2_done_4ch,
				     (unsigned long)dev);
		} else {
			tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
				     (unsigned long)dev);
			tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
				     (unsigned long)dev);
		}
	}

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + stride * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;

		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);

		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}

		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));
	}

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			struct crypto_alg *alg = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				alg = &t_alg->algt.alg.crypto;
				break;

			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_aead(
						&t_alg->algt.alg.aead);
				alg = &t_alg->algt.alg.aead.base;
				break;

			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				alg = &t_alg->algt.alg.hash.halg.base;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					alg->cra_driver_name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}
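
/*
 * talitos_probe() consumes a device tree node shaped like the sketch
 * below (illustrative only; the masks, fifo length, and addresses are
 * SoC-specific, here mirroring a typical fsl,sec2.0 binding example):
 *
 *	crypto@30000 {
 *		compatible = "fsl,sec2.0";
 *		reg = <0x30000 0x10000>;
 *		interrupts = <29 2>;
 *		fsl,num-channels = <4>;
 *		fsl,channel-fifo-len = <24>;
 *		fsl,exec-units-mask = <0xfe>;
 *		fsl,descriptor-types-mask = <0x12b0ebf>;
 *	};
 */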
static const struct of_device_id talitos_match[] = {
#ifdef CONFIG_CRYPTO_DEV_TALITOS1
	{
		.compatible = "fsl,sec1.0",
	},
#endif
#ifdef CONFIG_CRYPTO_DEV_TALITOS2
	{
		.compatible = "fsl,sec2.0",
	},
#endif
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);
static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);
3203 MODULE_LICENSE("GPL");
3204 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3205 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");