2 * talitos - Freescale Integrated Security Engine (SEC) device driver
4 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
6 * Scatterlist Crypto API glue code copied from files with the following:
7 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
9 * Crypto algorithm registration code copied from hifn driver:
10 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
11 * All rights reserved.
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 #include <linux/kernel.h>
29 #include <linux/module.h>
30 #include <linux/mod_devicetable.h>
31 #include <linux/device.h>
32 #include <linux/interrupt.h>
33 #include <linux/crypto.h>
34 #include <linux/hw_random.h>
35 #include <linux/of_address.h>
36 #include <linux/of_irq.h>
37 #include <linux/of_platform.h>
38 #include <linux/dma-mapping.h>
40 #include <linux/spinlock.h>
41 #include <linux/rtnetlink.h>
42 #include <linux/slab.h>
44 #include <crypto/algapi.h>
45 #include <crypto/aes.h>
46 #include <crypto/des.h>
47 #include <crypto/sha.h>
48 #include <crypto/md5.h>
49 #include <crypto/internal/aead.h>
50 #include <crypto/authenc.h>
51 #include <crypto/skcipher.h>
52 #include <crypto/hash.h>
53 #include <crypto/internal/hash.h>
54 #include <crypto/scatterwalk.h>
58 static void to_talitos_ptr(struct talitos_ptr
*ptr
, dma_addr_t dma_addr
,
61 ptr
->ptr
= cpu_to_be32(lower_32_bits(dma_addr
));
63 ptr
->eptr
= upper_32_bits(dma_addr
);
66 static void to_talitos_ptr_len(struct talitos_ptr
*ptr
, unsigned int len
,
71 ptr
->len1
= cpu_to_be16(len
);
73 ptr
->len
= cpu_to_be16(len
);
77 static unsigned short from_talitos_ptr_len(struct talitos_ptr
*ptr
,
81 return be16_to_cpu(ptr
->len1
);
83 return be16_to_cpu(ptr
->len
);
86 static void to_talitos_ptr_extent_clear(struct talitos_ptr
*ptr
, bool is_sec1
)
93 * map virtual single (contiguous) pointer to h/w descriptor pointer
95 static void map_single_talitos_ptr(struct device
*dev
,
96 struct talitos_ptr
*ptr
,
97 unsigned int len
, void *data
,
98 enum dma_data_direction dir
)
100 dma_addr_t dma_addr
= dma_map_single(dev
, data
, len
, dir
);
101 struct talitos_private
*priv
= dev_get_drvdata(dev
);
102 bool is_sec1
= has_ftr_sec1(priv
);
104 to_talitos_ptr_len(ptr
, len
, is_sec1
);
105 to_talitos_ptr(ptr
, dma_addr
, is_sec1
);
106 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
110 * unmap bus single (contiguous) h/w descriptor pointer
112 static void unmap_single_talitos_ptr(struct device
*dev
,
113 struct talitos_ptr
*ptr
,
114 enum dma_data_direction dir
)
116 struct talitos_private
*priv
= dev_get_drvdata(dev
);
117 bool is_sec1
= has_ftr_sec1(priv
);
119 dma_unmap_single(dev
, be32_to_cpu(ptr
->ptr
),
120 from_talitos_ptr_len(ptr
, is_sec1
), dir
);
123 static int reset_channel(struct device
*dev
, int ch
)
125 struct talitos_private
*priv
= dev_get_drvdata(dev
);
126 unsigned int timeout
= TALITOS_TIMEOUT
;
127 bool is_sec1
= has_ftr_sec1(priv
);
130 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
131 TALITOS1_CCCR_LO_RESET
);
133 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
) &
134 TALITOS1_CCCR_LO_RESET
) && --timeout
)
137 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
138 TALITOS2_CCCR_RESET
);
140 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
141 TALITOS2_CCCR_RESET
) && --timeout
)
146 dev_err(dev
, "failed to reset channel %d\n", ch
);
150 /* set 36-bit addressing, done writeback enable and done IRQ enable */
151 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, TALITOS_CCCR_LO_EAE
|
152 TALITOS_CCCR_LO_CDWE
| TALITOS_CCCR_LO_CDIE
);
154 /* and ICCR writeback, if available */
155 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
156 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
,
157 TALITOS_CCCR_LO_IWSE
);
162 static int reset_device(struct device
*dev
)
164 struct talitos_private
*priv
= dev_get_drvdata(dev
);
165 unsigned int timeout
= TALITOS_TIMEOUT
;
166 bool is_sec1
= has_ftr_sec1(priv
);
167 u32 mcr
= is_sec1
? TALITOS1_MCR_SWR
: TALITOS2_MCR_SWR
;
169 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
171 while ((in_be32(priv
->reg
+ TALITOS_MCR
) & mcr
)
176 mcr
= TALITOS_MCR_RCA1
| TALITOS_MCR_RCA3
;
177 setbits32(priv
->reg
+ TALITOS_MCR
, mcr
);
181 dev_err(dev
, "failed to reset device\n");
189 * Reset and initialize the device
191 static int init_device(struct device
*dev
)
193 struct talitos_private
*priv
= dev_get_drvdata(dev
);
195 bool is_sec1
= has_ftr_sec1(priv
);
199 * errata documentation: warning: certain SEC interrupts
200 * are not fully cleared by writing the MCR:SWR bit,
201 * set bit twice to completely reset
203 err
= reset_device(dev
);
207 err
= reset_device(dev
);
212 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
213 err
= reset_channel(dev
, ch
);
218 /* enable channel done and error interrupts */
220 clrbits32(priv
->reg
+ TALITOS_IMR
, TALITOS1_IMR_INIT
);
221 clrbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS1_IMR_LO_INIT
);
222 /* disable parity error check in DEU (erroneous? test vect.) */
223 setbits32(priv
->reg_deu
+ TALITOS_EUICR
, TALITOS1_DEUICR_KPE
);
225 setbits32(priv
->reg
+ TALITOS_IMR
, TALITOS2_IMR_INIT
);
226 setbits32(priv
->reg
+ TALITOS_IMR_LO
, TALITOS2_IMR_LO_INIT
);
229 /* disable integrity check error interrupts (use writeback instead) */
230 if (priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
)
231 setbits32(priv
->reg_mdeu
+ TALITOS_EUICR_LO
,
232 TALITOS_MDEUICR_LO_ICE
);
238 * talitos_submit - submits a descriptor to the device for processing
239 * @dev: the SEC device to be used
240 * @ch: the SEC device channel to be used
241 * @desc: the descriptor to be processed by the device
242 * @callback: whom to call when processing is complete
243 * @context: a handle for use by caller (optional)
245 * desc must contain valid dma-mapped (bus physical) address pointers.
246 * callback must check err and feedback in descriptor header
247 * for device processing status.
249 int talitos_submit(struct device
*dev
, int ch
, struct talitos_desc
*desc
,
250 void (*callback
)(struct device
*dev
,
251 struct talitos_desc
*desc
,
252 void *context
, int error
),
255 struct talitos_private
*priv
= dev_get_drvdata(dev
);
256 struct talitos_request
*request
;
259 bool is_sec1
= has_ftr_sec1(priv
);
261 spin_lock_irqsave(&priv
->chan
[ch
].head_lock
, flags
);
263 if (!atomic_inc_not_zero(&priv
->chan
[ch
].submit_count
)) {
264 /* h/w fifo is full */
265 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
269 head
= priv
->chan
[ch
].head
;
270 request
= &priv
->chan
[ch
].fifo
[head
];
272 /* map descriptor and save caller data */
274 desc
->hdr1
= desc
->hdr
;
276 request
->dma_desc
= dma_map_single(dev
, &desc
->hdr1
,
280 request
->dma_desc
= dma_map_single(dev
, desc
,
284 request
->callback
= callback
;
285 request
->context
= context
;
287 /* increment fifo head */
288 priv
->chan
[ch
].head
= (priv
->chan
[ch
].head
+ 1) & (priv
->fifo_len
- 1);
291 request
->desc
= desc
;
295 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF
,
296 upper_32_bits(request
->dma_desc
));
297 out_be32(priv
->chan
[ch
].reg
+ TALITOS_FF_LO
,
298 lower_32_bits(request
->dma_desc
));
300 spin_unlock_irqrestore(&priv
->chan
[ch
].head_lock
, flags
);
304 EXPORT_SYMBOL(talitos_submit
);
307 * process what was done, notify callback of error if not
309 static void flush_channel(struct device
*dev
, int ch
, int error
, int reset_ch
)
311 struct talitos_private
*priv
= dev_get_drvdata(dev
);
312 struct talitos_request
*request
, saved_req
;
315 bool is_sec1
= has_ftr_sec1(priv
);
317 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
319 tail
= priv
->chan
[ch
].tail
;
320 while (priv
->chan
[ch
].fifo
[tail
].desc
) {
323 request
= &priv
->chan
[ch
].fifo
[tail
];
325 /* descriptors with their done bits set don't get the error */
327 hdr
= is_sec1
? request
->desc
->hdr1
: request
->desc
->hdr
;
329 if ((hdr
& DESC_HDR_DONE
) == DESC_HDR_DONE
)
337 dma_unmap_single(dev
, request
->dma_desc
,
341 /* copy entries so we can call callback outside lock */
342 saved_req
.desc
= request
->desc
;
343 saved_req
.callback
= request
->callback
;
344 saved_req
.context
= request
->context
;
346 /* release request entry in fifo */
348 request
->desc
= NULL
;
350 /* increment fifo tail */
351 priv
->chan
[ch
].tail
= (tail
+ 1) & (priv
->fifo_len
- 1);
353 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
355 atomic_dec(&priv
->chan
[ch
].submit_count
);
357 saved_req
.callback(dev
, saved_req
.desc
, saved_req
.context
,
359 /* channel may resume processing in single desc error case */
360 if (error
&& !reset_ch
&& status
== error
)
362 spin_lock_irqsave(&priv
->chan
[ch
].tail_lock
, flags
);
363 tail
= priv
->chan
[ch
].tail
;
366 spin_unlock_irqrestore(&priv
->chan
[ch
].tail_lock
, flags
);
370 * process completed requests for channels that have done status
372 #define DEF_TALITOS1_DONE(name, ch_done_mask) \
373 static void talitos1_done_##name(unsigned long data) \
375 struct device *dev = (struct device *)data; \
376 struct talitos_private *priv = dev_get_drvdata(dev); \
377 unsigned long flags; \
379 if (ch_done_mask & 0x10000000) \
380 flush_channel(dev, 0, 0, 0); \
381 if (priv->num_channels == 1) \
383 if (ch_done_mask & 0x40000000) \
384 flush_channel(dev, 1, 0, 0); \
385 if (ch_done_mask & 0x00010000) \
386 flush_channel(dev, 2, 0, 0); \
387 if (ch_done_mask & 0x00040000) \
388 flush_channel(dev, 3, 0, 0); \
391 /* At this point, all completed channels have been processed */ \
392 /* Unmask done interrupts for channels completed later on. */ \
393 spin_lock_irqsave(&priv->reg_lock, flags); \
394 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
395 clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT); \
396 spin_unlock_irqrestore(&priv->reg_lock, flags); \
399 DEF_TALITOS1_DONE(4ch
, TALITOS1_ISR_4CHDONE
)
401 #define DEF_TALITOS2_DONE(name, ch_done_mask) \
402 static void talitos2_done_##name(unsigned long data) \
404 struct device *dev = (struct device *)data; \
405 struct talitos_private *priv = dev_get_drvdata(dev); \
406 unsigned long flags; \
408 if (ch_done_mask & 1) \
409 flush_channel(dev, 0, 0, 0); \
410 if (priv->num_channels == 1) \
412 if (ch_done_mask & (1 << 2)) \
413 flush_channel(dev, 1, 0, 0); \
414 if (ch_done_mask & (1 << 4)) \
415 flush_channel(dev, 2, 0, 0); \
416 if (ch_done_mask & (1 << 6)) \
417 flush_channel(dev, 3, 0, 0); \
420 /* At this point, all completed channels have been processed */ \
421 /* Unmask done interrupts for channels completed later on. */ \
422 spin_lock_irqsave(&priv->reg_lock, flags); \
423 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
424 setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT); \
425 spin_unlock_irqrestore(&priv->reg_lock, flags); \
428 DEF_TALITOS2_DONE(4ch
, TALITOS2_ISR_4CHDONE
)
429 DEF_TALITOS2_DONE(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
)
430 DEF_TALITOS2_DONE(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
)
433 * locate current (offending) descriptor
435 static u32
current_desc_hdr(struct device
*dev
, int ch
)
437 struct talitos_private
*priv
= dev_get_drvdata(dev
);
441 cur_desc
= ((u64
)in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR
)) << 32;
442 cur_desc
|= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CDPR_LO
);
445 dev_err(dev
, "CDPR is NULL, giving up search for offending descriptor\n");
449 tail
= priv
->chan
[ch
].tail
;
452 while (priv
->chan
[ch
].fifo
[iter
].dma_desc
!= cur_desc
) {
453 iter
= (iter
+ 1) & (priv
->fifo_len
- 1);
455 dev_err(dev
, "couldn't locate current descriptor\n");
460 return priv
->chan
[ch
].fifo
[iter
].desc
->hdr
;
464 * user diagnostics; report root cause of error based on execution unit status
466 static void report_eu_error(struct device
*dev
, int ch
, u32 desc_hdr
)
468 struct talitos_private
*priv
= dev_get_drvdata(dev
);
472 desc_hdr
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
);
474 switch (desc_hdr
& DESC_HDR_SEL0_MASK
) {
475 case DESC_HDR_SEL0_AFEU
:
476 dev_err(dev
, "AFEUISR 0x%08x_%08x\n",
477 in_be32(priv
->reg_afeu
+ TALITOS_EUISR
),
478 in_be32(priv
->reg_afeu
+ TALITOS_EUISR_LO
));
480 case DESC_HDR_SEL0_DEU
:
481 dev_err(dev
, "DEUISR 0x%08x_%08x\n",
482 in_be32(priv
->reg_deu
+ TALITOS_EUISR
),
483 in_be32(priv
->reg_deu
+ TALITOS_EUISR_LO
));
485 case DESC_HDR_SEL0_MDEUA
:
486 case DESC_HDR_SEL0_MDEUB
:
487 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
488 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
489 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
491 case DESC_HDR_SEL0_RNG
:
492 dev_err(dev
, "RNGUISR 0x%08x_%08x\n",
493 in_be32(priv
->reg_rngu
+ TALITOS_ISR
),
494 in_be32(priv
->reg_rngu
+ TALITOS_ISR_LO
));
496 case DESC_HDR_SEL0_PKEU
:
497 dev_err(dev
, "PKEUISR 0x%08x_%08x\n",
498 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
499 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
501 case DESC_HDR_SEL0_AESU
:
502 dev_err(dev
, "AESUISR 0x%08x_%08x\n",
503 in_be32(priv
->reg_aesu
+ TALITOS_EUISR
),
504 in_be32(priv
->reg_aesu
+ TALITOS_EUISR_LO
));
506 case DESC_HDR_SEL0_CRCU
:
507 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
508 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
509 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
511 case DESC_HDR_SEL0_KEU
:
512 dev_err(dev
, "KEUISR 0x%08x_%08x\n",
513 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR
),
514 in_be32(priv
->reg_pkeu
+ TALITOS_EUISR_LO
));
518 switch (desc_hdr
& DESC_HDR_SEL1_MASK
) {
519 case DESC_HDR_SEL1_MDEUA
:
520 case DESC_HDR_SEL1_MDEUB
:
521 dev_err(dev
, "MDEUISR 0x%08x_%08x\n",
522 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR
),
523 in_be32(priv
->reg_mdeu
+ TALITOS_EUISR_LO
));
525 case DESC_HDR_SEL1_CRCU
:
526 dev_err(dev
, "CRCUISR 0x%08x_%08x\n",
527 in_be32(priv
->reg_crcu
+ TALITOS_EUISR
),
528 in_be32(priv
->reg_crcu
+ TALITOS_EUISR_LO
));
532 for (i
= 0; i
< 8; i
++)
533 dev_err(dev
, "DESCBUF 0x%08x_%08x\n",
534 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF
+ 8*i
),
535 in_be32(priv
->chan
[ch
].reg
+ TALITOS_DESCBUF_LO
+ 8*i
));
539 * recover from error interrupts
541 static void talitos_error(struct device
*dev
, u32 isr
, u32 isr_lo
)
543 struct talitos_private
*priv
= dev_get_drvdata(dev
);
544 unsigned int timeout
= TALITOS_TIMEOUT
;
545 int ch
, error
, reset_dev
= 0;
547 bool is_sec1
= has_ftr_sec1(priv
);
548 int reset_ch
= is_sec1
? 1 : 0; /* only SEC2 supports continuation */
550 for (ch
= 0; ch
< priv
->num_channels
; ch
++) {
551 /* skip channels without errors */
553 /* bits 29, 31, 17, 19 */
554 if (!(isr
& (1 << (29 + (ch
& 1) * 2 - (ch
& 2) * 6))))
557 if (!(isr
& (1 << (ch
* 2 + 1))))
563 v_lo
= in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCPSR_LO
);
565 if (v_lo
& TALITOS_CCPSR_LO_DOF
) {
566 dev_err(dev
, "double fetch fifo overflow error\n");
570 if (v_lo
& TALITOS_CCPSR_LO_SOF
) {
571 /* h/w dropped descriptor */
572 dev_err(dev
, "single fetch fifo overflow error\n");
575 if (v_lo
& TALITOS_CCPSR_LO_MDTE
)
576 dev_err(dev
, "master data transfer error\n");
577 if (v_lo
& TALITOS_CCPSR_LO_SGDLZ
)
578 dev_err(dev
, is_sec1
? "pointeur not complete error\n"
579 : "s/g data length zero error\n");
580 if (v_lo
& TALITOS_CCPSR_LO_FPZ
)
581 dev_err(dev
, is_sec1
? "parity error\n"
582 : "fetch pointer zero error\n");
583 if (v_lo
& TALITOS_CCPSR_LO_IDH
)
584 dev_err(dev
, "illegal descriptor header error\n");
585 if (v_lo
& TALITOS_CCPSR_LO_IEU
)
586 dev_err(dev
, is_sec1
? "static assignment error\n"
587 : "invalid exec unit error\n");
588 if (v_lo
& TALITOS_CCPSR_LO_EU
)
589 report_eu_error(dev
, ch
, current_desc_hdr(dev
, ch
));
591 if (v_lo
& TALITOS_CCPSR_LO_GB
)
592 dev_err(dev
, "gather boundary error\n");
593 if (v_lo
& TALITOS_CCPSR_LO_GRL
)
594 dev_err(dev
, "gather return/length error\n");
595 if (v_lo
& TALITOS_CCPSR_LO_SB
)
596 dev_err(dev
, "scatter boundary error\n");
597 if (v_lo
& TALITOS_CCPSR_LO_SRL
)
598 dev_err(dev
, "scatter return/length error\n");
601 flush_channel(dev
, ch
, error
, reset_ch
);
604 reset_channel(dev
, ch
);
606 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
,
608 setbits32(priv
->chan
[ch
].reg
+ TALITOS_CCCR_LO
, 0);
609 while ((in_be32(priv
->chan
[ch
].reg
+ TALITOS_CCCR
) &
610 TALITOS2_CCCR_CONT
) && --timeout
)
613 dev_err(dev
, "failed to restart channel %d\n",
619 if (reset_dev
|| (is_sec1
&& isr
& ~TALITOS1_ISR_4CHERR
) ||
620 (!is_sec1
&& isr
& ~TALITOS2_ISR_4CHERR
) || isr_lo
) {
621 if (is_sec1
&& (isr_lo
& TALITOS1_ISR_TEA_ERR
))
622 dev_err(dev
, "TEA error: ISR 0x%08x_%08x\n",
625 dev_err(dev
, "done overflow, internal time out, or "
626 "rngu error: ISR 0x%08x_%08x\n", isr
, isr_lo
);
628 /* purge request queues */
629 for (ch
= 0; ch
< priv
->num_channels
; ch
++)
630 flush_channel(dev
, ch
, -EIO
, 1);
632 /* reset and reinitialize the device */
637 #define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
638 static irqreturn_t talitos1_interrupt_##name(int irq, void *data) \
640 struct device *dev = data; \
641 struct talitos_private *priv = dev_get_drvdata(dev); \
643 unsigned long flags; \
645 spin_lock_irqsave(&priv->reg_lock, flags); \
646 isr = in_be32(priv->reg + TALITOS_ISR); \
647 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
648 /* Acknowledge interrupt */ \
649 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
650 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
652 if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) { \
653 spin_unlock_irqrestore(&priv->reg_lock, flags); \
654 talitos_error(dev, isr & ch_err_mask, isr_lo); \
657 if (likely(isr & ch_done_mask)) { \
658 /* mask further done interrupts. */ \
659 setbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
660 /* done_task will unmask done interrupts at exit */ \
661 tasklet_schedule(&priv->done_task[tlet]); \
663 spin_unlock_irqrestore(&priv->reg_lock, flags); \
666 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
670 DEF_TALITOS1_INTERRUPT(4ch
, TALITOS1_ISR_4CHDONE
, TALITOS1_ISR_4CHERR
, 0)
672 #define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet) \
673 static irqreturn_t talitos2_interrupt_##name(int irq, void *data) \
675 struct device *dev = data; \
676 struct talitos_private *priv = dev_get_drvdata(dev); \
678 unsigned long flags; \
680 spin_lock_irqsave(&priv->reg_lock, flags); \
681 isr = in_be32(priv->reg + TALITOS_ISR); \
682 isr_lo = in_be32(priv->reg + TALITOS_ISR_LO); \
683 /* Acknowledge interrupt */ \
684 out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
685 out_be32(priv->reg + TALITOS_ICR_LO, isr_lo); \
687 if (unlikely(isr & ch_err_mask || isr_lo)) { \
688 spin_unlock_irqrestore(&priv->reg_lock, flags); \
689 talitos_error(dev, isr & ch_err_mask, isr_lo); \
692 if (likely(isr & ch_done_mask)) { \
693 /* mask further done interrupts. */ \
694 clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
695 /* done_task will unmask done interrupts at exit */ \
696 tasklet_schedule(&priv->done_task[tlet]); \
698 spin_unlock_irqrestore(&priv->reg_lock, flags); \
701 return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
705 DEF_TALITOS2_INTERRUPT(4ch
, TALITOS2_ISR_4CHDONE
, TALITOS2_ISR_4CHERR
, 0)
706 DEF_TALITOS2_INTERRUPT(ch0_2
, TALITOS2_ISR_CH_0_2_DONE
, TALITOS2_ISR_CH_0_2_ERR
,
708 DEF_TALITOS2_INTERRUPT(ch1_3
, TALITOS2_ISR_CH_1_3_DONE
, TALITOS2_ISR_CH_1_3_ERR
,
714 static int talitos_rng_data_present(struct hwrng
*rng
, int wait
)
716 struct device
*dev
= (struct device
*)rng
->priv
;
717 struct talitos_private
*priv
= dev_get_drvdata(dev
);
721 for (i
= 0; i
< 20; i
++) {
722 ofl
= in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
) &
723 TALITOS_RNGUSR_LO_OFL
;
732 static int talitos_rng_data_read(struct hwrng
*rng
, u32
*data
)
734 struct device
*dev
= (struct device
*)rng
->priv
;
735 struct talitos_private
*priv
= dev_get_drvdata(dev
);
737 /* rng fifo requires 64-bit accesses */
738 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO
);
739 *data
= in_be32(priv
->reg_rngu
+ TALITOS_EU_FIFO_LO
);
744 static int talitos_rng_init(struct hwrng
*rng
)
746 struct device
*dev
= (struct device
*)rng
->priv
;
747 struct talitos_private
*priv
= dev_get_drvdata(dev
);
748 unsigned int timeout
= TALITOS_TIMEOUT
;
750 setbits32(priv
->reg_rngu
+ TALITOS_EURCR_LO
, TALITOS_RNGURCR_LO_SR
);
751 while (!(in_be32(priv
->reg_rngu
+ TALITOS_EUSR_LO
)
752 & TALITOS_RNGUSR_LO_RD
)
756 dev_err(dev
, "failed to reset rng hw\n");
760 /* start generating */
761 setbits32(priv
->reg_rngu
+ TALITOS_EUDSR_LO
, 0);
766 static int talitos_register_rng(struct device
*dev
)
768 struct talitos_private
*priv
= dev_get_drvdata(dev
);
771 priv
->rng
.name
= dev_driver_string(dev
),
772 priv
->rng
.init
= talitos_rng_init
,
773 priv
->rng
.data_present
= talitos_rng_data_present
,
774 priv
->rng
.data_read
= talitos_rng_data_read
,
775 priv
->rng
.priv
= (unsigned long)dev
;
777 err
= hwrng_register(&priv
->rng
);
779 priv
->rng_registered
= true;
784 static void talitos_unregister_rng(struct device
*dev
)
786 struct talitos_private
*priv
= dev_get_drvdata(dev
);
788 if (!priv
->rng_registered
)
791 hwrng_unregister(&priv
->rng
);
792 priv
->rng_registered
= false;
798 #define TALITOS_CRA_PRIORITY 3000
799 #define TALITOS_MAX_KEY_SIZE 96
800 #define TALITOS_MAX_IV_LENGTH 16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
805 __be32 desc_hdr_template
;
806 u8 key
[TALITOS_MAX_KEY_SIZE
];
807 u8 iv
[TALITOS_MAX_IV_LENGTH
];
809 unsigned int enckeylen
;
810 unsigned int authkeylen
;
813 #define HASH_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
814 #define TALITOS_MDEU_MAX_CONTEXT_SIZE TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
816 struct talitos_ahash_req_ctx
{
817 u32 hw_context
[TALITOS_MDEU_MAX_CONTEXT_SIZE
/ sizeof(u32
)];
818 unsigned int hw_context_size
;
819 u8 buf
[HASH_MAX_BLOCK_SIZE
];
820 u8 bufnext
[HASH_MAX_BLOCK_SIZE
];
824 unsigned int to_hash_later
;
826 struct scatterlist bufsl
[2];
827 struct scatterlist
*psrc
;
830 static int aead_setkey(struct crypto_aead
*authenc
,
831 const u8
*key
, unsigned int keylen
)
833 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
834 struct crypto_authenc_keys keys
;
836 if (crypto_authenc_extractkeys(&keys
, key
, keylen
) != 0)
839 if (keys
.authkeylen
+ keys
.enckeylen
> TALITOS_MAX_KEY_SIZE
)
842 memcpy(ctx
->key
, keys
.authkey
, keys
.authkeylen
);
843 memcpy(&ctx
->key
[keys
.authkeylen
], keys
.enckey
, keys
.enckeylen
);
845 ctx
->keylen
= keys
.authkeylen
+ keys
.enckeylen
;
846 ctx
->enckeylen
= keys
.enckeylen
;
847 ctx
->authkeylen
= keys
.authkeylen
;
852 crypto_aead_set_flags(authenc
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
857 * talitos_edesc - s/w-extended descriptor
858 * @src_nents: number of segments in input scatterlist
859 * @dst_nents: number of segments in output scatterlist
860 * @icv_ool: whether ICV is out-of-line
861 * @iv_dma: dma address of iv for checking continuity and link table
862 * @dma_len: length of dma mapped link_tbl space
863 * @dma_link_tbl: bus physical address of link_tbl/buf
864 * @desc: h/w descriptor
865 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
866 * @buf: input and output buffeur (if {src,dst}_nents > 1) (SEC1)
868 * if decrypting (with authcheck), or either one of src_nents or dst_nents
869 * is greater than 1, an integrity check value is concatenated to the end
872 struct talitos_edesc
{
878 dma_addr_t dma_link_tbl
;
879 struct talitos_desc desc
;
881 struct talitos_ptr link_tbl
[0];
886 static void talitos_sg_unmap(struct device
*dev
,
887 struct talitos_edesc
*edesc
,
888 struct scatterlist
*src
,
889 struct scatterlist
*dst
)
891 unsigned int src_nents
= edesc
->src_nents
? : 1;
892 unsigned int dst_nents
= edesc
->dst_nents
? : 1;
895 dma_unmap_sg(dev
, src
, src_nents
, DMA_TO_DEVICE
);
898 dma_unmap_sg(dev
, dst
, dst_nents
, DMA_FROM_DEVICE
);
901 dma_unmap_sg(dev
, src
, src_nents
, DMA_BIDIRECTIONAL
);
904 static void ipsec_esp_unmap(struct device
*dev
,
905 struct talitos_edesc
*edesc
,
906 struct aead_request
*areq
)
908 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[6], DMA_FROM_DEVICE
);
909 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[3], DMA_TO_DEVICE
);
910 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
911 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[0], DMA_TO_DEVICE
);
913 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
);
916 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
921 * ipsec_esp descriptor callbacks
923 static void ipsec_esp_encrypt_done(struct device
*dev
,
924 struct talitos_desc
*desc
, void *context
,
927 struct aead_request
*areq
= context
;
928 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
929 unsigned int authsize
= crypto_aead_authsize(authenc
);
930 struct talitos_edesc
*edesc
;
931 struct scatterlist
*sg
;
934 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
936 ipsec_esp_unmap(dev
, edesc
, areq
);
938 /* copy the generated ICV to dst */
939 if (edesc
->icv_ool
) {
940 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
941 edesc
->dst_nents
+ 2];
942 sg
= sg_last(areq
->dst
, edesc
->dst_nents
);
943 memcpy((char *)sg_virt(sg
) + sg
->length
- authsize
,
949 aead_request_complete(areq
, err
);
952 static void ipsec_esp_decrypt_swauth_done(struct device
*dev
,
953 struct talitos_desc
*desc
,
954 void *context
, int err
)
956 struct aead_request
*req
= context
;
957 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
958 unsigned int authsize
= crypto_aead_authsize(authenc
);
959 struct talitos_edesc
*edesc
;
960 struct scatterlist
*sg
;
963 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
965 ipsec_esp_unmap(dev
, edesc
, req
);
969 sg
= sg_last(req
->dst
, edesc
->dst_nents
? : 1);
970 icv
= (char *)sg_virt(sg
) + sg
->length
- authsize
;
972 if (edesc
->dma_len
) {
973 oicv
= (char *)&edesc
->link_tbl
[edesc
->src_nents
+
974 edesc
->dst_nents
+ 2];
976 icv
= oicv
+ authsize
;
978 oicv
= (char *)&edesc
->link_tbl
[0];
980 err
= crypto_memneq(oicv
, icv
, authsize
) ? -EBADMSG
: 0;
985 aead_request_complete(req
, err
);
988 static void ipsec_esp_decrypt_hwauth_done(struct device
*dev
,
989 struct talitos_desc
*desc
,
990 void *context
, int err
)
992 struct aead_request
*req
= context
;
993 struct talitos_edesc
*edesc
;
995 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
997 ipsec_esp_unmap(dev
, edesc
, req
);
999 /* check ICV auth status */
1000 if (!err
&& ((desc
->hdr_lo
& DESC_HDR_LO_ICCR1_MASK
) !=
1001 DESC_HDR_LO_ICCR1_PASS
))
1006 aead_request_complete(req
, err
);
1010 * convert scatterlist to SEC h/w link table format
1011 * stop at cryptlen bytes
1013 static int sg_to_link_tbl_offset(struct scatterlist
*sg
, int sg_count
,
1014 unsigned int offset
, int cryptlen
,
1015 struct talitos_ptr
*link_tbl_ptr
)
1017 int n_sg
= sg_count
;
1020 while (cryptlen
&& sg
&& n_sg
--) {
1021 unsigned int len
= sg_dma_len(sg
);
1023 if (offset
>= len
) {
1033 to_talitos_ptr(link_tbl_ptr
+ count
,
1034 sg_dma_address(sg
) + offset
, 0);
1035 link_tbl_ptr
[count
].len
= cpu_to_be16(len
);
1036 link_tbl_ptr
[count
].j_extent
= 0;
1045 /* tag end of link table */
1047 link_tbl_ptr
[count
- 1].j_extent
= DESC_PTR_LNKTBL_RETURN
;
/* Convenience wrapper: build a link table starting at offset 0. */
static inline int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
				 int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	return sg_to_link_tbl_offset(sg, sg_count, 0, cryptlen,
				     link_tbl_ptr);
}
1063 static int ipsec_esp(struct talitos_edesc
*edesc
, struct aead_request
*areq
,
1064 void (*callback
)(struct device
*dev
,
1065 struct talitos_desc
*desc
,
1066 void *context
, int error
))
1068 struct crypto_aead
*aead
= crypto_aead_reqtfm(areq
);
1069 unsigned int authsize
= crypto_aead_authsize(aead
);
1070 struct talitos_ctx
*ctx
= crypto_aead_ctx(aead
);
1071 struct device
*dev
= ctx
->dev
;
1072 struct talitos_desc
*desc
= &edesc
->desc
;
1073 unsigned int cryptlen
= areq
->cryptlen
;
1074 unsigned int ivsize
= crypto_aead_ivsize(aead
);
1077 int sg_link_tbl_len
;
1080 map_single_talitos_ptr(dev
, &desc
->ptr
[0], ctx
->authkeylen
, &ctx
->key
,
1083 sg_count
= dma_map_sg(dev
, areq
->src
, edesc
->src_nents
?: 1,
1084 (areq
->src
== areq
->dst
) ? DMA_BIDIRECTIONAL
1088 desc
->ptr
[1].len
= cpu_to_be16(areq
->assoclen
);
1090 (ret
= sg_to_link_tbl_offset(areq
->src
, sg_count
, 0,
1092 &edesc
->link_tbl
[tbl_off
])) > 1) {
1095 to_talitos_ptr(&desc
->ptr
[1], edesc
->dma_link_tbl
+ tbl_off
*
1096 sizeof(struct talitos_ptr
), 0);
1097 desc
->ptr
[1].j_extent
= DESC_PTR_LNKTBL_JUMP
;
1099 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1100 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1102 to_talitos_ptr(&desc
->ptr
[1], sg_dma_address(areq
->src
), 0);
1103 desc
->ptr
[1].j_extent
= 0;
1107 to_talitos_ptr(&desc
->ptr
[2], edesc
->iv_dma
, 0);
1108 desc
->ptr
[2].len
= cpu_to_be16(ivsize
);
1109 desc
->ptr
[2].j_extent
= 0;
1112 map_single_talitos_ptr(dev
, &desc
->ptr
[3], ctx
->enckeylen
,
1113 (char *)&ctx
->key
+ ctx
->authkeylen
,
1118 * map and adjust cipher len to aead request cryptlen.
1119 * extent is bytes of HMAC postpended to ciphertext,
1120 * typically 12 for ipsec
1122 desc
->ptr
[4].len
= cpu_to_be16(cryptlen
);
1123 desc
->ptr
[4].j_extent
= authsize
;
1125 sg_link_tbl_len
= cryptlen
;
1126 if (edesc
->desc
.hdr
& DESC_HDR_MODE1_MDEU_CICV
)
1127 sg_link_tbl_len
+= authsize
;
1130 (ret
= sg_to_link_tbl_offset(areq
->src
, sg_count
, areq
->assoclen
,
1132 &edesc
->link_tbl
[tbl_off
])) > 1) {
1134 desc
->ptr
[4].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1135 to_talitos_ptr(&desc
->ptr
[4], edesc
->dma_link_tbl
+
1137 sizeof(struct talitos_ptr
), 0);
1138 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1142 to_talitos_ptr(&desc
->ptr
[4], sg_dma_address(areq
->src
), 0);
1145 desc
->ptr
[5].len
= cpu_to_be16(cryptlen
);
1146 desc
->ptr
[5].j_extent
= authsize
;
1148 if (areq
->src
!= areq
->dst
)
1149 sg_count
= dma_map_sg(dev
, areq
->dst
, edesc
->dst_nents
? : 1,
1152 edesc
->icv_ool
= false;
1155 (sg_count
= sg_to_link_tbl_offset(areq
->dst
, sg_count
,
1156 areq
->assoclen
, cryptlen
,
1157 &edesc
->link_tbl
[tbl_off
])) >
1159 struct talitos_ptr
*tbl_ptr
= &edesc
->link_tbl
[tbl_off
];
1161 to_talitos_ptr(&desc
->ptr
[5], edesc
->dma_link_tbl
+
1162 tbl_off
* sizeof(struct talitos_ptr
), 0);
1164 /* Add an entry to the link table for ICV data */
1165 tbl_ptr
+= sg_count
- 1;
1166 tbl_ptr
->j_extent
= 0;
1168 tbl_ptr
->j_extent
= DESC_PTR_LNKTBL_RETURN
;
1169 tbl_ptr
->len
= cpu_to_be16(authsize
);
1171 /* icv data follows link tables */
1172 to_talitos_ptr(tbl_ptr
, edesc
->dma_link_tbl
+
1173 (edesc
->src_nents
+ edesc
->dst_nents
+
1174 2) * sizeof(struct talitos_ptr
) +
1176 desc
->ptr
[5].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1177 dma_sync_single_for_device(ctx
->dev
, edesc
->dma_link_tbl
,
1178 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1180 edesc
->icv_ool
= true;
1182 to_talitos_ptr(&desc
->ptr
[5], sg_dma_address(areq
->dst
), 0);
1185 map_single_talitos_ptr(dev
, &desc
->ptr
[6], ivsize
, ctx
->iv
,
1188 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1189 if (ret
!= -EINPROGRESS
) {
1190 ipsec_esp_unmap(dev
, edesc
, areq
);
1197 * allocate and map the extended descriptor
1199 static struct talitos_edesc
*talitos_edesc_alloc(struct device
*dev
,
1200 struct scatterlist
*src
,
1201 struct scatterlist
*dst
,
1203 unsigned int assoclen
,
1204 unsigned int cryptlen
,
1205 unsigned int authsize
,
1206 unsigned int ivsize
,
1211 struct talitos_edesc
*edesc
;
1212 int src_nents
, dst_nents
, alloc_len
, dma_len
;
1213 dma_addr_t iv_dma
= 0;
1214 gfp_t flags
= cryptoflags
& CRYPTO_TFM_REQ_MAY_SLEEP
? GFP_KERNEL
:
1216 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1217 bool is_sec1
= has_ftr_sec1(priv
);
1218 int max_len
= is_sec1
? TALITOS1_MAX_DATA_LEN
: TALITOS2_MAX_DATA_LEN
;
1221 if (cryptlen
+ authsize
> max_len
) {
1222 dev_err(dev
, "length exceeds h/w max limit\n");
1223 return ERR_PTR(-EINVAL
);
1227 iv_dma
= dma_map_single(dev
, iv
, ivsize
, DMA_TO_DEVICE
);
1229 if (!dst
|| dst
== src
) {
1230 src_nents
= sg_nents_for_len(src
,
1231 assoclen
+ cryptlen
+ authsize
);
1232 if (src_nents
< 0) {
1233 dev_err(dev
, "Invalid number of src SG.\n");
1234 err
= ERR_PTR(-EINVAL
);
1237 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1238 dst_nents
= dst
? src_nents
: 0;
1239 } else { /* dst && dst != src*/
1240 src_nents
= sg_nents_for_len(src
, assoclen
+ cryptlen
+
1241 (encrypt
? 0 : authsize
));
1242 if (src_nents
< 0) {
1243 dev_err(dev
, "Invalid number of src SG.\n");
1244 err
= ERR_PTR(-EINVAL
);
1247 src_nents
= (src_nents
== 1) ? 0 : src_nents
;
1248 dst_nents
= sg_nents_for_len(dst
, assoclen
+ cryptlen
+
1249 (encrypt
? authsize
: 0));
1250 if (dst_nents
< 0) {
1251 dev_err(dev
, "Invalid number of dst SG.\n");
1252 err
= ERR_PTR(-EINVAL
);
1255 dst_nents
= (dst_nents
== 1) ? 0 : dst_nents
;
1259 * allocate space for base edesc plus the link tables,
1260 * allowing for two separate entries for AD and generated ICV (+ 2),
1261 * and space for two sets of ICVs (stashed and generated)
1263 alloc_len
= sizeof(struct talitos_edesc
);
1264 if (src_nents
|| dst_nents
) {
1266 dma_len
= (src_nents
? cryptlen
: 0) +
1267 (dst_nents
? cryptlen
: 0);
1269 dma_len
= (src_nents
+ dst_nents
+ 2) *
1270 sizeof(struct talitos_ptr
) + authsize
* 2;
1271 alloc_len
+= dma_len
;
1274 alloc_len
+= icv_stashing
? authsize
: 0;
1277 edesc
= kmalloc(alloc_len
, GFP_DMA
| flags
);
1279 dev_err(dev
, "could not allocate edescriptor\n");
1280 err
= ERR_PTR(-ENOMEM
);
1284 edesc
->src_nents
= src_nents
;
1285 edesc
->dst_nents
= dst_nents
;
1286 edesc
->iv_dma
= iv_dma
;
1287 edesc
->dma_len
= dma_len
;
1289 edesc
->dma_link_tbl
= dma_map_single(dev
, &edesc
->link_tbl
[0],
1296 dma_unmap_single(dev
, iv_dma
, ivsize
, DMA_TO_DEVICE
);
1300 static struct talitos_edesc
*aead_edesc_alloc(struct aead_request
*areq
, u8
*iv
,
1301 int icv_stashing
, bool encrypt
)
1303 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1304 unsigned int authsize
= crypto_aead_authsize(authenc
);
1305 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1306 unsigned int ivsize
= crypto_aead_ivsize(authenc
);
1308 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1309 iv
, areq
->assoclen
, areq
->cryptlen
,
1310 authsize
, ivsize
, icv_stashing
,
1311 areq
->base
.flags
, encrypt
);
1314 static int aead_encrypt(struct aead_request
*req
)
1316 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1317 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1318 struct talitos_edesc
*edesc
;
1320 /* allocate extended descriptor */
1321 edesc
= aead_edesc_alloc(req
, req
->iv
, 0, true);
1323 return PTR_ERR(edesc
);
1326 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1328 return ipsec_esp(edesc
, req
, ipsec_esp_encrypt_done
);
1331 static int aead_decrypt(struct aead_request
*req
)
1333 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1334 unsigned int authsize
= crypto_aead_authsize(authenc
);
1335 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1336 struct talitos_private
*priv
= dev_get_drvdata(ctx
->dev
);
1337 struct talitos_edesc
*edesc
;
1338 struct scatterlist
*sg
;
1341 req
->cryptlen
-= authsize
;
1343 /* allocate extended descriptor */
1344 edesc
= aead_edesc_alloc(req
, req
->iv
, 1, false);
1346 return PTR_ERR(edesc
);
1348 if ((priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
) &&
1349 ((!edesc
->src_nents
&& !edesc
->dst_nents
) ||
1350 priv
->features
& TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
)) {
1352 /* decrypt and check the ICV */
1353 edesc
->desc
.hdr
= ctx
->desc_hdr_template
|
1354 DESC_HDR_DIR_INBOUND
|
1355 DESC_HDR_MODE1_MDEU_CICV
;
1357 /* reset integrity check result bits */
1358 edesc
->desc
.hdr_lo
= 0;
1360 return ipsec_esp(edesc
, req
, ipsec_esp_decrypt_hwauth_done
);
1363 /* Have to check the ICV with software */
1364 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1366 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1368 icvdata
= (char *)&edesc
->link_tbl
[edesc
->src_nents
+
1369 edesc
->dst_nents
+ 2];
1371 icvdata
= &edesc
->link_tbl
[0];
1373 sg
= sg_last(req
->src
, edesc
->src_nents
? : 1);
1375 memcpy(icvdata
, (char *)sg_virt(sg
) + sg
->length
- authsize
, authsize
);
1377 return ipsec_esp(edesc
, req
, ipsec_esp_decrypt_swauth_done
);
1380 static int ablkcipher_setkey(struct crypto_ablkcipher
*cipher
,
1381 const u8
*key
, unsigned int keylen
)
1383 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1385 memcpy(&ctx
->key
, key
, keylen
);
1386 ctx
->keylen
= keylen
;
1391 static void unmap_sg_talitos_ptr(struct device
*dev
, struct scatterlist
*src
,
1392 struct scatterlist
*dst
, unsigned int len
,
1393 struct talitos_edesc
*edesc
)
1395 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1396 bool is_sec1
= has_ftr_sec1(priv
);
1399 if (!edesc
->src_nents
) {
1400 dma_unmap_sg(dev
, src
, 1,
1401 dst
!= src
? DMA_TO_DEVICE
1402 : DMA_BIDIRECTIONAL
);
1404 if (dst
&& edesc
->dst_nents
) {
1405 dma_sync_single_for_device(dev
,
1406 edesc
->dma_link_tbl
+ len
,
1407 len
, DMA_FROM_DEVICE
);
1408 sg_copy_from_buffer(dst
, edesc
->dst_nents
? : 1,
1409 edesc
->buf
+ len
, len
);
1410 } else if (dst
&& dst
!= src
) {
1411 dma_unmap_sg(dev
, dst
, 1, DMA_FROM_DEVICE
);
1414 talitos_sg_unmap(dev
, edesc
, src
, dst
);
1418 static void common_nonsnoop_unmap(struct device
*dev
,
1419 struct talitos_edesc
*edesc
,
1420 struct ablkcipher_request
*areq
)
1422 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1424 unmap_sg_talitos_ptr(dev
, areq
->src
, areq
->dst
, areq
->nbytes
, edesc
);
1425 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
1426 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1], DMA_TO_DEVICE
);
1429 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1433 static void ablkcipher_done(struct device
*dev
,
1434 struct talitos_desc
*desc
, void *context
,
1437 struct ablkcipher_request
*areq
= context
;
1438 struct talitos_edesc
*edesc
;
1440 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1442 common_nonsnoop_unmap(dev
, edesc
, areq
);
1446 areq
->base
.complete(&areq
->base
, err
);
1449 int map_sg_in_talitos_ptr(struct device
*dev
, struct scatterlist
*src
,
1450 unsigned int len
, struct talitos_edesc
*edesc
,
1451 enum dma_data_direction dir
, struct talitos_ptr
*ptr
)
1454 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1455 bool is_sec1
= has_ftr_sec1(priv
);
1457 to_talitos_ptr_len(ptr
, len
, is_sec1
);
1460 sg_count
= edesc
->src_nents
? : 1;
1462 if (sg_count
== 1) {
1463 dma_map_sg(dev
, src
, 1, dir
);
1464 to_talitos_ptr(ptr
, sg_dma_address(src
), is_sec1
);
1466 sg_copy_to_buffer(src
, sg_count
, edesc
->buf
, len
);
1467 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
, is_sec1
);
1468 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1469 len
, DMA_TO_DEVICE
);
1472 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
1474 sg_count
= dma_map_sg(dev
, src
, edesc
->src_nents
? : 1, dir
);
1476 if (sg_count
== 1) {
1477 to_talitos_ptr(ptr
, sg_dma_address(src
), is_sec1
);
1479 sg_count
= sg_to_link_tbl(src
, sg_count
, len
,
1480 &edesc
->link_tbl
[0]);
1482 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
, 0);
1483 ptr
->j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1484 dma_sync_single_for_device(dev
,
1485 edesc
->dma_link_tbl
,
1489 /* Only one segment now, so no link tbl needed*/
1490 to_talitos_ptr(ptr
, sg_dma_address(src
),
1498 void map_sg_out_talitos_ptr(struct device
*dev
, struct scatterlist
*dst
,
1499 unsigned int len
, struct talitos_edesc
*edesc
,
1500 enum dma_data_direction dir
,
1501 struct talitos_ptr
*ptr
, int sg_count
)
1503 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1504 bool is_sec1
= has_ftr_sec1(priv
);
1506 if (dir
!= DMA_NONE
)
1507 sg_count
= dma_map_sg(dev
, dst
, edesc
->dst_nents
? : 1, dir
);
1509 to_talitos_ptr_len(ptr
, len
, is_sec1
);
1512 if (sg_count
== 1) {
1513 if (dir
!= DMA_NONE
)
1514 dma_map_sg(dev
, dst
, 1, dir
);
1515 to_talitos_ptr(ptr
, sg_dma_address(dst
), is_sec1
);
1517 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+ len
, is_sec1
);
1518 dma_sync_single_for_device(dev
,
1519 edesc
->dma_link_tbl
+ len
,
1520 len
, DMA_FROM_DEVICE
);
1523 to_talitos_ptr_extent_clear(ptr
, is_sec1
);
1525 if (sg_count
== 1) {
1526 to_talitos_ptr(ptr
, sg_dma_address(dst
), is_sec1
);
1528 struct talitos_ptr
*link_tbl_ptr
=
1529 &edesc
->link_tbl
[edesc
->src_nents
+ 1];
1531 to_talitos_ptr(ptr
, edesc
->dma_link_tbl
+
1532 (edesc
->src_nents
+ 1) *
1533 sizeof(struct talitos_ptr
), 0);
1534 ptr
->j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1535 sg_to_link_tbl(dst
, sg_count
, len
, link_tbl_ptr
);
1536 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1543 static int common_nonsnoop(struct talitos_edesc
*edesc
,
1544 struct ablkcipher_request
*areq
,
1545 void (*callback
) (struct device
*dev
,
1546 struct talitos_desc
*desc
,
1547 void *context
, int error
))
1549 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1550 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1551 struct device
*dev
= ctx
->dev
;
1552 struct talitos_desc
*desc
= &edesc
->desc
;
1553 unsigned int cryptlen
= areq
->nbytes
;
1554 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1556 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1557 bool is_sec1
= has_ftr_sec1(priv
);
1559 /* first DWORD empty */
1560 desc
->ptr
[0] = zero_entry
;
1563 to_talitos_ptr(&desc
->ptr
[1], edesc
->iv_dma
, is_sec1
);
1564 to_talitos_ptr_len(&desc
->ptr
[1], ivsize
, is_sec1
);
1565 to_talitos_ptr_extent_clear(&desc
->ptr
[1], is_sec1
);
1568 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1569 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1574 sg_count
= map_sg_in_talitos_ptr(dev
, areq
->src
, cryptlen
, edesc
,
1575 (areq
->src
== areq
->dst
) ?
1576 DMA_BIDIRECTIONAL
: DMA_TO_DEVICE
,
1580 map_sg_out_talitos_ptr(dev
, areq
->dst
, cryptlen
, edesc
,
1581 (areq
->src
== areq
->dst
) ? DMA_NONE
1583 &desc
->ptr
[4], sg_count
);
1586 map_single_talitos_ptr(dev
, &desc
->ptr
[5], ivsize
, ctx
->iv
,
1589 /* last DWORD empty */
1590 desc
->ptr
[6] = zero_entry
;
1592 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1593 if (ret
!= -EINPROGRESS
) {
1594 common_nonsnoop_unmap(dev
, edesc
, areq
);
1600 static struct talitos_edesc
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1603 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1604 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1605 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1607 return talitos_edesc_alloc(ctx
->dev
, areq
->src
, areq
->dst
,
1608 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1609 areq
->base
.flags
, encrypt
);
1612 static int ablkcipher_encrypt(struct ablkcipher_request
*areq
)
1614 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1615 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1616 struct talitos_edesc
*edesc
;
1618 /* allocate extended descriptor */
1619 edesc
= ablkcipher_edesc_alloc(areq
, true);
1621 return PTR_ERR(edesc
);
1624 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1626 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1629 static int ablkcipher_decrypt(struct ablkcipher_request
*areq
)
1631 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1632 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1633 struct talitos_edesc
*edesc
;
1635 /* allocate extended descriptor */
1636 edesc
= ablkcipher_edesc_alloc(areq
, false);
1638 return PTR_ERR(edesc
);
1640 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1642 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1645 static void common_nonsnoop_hash_unmap(struct device
*dev
,
1646 struct talitos_edesc
*edesc
,
1647 struct ahash_request
*areq
)
1649 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1650 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1651 bool is_sec1
= has_ftr_sec1(priv
);
1653 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1655 unmap_sg_talitos_ptr(dev
, req_ctx
->psrc
, NULL
, 0, edesc
);
1657 /* When using hashctx-in, must unmap it. */
1658 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[1], is_sec1
))
1659 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1],
1662 if (from_talitos_ptr_len(&edesc
->desc
.ptr
[2], is_sec1
))
1663 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2],
1667 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1672 static void ahash_done(struct device
*dev
,
1673 struct talitos_desc
*desc
, void *context
,
1676 struct ahash_request
*areq
= context
;
1677 struct talitos_edesc
*edesc
=
1678 container_of(desc
, struct talitos_edesc
, desc
);
1679 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1681 if (!req_ctx
->last
&& req_ctx
->to_hash_later
) {
1682 /* Position any partial block for next update/final/finup */
1683 memcpy(req_ctx
->buf
, req_ctx
->bufnext
, req_ctx
->to_hash_later
);
1684 req_ctx
->nbuf
= req_ctx
->to_hash_later
;
1686 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1690 areq
->base
.complete(&areq
->base
, err
);
1694 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
1695 * ourself and submit a padded block
1697 void talitos_handle_buggy_hash(struct talitos_ctx
*ctx
,
1698 struct talitos_edesc
*edesc
,
1699 struct talitos_ptr
*ptr
)
1701 static u8 padded_hash
[64] = {
1702 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1703 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1704 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1705 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1708 pr_err_once("Bug in SEC1, padding ourself\n");
1709 edesc
->desc
.hdr
&= ~DESC_HDR_MODE0_MDEU_PAD
;
1710 map_single_talitos_ptr(ctx
->dev
, ptr
, sizeof(padded_hash
),
1711 (char *)padded_hash
, DMA_TO_DEVICE
);
1714 static int common_nonsnoop_hash(struct talitos_edesc
*edesc
,
1715 struct ahash_request
*areq
, unsigned int length
,
1716 void (*callback
) (struct device
*dev
,
1717 struct talitos_desc
*desc
,
1718 void *context
, int error
))
1720 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1721 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1722 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1723 struct device
*dev
= ctx
->dev
;
1724 struct talitos_desc
*desc
= &edesc
->desc
;
1726 struct talitos_private
*priv
= dev_get_drvdata(dev
);
1727 bool is_sec1
= has_ftr_sec1(priv
);
1729 /* first DWORD empty */
1730 desc
->ptr
[0] = zero_entry
;
1732 /* hash context in */
1733 if (!req_ctx
->first
|| req_ctx
->swinit
) {
1734 map_single_talitos_ptr(dev
, &desc
->ptr
[1],
1735 req_ctx
->hw_context_size
,
1736 (char *)req_ctx
->hw_context
,
1738 req_ctx
->swinit
= 0;
1740 desc
->ptr
[1] = zero_entry
;
1741 /* Indicate next op is not the first. */
1747 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1748 (char *)&ctx
->key
, DMA_TO_DEVICE
);
1750 desc
->ptr
[2] = zero_entry
;
1755 map_sg_in_talitos_ptr(dev
, req_ctx
->psrc
, length
, edesc
,
1756 DMA_TO_DEVICE
, &desc
->ptr
[3]);
1758 /* fifth DWORD empty */
1759 desc
->ptr
[4] = zero_entry
;
1761 /* hash/HMAC out -or- hash context out */
1763 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1764 crypto_ahash_digestsize(tfm
),
1765 areq
->result
, DMA_FROM_DEVICE
);
1767 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1768 req_ctx
->hw_context_size
,
1769 req_ctx
->hw_context
, DMA_FROM_DEVICE
);
1771 /* last DWORD empty */
1772 desc
->ptr
[6] = zero_entry
;
1774 if (is_sec1
&& from_talitos_ptr_len(&desc
->ptr
[3], true) == 0)
1775 talitos_handle_buggy_hash(ctx
, edesc
, &desc
->ptr
[3]);
1777 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1778 if (ret
!= -EINPROGRESS
) {
1779 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1785 static struct talitos_edesc
*ahash_edesc_alloc(struct ahash_request
*areq
,
1786 unsigned int nbytes
)
1788 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1789 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1790 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1792 return talitos_edesc_alloc(ctx
->dev
, req_ctx
->psrc
, NULL
, NULL
, 0,
1793 nbytes
, 0, 0, 0, areq
->base
.flags
, false);
1796 static int ahash_init(struct ahash_request
*areq
)
1798 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1799 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1801 /* Initialize the context */
1803 req_ctx
->first
= 1; /* first indicates h/w must init its context */
1804 req_ctx
->swinit
= 0; /* assume h/w init of context */
1805 req_ctx
->hw_context_size
=
1806 (crypto_ahash_digestsize(tfm
) <= SHA256_DIGEST_SIZE
)
1807 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1808 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
;
1814 * on h/w without explicit sha224 support, we initialize h/w context
1815 * manually with sha224 constants, and tell it to run sha256.
1817 static int ahash_init_sha224_swinit(struct ahash_request
*areq
)
1819 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1822 req_ctx
->swinit
= 1;/* prevent h/w initting context with sha256 values*/
1824 req_ctx
->hw_context
[0] = SHA224_H0
;
1825 req_ctx
->hw_context
[1] = SHA224_H1
;
1826 req_ctx
->hw_context
[2] = SHA224_H2
;
1827 req_ctx
->hw_context
[3] = SHA224_H3
;
1828 req_ctx
->hw_context
[4] = SHA224_H4
;
1829 req_ctx
->hw_context
[5] = SHA224_H5
;
1830 req_ctx
->hw_context
[6] = SHA224_H6
;
1831 req_ctx
->hw_context
[7] = SHA224_H7
;
1833 /* init 64-bit count */
1834 req_ctx
->hw_context
[8] = 0;
1835 req_ctx
->hw_context
[9] = 0;
1840 static int ahash_process_req(struct ahash_request
*areq
, unsigned int nbytes
)
1842 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1843 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1844 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1845 struct talitos_edesc
*edesc
;
1846 unsigned int blocksize
=
1847 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1848 unsigned int nbytes_to_hash
;
1849 unsigned int to_hash_later
;
1853 if (!req_ctx
->last
&& (nbytes
+ req_ctx
->nbuf
<= blocksize
)) {
1854 /* Buffer up to one whole block */
1855 nents
= sg_nents_for_len(areq
->src
, nbytes
);
1857 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
1860 sg_copy_to_buffer(areq
->src
, nents
,
1861 req_ctx
->buf
+ req_ctx
->nbuf
, nbytes
);
1862 req_ctx
->nbuf
+= nbytes
;
1866 /* At least (blocksize + 1) bytes are available to hash */
1867 nbytes_to_hash
= nbytes
+ req_ctx
->nbuf
;
1868 to_hash_later
= nbytes_to_hash
& (blocksize
- 1);
1872 else if (to_hash_later
)
1873 /* There is a partial block. Hash the full block(s) now */
1874 nbytes_to_hash
-= to_hash_later
;
1876 /* Keep one block buffered */
1877 nbytes_to_hash
-= blocksize
;
1878 to_hash_later
= blocksize
;
1881 /* Chain in any previously buffered data */
1882 if (req_ctx
->nbuf
) {
1883 nsg
= (req_ctx
->nbuf
< nbytes_to_hash
) ? 2 : 1;
1884 sg_init_table(req_ctx
->bufsl
, nsg
);
1885 sg_set_buf(req_ctx
->bufsl
, req_ctx
->buf
, req_ctx
->nbuf
);
1887 sg_chain(req_ctx
->bufsl
, 2, areq
->src
);
1888 req_ctx
->psrc
= req_ctx
->bufsl
;
1890 req_ctx
->psrc
= areq
->src
;
1892 if (to_hash_later
) {
1893 nents
= sg_nents_for_len(areq
->src
, nbytes
);
1895 dev_err(ctx
->dev
, "Invalid number of src SG.\n");
1898 sg_pcopy_to_buffer(areq
->src
, nents
,
1901 nbytes
- to_hash_later
);
1903 req_ctx
->to_hash_later
= to_hash_later
;
1905 /* Allocate extended descriptor */
1906 edesc
= ahash_edesc_alloc(areq
, nbytes_to_hash
);
1908 return PTR_ERR(edesc
);
1910 edesc
->desc
.hdr
= ctx
->desc_hdr_template
;
1912 /* On last one, request SEC to pad; otherwise continue */
1914 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_PAD
;
1916 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_CONT
;
1918 /* request SEC to INIT hash. */
1919 if (req_ctx
->first
&& !req_ctx
->swinit
)
1920 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_INIT
;
1922 /* When the tfm context has a keylen, it's an HMAC.
1923 * A first or last (ie. not middle) descriptor must request HMAC.
1925 if (ctx
->keylen
&& (req_ctx
->first
|| req_ctx
->last
))
1926 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_HMAC
;
1928 return common_nonsnoop_hash(edesc
, areq
, nbytes_to_hash
,
1932 static int ahash_update(struct ahash_request
*areq
)
1934 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1938 return ahash_process_req(areq
, areq
->nbytes
);
1941 static int ahash_final(struct ahash_request
*areq
)
1943 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1947 return ahash_process_req(areq
, 0);
1950 static int ahash_finup(struct ahash_request
*areq
)
1952 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1956 return ahash_process_req(areq
, areq
->nbytes
);
1959 static int ahash_digest(struct ahash_request
*areq
)
1961 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1962 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(areq
);
1967 return ahash_process_req(areq
, areq
->nbytes
);
1970 struct keyhash_result
{
1971 struct completion completion
;
1975 static void keyhash_complete(struct crypto_async_request
*req
, int err
)
1977 struct keyhash_result
*res
= req
->data
;
1979 if (err
== -EINPROGRESS
)
1983 complete(&res
->completion
);
1986 static int keyhash(struct crypto_ahash
*tfm
, const u8
*key
, unsigned int keylen
,
1989 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1991 struct scatterlist sg
[1];
1992 struct ahash_request
*req
;
1993 struct keyhash_result hresult
;
1996 init_completion(&hresult
.completion
);
1998 req
= ahash_request_alloc(tfm
, GFP_KERNEL
);
2002 /* Keep tfm keylen == 0 during hash of the long key */
2004 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
2005 keyhash_complete
, &hresult
);
2007 sg_init_one(&sg
[0], key
, keylen
);
2009 ahash_request_set_crypt(req
, sg
, hash
, keylen
);
2010 ret
= crypto_ahash_digest(req
);
2016 ret
= wait_for_completion_interruptible(
2017 &hresult
.completion
);
2024 ahash_request_free(req
);
2029 static int ahash_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
2030 unsigned int keylen
)
2032 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
2033 unsigned int blocksize
=
2034 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
2035 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
2036 unsigned int keysize
= keylen
;
2037 u8 hash
[SHA512_DIGEST_SIZE
];
2040 if (keylen
<= blocksize
)
2041 memcpy(ctx
->key
, key
, keysize
);
2043 /* Must get the hash of the long key */
2044 ret
= keyhash(tfm
, key
, keylen
, hash
);
2047 crypto_ahash_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
2051 keysize
= digestsize
;
2052 memcpy(ctx
->key
, hash
, digestsize
);
2055 ctx
->keylen
= keysize
;
2061 struct talitos_alg_template
{
2064 struct crypto_alg crypto
;
2065 struct ahash_alg hash
;
2066 struct aead_alg aead
;
2068 __be32 desc_hdr_template
;
2071 static struct talitos_alg_template driver_algs
[] = {
2072 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2073 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2076 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
2077 .cra_driver_name
= "authenc-hmac-sha1-"
2079 .cra_blocksize
= AES_BLOCK_SIZE
,
2080 .cra_flags
= CRYPTO_ALG_ASYNC
,
2082 .ivsize
= AES_BLOCK_SIZE
,
2083 .maxauthsize
= SHA1_DIGEST_SIZE
,
2085 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2086 DESC_HDR_SEL0_AESU
|
2087 DESC_HDR_MODE0_AESU_CBC
|
2088 DESC_HDR_SEL1_MDEUA
|
2089 DESC_HDR_MODE1_MDEU_INIT
|
2090 DESC_HDR_MODE1_MDEU_PAD
|
2091 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2093 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2096 .cra_name
= "authenc(hmac(sha1),"
2098 .cra_driver_name
= "authenc-hmac-sha1-"
2100 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2101 .cra_flags
= CRYPTO_ALG_ASYNC
,
2103 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2104 .maxauthsize
= SHA1_DIGEST_SIZE
,
2106 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2108 DESC_HDR_MODE0_DEU_CBC
|
2109 DESC_HDR_MODE0_DEU_3DES
|
2110 DESC_HDR_SEL1_MDEUA
|
2111 DESC_HDR_MODE1_MDEU_INIT
|
2112 DESC_HDR_MODE1_MDEU_PAD
|
2113 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
2115 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2118 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
2119 .cra_driver_name
= "authenc-hmac-sha224-"
2121 .cra_blocksize
= AES_BLOCK_SIZE
,
2122 .cra_flags
= CRYPTO_ALG_ASYNC
,
2124 .ivsize
= AES_BLOCK_SIZE
,
2125 .maxauthsize
= SHA224_DIGEST_SIZE
,
2127 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2128 DESC_HDR_SEL0_AESU
|
2129 DESC_HDR_MODE0_AESU_CBC
|
2130 DESC_HDR_SEL1_MDEUA
|
2131 DESC_HDR_MODE1_MDEU_INIT
|
2132 DESC_HDR_MODE1_MDEU_PAD
|
2133 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2135 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2138 .cra_name
= "authenc(hmac(sha224),"
2140 .cra_driver_name
= "authenc-hmac-sha224-"
2142 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2143 .cra_flags
= CRYPTO_ALG_ASYNC
,
2145 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2146 .maxauthsize
= SHA224_DIGEST_SIZE
,
2148 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2150 DESC_HDR_MODE0_DEU_CBC
|
2151 DESC_HDR_MODE0_DEU_3DES
|
2152 DESC_HDR_SEL1_MDEUA
|
2153 DESC_HDR_MODE1_MDEU_INIT
|
2154 DESC_HDR_MODE1_MDEU_PAD
|
2155 DESC_HDR_MODE1_MDEU_SHA224_HMAC
,
2157 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2160 .cra_name
= "authenc(hmac(sha256),cbc(aes))",
2161 .cra_driver_name
= "authenc-hmac-sha256-"
2163 .cra_blocksize
= AES_BLOCK_SIZE
,
2164 .cra_flags
= CRYPTO_ALG_ASYNC
,
2166 .ivsize
= AES_BLOCK_SIZE
,
2167 .maxauthsize
= SHA256_DIGEST_SIZE
,
2169 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2170 DESC_HDR_SEL0_AESU
|
2171 DESC_HDR_MODE0_AESU_CBC
|
2172 DESC_HDR_SEL1_MDEUA
|
2173 DESC_HDR_MODE1_MDEU_INIT
|
2174 DESC_HDR_MODE1_MDEU_PAD
|
2175 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2177 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2180 .cra_name
= "authenc(hmac(sha256),"
2182 .cra_driver_name
= "authenc-hmac-sha256-"
2184 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2185 .cra_flags
= CRYPTO_ALG_ASYNC
,
2187 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2188 .maxauthsize
= SHA256_DIGEST_SIZE
,
2190 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2192 DESC_HDR_MODE0_DEU_CBC
|
2193 DESC_HDR_MODE0_DEU_3DES
|
2194 DESC_HDR_SEL1_MDEUA
|
2195 DESC_HDR_MODE1_MDEU_INIT
|
2196 DESC_HDR_MODE1_MDEU_PAD
|
2197 DESC_HDR_MODE1_MDEU_SHA256_HMAC
,
2199 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2202 .cra_name
= "authenc(hmac(sha384),cbc(aes))",
2203 .cra_driver_name
= "authenc-hmac-sha384-"
2205 .cra_blocksize
= AES_BLOCK_SIZE
,
2206 .cra_flags
= CRYPTO_ALG_ASYNC
,
2208 .ivsize
= AES_BLOCK_SIZE
,
2209 .maxauthsize
= SHA384_DIGEST_SIZE
,
2211 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2212 DESC_HDR_SEL0_AESU
|
2213 DESC_HDR_MODE0_AESU_CBC
|
2214 DESC_HDR_SEL1_MDEUB
|
2215 DESC_HDR_MODE1_MDEU_INIT
|
2216 DESC_HDR_MODE1_MDEU_PAD
|
2217 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2219 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2222 .cra_name
= "authenc(hmac(sha384),"
2224 .cra_driver_name
= "authenc-hmac-sha384-"
2226 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2227 .cra_flags
= CRYPTO_ALG_ASYNC
,
2229 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2230 .maxauthsize
= SHA384_DIGEST_SIZE
,
2232 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2234 DESC_HDR_MODE0_DEU_CBC
|
2235 DESC_HDR_MODE0_DEU_3DES
|
2236 DESC_HDR_SEL1_MDEUB
|
2237 DESC_HDR_MODE1_MDEU_INIT
|
2238 DESC_HDR_MODE1_MDEU_PAD
|
2239 DESC_HDR_MODE1_MDEUB_SHA384_HMAC
,
2241 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2244 .cra_name
= "authenc(hmac(sha512),cbc(aes))",
2245 .cra_driver_name
= "authenc-hmac-sha512-"
2247 .cra_blocksize
= AES_BLOCK_SIZE
,
2248 .cra_flags
= CRYPTO_ALG_ASYNC
,
2250 .ivsize
= AES_BLOCK_SIZE
,
2251 .maxauthsize
= SHA512_DIGEST_SIZE
,
2253 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2254 DESC_HDR_SEL0_AESU
|
2255 DESC_HDR_MODE0_AESU_CBC
|
2256 DESC_HDR_SEL1_MDEUB
|
2257 DESC_HDR_MODE1_MDEU_INIT
|
2258 DESC_HDR_MODE1_MDEU_PAD
|
2259 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2261 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2264 .cra_name
= "authenc(hmac(sha512),"
2266 .cra_driver_name
= "authenc-hmac-sha512-"
2268 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2269 .cra_flags
= CRYPTO_ALG_ASYNC
,
2271 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2272 .maxauthsize
= SHA512_DIGEST_SIZE
,
2274 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2276 DESC_HDR_MODE0_DEU_CBC
|
2277 DESC_HDR_MODE0_DEU_3DES
|
2278 DESC_HDR_SEL1_MDEUB
|
2279 DESC_HDR_MODE1_MDEU_INIT
|
2280 DESC_HDR_MODE1_MDEU_PAD
|
2281 DESC_HDR_MODE1_MDEUB_SHA512_HMAC
,
2283 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2286 .cra_name
= "authenc(hmac(md5),cbc(aes))",
2287 .cra_driver_name
= "authenc-hmac-md5-"
2289 .cra_blocksize
= AES_BLOCK_SIZE
,
2290 .cra_flags
= CRYPTO_ALG_ASYNC
,
2292 .ivsize
= AES_BLOCK_SIZE
,
2293 .maxauthsize
= MD5_DIGEST_SIZE
,
2295 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2296 DESC_HDR_SEL0_AESU
|
2297 DESC_HDR_MODE0_AESU_CBC
|
2298 DESC_HDR_SEL1_MDEUA
|
2299 DESC_HDR_MODE1_MDEU_INIT
|
2300 DESC_HDR_MODE1_MDEU_PAD
|
2301 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2303 { .type
= CRYPTO_ALG_TYPE_AEAD
,
2306 .cra_name
= "authenc(hmac(md5),cbc(des3_ede))",
2307 .cra_driver_name
= "authenc-hmac-md5-"
2309 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2310 .cra_flags
= CRYPTO_ALG_ASYNC
,
2312 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2313 .maxauthsize
= MD5_DIGEST_SIZE
,
2315 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
2317 DESC_HDR_MODE0_DEU_CBC
|
2318 DESC_HDR_MODE0_DEU_3DES
|
2319 DESC_HDR_SEL1_MDEUA
|
2320 DESC_HDR_MODE1_MDEU_INIT
|
2321 DESC_HDR_MODE1_MDEU_PAD
|
2322 DESC_HDR_MODE1_MDEU_MD5_HMAC
,
2324 /* ABLKCIPHER algorithms. */
2325 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2327 .cra_name
= "ecb(aes)",
2328 .cra_driver_name
= "ecb-aes-talitos",
2329 .cra_blocksize
= AES_BLOCK_SIZE
,
2330 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2333 .min_keysize
= AES_MIN_KEY_SIZE
,
2334 .max_keysize
= AES_MAX_KEY_SIZE
,
2335 .ivsize
= AES_BLOCK_SIZE
,
2338 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2341 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2343 .cra_name
= "cbc(aes)",
2344 .cra_driver_name
= "cbc-aes-talitos",
2345 .cra_blocksize
= AES_BLOCK_SIZE
,
2346 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2349 .min_keysize
= AES_MIN_KEY_SIZE
,
2350 .max_keysize
= AES_MAX_KEY_SIZE
,
2351 .ivsize
= AES_BLOCK_SIZE
,
2354 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2355 DESC_HDR_SEL0_AESU
|
2356 DESC_HDR_MODE0_AESU_CBC
,
2358 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2360 .cra_name
= "ctr(aes)",
2361 .cra_driver_name
= "ctr-aes-talitos",
2362 .cra_blocksize
= AES_BLOCK_SIZE
,
2363 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2366 .min_keysize
= AES_MIN_KEY_SIZE
,
2367 .max_keysize
= AES_MAX_KEY_SIZE
,
2368 .ivsize
= AES_BLOCK_SIZE
,
2371 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2372 DESC_HDR_SEL0_AESU
|
2373 DESC_HDR_MODE0_AESU_CTR
,
2375 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2377 .cra_name
= "ecb(des)",
2378 .cra_driver_name
= "ecb-des-talitos",
2379 .cra_blocksize
= DES_BLOCK_SIZE
,
2380 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2383 .min_keysize
= DES_KEY_SIZE
,
2384 .max_keysize
= DES_KEY_SIZE
,
2385 .ivsize
= DES_BLOCK_SIZE
,
2388 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2391 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2393 .cra_name
= "cbc(des)",
2394 .cra_driver_name
= "cbc-des-talitos",
2395 .cra_blocksize
= DES_BLOCK_SIZE
,
2396 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2399 .min_keysize
= DES_KEY_SIZE
,
2400 .max_keysize
= DES_KEY_SIZE
,
2401 .ivsize
= DES_BLOCK_SIZE
,
2404 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2406 DESC_HDR_MODE0_DEU_CBC
,
2408 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2410 .cra_name
= "ecb(des3_ede)",
2411 .cra_driver_name
= "ecb-3des-talitos",
2412 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2413 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2416 .min_keysize
= DES3_EDE_KEY_SIZE
,
2417 .max_keysize
= DES3_EDE_KEY_SIZE
,
2418 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2421 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2423 DESC_HDR_MODE0_DEU_3DES
,
2425 { .type
= CRYPTO_ALG_TYPE_ABLKCIPHER
,
2427 .cra_name
= "cbc(des3_ede)",
2428 .cra_driver_name
= "cbc-3des-talitos",
2429 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
2430 .cra_flags
= CRYPTO_ALG_TYPE_ABLKCIPHER
|
2433 .min_keysize
= DES3_EDE_KEY_SIZE
,
2434 .max_keysize
= DES3_EDE_KEY_SIZE
,
2435 .ivsize
= DES3_EDE_BLOCK_SIZE
,
2438 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2440 DESC_HDR_MODE0_DEU_CBC
|
2441 DESC_HDR_MODE0_DEU_3DES
,
2443 /* AHASH algorithms. */
2444 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2446 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2449 .cra_driver_name
= "md5-talitos",
2450 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2451 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2455 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2456 DESC_HDR_SEL0_MDEUA
|
2457 DESC_HDR_MODE0_MDEU_MD5
,
2459 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2461 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2464 .cra_driver_name
= "sha1-talitos",
2465 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2466 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2470 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2471 DESC_HDR_SEL0_MDEUA
|
2472 DESC_HDR_MODE0_MDEU_SHA1
,
2474 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2476 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2478 .cra_name
= "sha224",
2479 .cra_driver_name
= "sha224-talitos",
2480 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2481 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2485 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2486 DESC_HDR_SEL0_MDEUA
|
2487 DESC_HDR_MODE0_MDEU_SHA224
,
2489 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2491 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2493 .cra_name
= "sha256",
2494 .cra_driver_name
= "sha256-talitos",
2495 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2496 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2500 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2501 DESC_HDR_SEL0_MDEUA
|
2502 DESC_HDR_MODE0_MDEU_SHA256
,
2504 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2506 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2508 .cra_name
= "sha384",
2509 .cra_driver_name
= "sha384-talitos",
2510 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2511 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2515 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2516 DESC_HDR_SEL0_MDEUB
|
2517 DESC_HDR_MODE0_MDEUB_SHA384
,
2519 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2521 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2523 .cra_name
= "sha512",
2524 .cra_driver_name
= "sha512-talitos",
2525 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2526 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2530 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2531 DESC_HDR_SEL0_MDEUB
|
2532 DESC_HDR_MODE0_MDEUB_SHA512
,
2534 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2536 .halg
.digestsize
= MD5_DIGEST_SIZE
,
2538 .cra_name
= "hmac(md5)",
2539 .cra_driver_name
= "hmac-md5-talitos",
2540 .cra_blocksize
= MD5_HMAC_BLOCK_SIZE
,
2541 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2545 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2546 DESC_HDR_SEL0_MDEUA
|
2547 DESC_HDR_MODE0_MDEU_MD5
,
2549 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2551 .halg
.digestsize
= SHA1_DIGEST_SIZE
,
2553 .cra_name
= "hmac(sha1)",
2554 .cra_driver_name
= "hmac-sha1-talitos",
2555 .cra_blocksize
= SHA1_BLOCK_SIZE
,
2556 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2560 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2561 DESC_HDR_SEL0_MDEUA
|
2562 DESC_HDR_MODE0_MDEU_SHA1
,
2564 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2566 .halg
.digestsize
= SHA224_DIGEST_SIZE
,
2568 .cra_name
= "hmac(sha224)",
2569 .cra_driver_name
= "hmac-sha224-talitos",
2570 .cra_blocksize
= SHA224_BLOCK_SIZE
,
2571 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2575 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2576 DESC_HDR_SEL0_MDEUA
|
2577 DESC_HDR_MODE0_MDEU_SHA224
,
2579 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2581 .halg
.digestsize
= SHA256_DIGEST_SIZE
,
2583 .cra_name
= "hmac(sha256)",
2584 .cra_driver_name
= "hmac-sha256-talitos",
2585 .cra_blocksize
= SHA256_BLOCK_SIZE
,
2586 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2590 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2591 DESC_HDR_SEL0_MDEUA
|
2592 DESC_HDR_MODE0_MDEU_SHA256
,
2594 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2596 .halg
.digestsize
= SHA384_DIGEST_SIZE
,
2598 .cra_name
= "hmac(sha384)",
2599 .cra_driver_name
= "hmac-sha384-talitos",
2600 .cra_blocksize
= SHA384_BLOCK_SIZE
,
2601 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2605 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2606 DESC_HDR_SEL0_MDEUB
|
2607 DESC_HDR_MODE0_MDEUB_SHA384
,
2609 { .type
= CRYPTO_ALG_TYPE_AHASH
,
2611 .halg
.digestsize
= SHA512_DIGEST_SIZE
,
2613 .cra_name
= "hmac(sha512)",
2614 .cra_driver_name
= "hmac-sha512-talitos",
2615 .cra_blocksize
= SHA512_BLOCK_SIZE
,
2616 .cra_flags
= CRYPTO_ALG_TYPE_AHASH
|
2620 .desc_hdr_template
= DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2621 DESC_HDR_SEL0_MDEUB
|
2622 DESC_HDR_MODE0_MDEUB_SHA512
,
2626 struct talitos_crypto_alg
{
2627 struct list_head entry
;
2629 struct talitos_alg_template algt
;
2632 static int talitos_cra_init(struct crypto_tfm
*tfm
)
2634 struct crypto_alg
*alg
= tfm
->__crt_alg
;
2635 struct talitos_crypto_alg
*talitos_alg
;
2636 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2637 struct talitos_private
*priv
;
2639 if ((alg
->cra_flags
& CRYPTO_ALG_TYPE_MASK
) == CRYPTO_ALG_TYPE_AHASH
)
2640 talitos_alg
= container_of(__crypto_ahash_alg(alg
),
2641 struct talitos_crypto_alg
,
2644 talitos_alg
= container_of(alg
, struct talitos_crypto_alg
,
2647 /* update context with ptr to dev */
2648 ctx
->dev
= talitos_alg
->dev
;
2650 /* assign SEC channel to tfm in round-robin fashion */
2651 priv
= dev_get_drvdata(ctx
->dev
);
2652 ctx
->ch
= atomic_inc_return(&priv
->last_chan
) &
2653 (priv
->num_channels
- 1);
2655 /* copy descriptor header template value */
2656 ctx
->desc_hdr_template
= talitos_alg
->algt
.desc_hdr_template
;
2658 /* select done notification */
2659 ctx
->desc_hdr_template
|= DESC_HDR_DONE_NOTIFY
;
2664 static int talitos_cra_init_aead(struct crypto_aead
*tfm
)
2666 talitos_cra_init(crypto_aead_tfm(tfm
));
2670 static int talitos_cra_init_ahash(struct crypto_tfm
*tfm
)
2672 struct talitos_ctx
*ctx
= crypto_tfm_ctx(tfm
);
2674 talitos_cra_init(tfm
);
2677 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm
),
2678 sizeof(struct talitos_ahash_req_ctx
));
2684 * given the alg's descriptor header template, determine whether descriptor
2685 * type and primary/secondary execution units required match the hw
2686 * capabilities description provided in the device tree node.
2688 static int hw_supports(struct device
*dev
, __be32 desc_hdr_template
)
2690 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2693 ret
= (1 << DESC_TYPE(desc_hdr_template
) & priv
->desc_types
) &&
2694 (1 << PRIMARY_EU(desc_hdr_template
) & priv
->exec_units
);
2696 if (SECONDARY_EU(desc_hdr_template
))
2697 ret
= ret
&& (1 << SECONDARY_EU(desc_hdr_template
)
2698 & priv
->exec_units
);
2703 static int talitos_remove(struct platform_device
*ofdev
)
2705 struct device
*dev
= &ofdev
->dev
;
2706 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2707 struct talitos_crypto_alg
*t_alg
, *n
;
2710 list_for_each_entry_safe(t_alg
, n
, &priv
->alg_list
, entry
) {
2711 switch (t_alg
->algt
.type
) {
2712 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
2714 case CRYPTO_ALG_TYPE_AEAD
:
2715 crypto_unregister_aead(&t_alg
->algt
.alg
.aead
);
2716 case CRYPTO_ALG_TYPE_AHASH
:
2717 crypto_unregister_ahash(&t_alg
->algt
.alg
.hash
);
2720 list_del(&t_alg
->entry
);
2724 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
))
2725 talitos_unregister_rng(dev
);
2727 for (i
= 0; priv
->chan
&& i
< priv
->num_channels
; i
++)
2728 kfree(priv
->chan
[i
].fifo
);
2732 for (i
= 0; i
< 2; i
++)
2734 free_irq(priv
->irq
[i
], dev
);
2735 irq_dispose_mapping(priv
->irq
[i
]);
2738 tasklet_kill(&priv
->done_task
[0]);
2740 tasklet_kill(&priv
->done_task
[1]);
2749 static struct talitos_crypto_alg
*talitos_alg_alloc(struct device
*dev
,
2750 struct talitos_alg_template
2753 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2754 struct talitos_crypto_alg
*t_alg
;
2755 struct crypto_alg
*alg
;
2757 t_alg
= kzalloc(sizeof(struct talitos_crypto_alg
), GFP_KERNEL
);
2759 return ERR_PTR(-ENOMEM
);
2761 t_alg
->algt
= *template;
2763 switch (t_alg
->algt
.type
) {
2764 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
2765 alg
= &t_alg
->algt
.alg
.crypto
;
2766 alg
->cra_init
= talitos_cra_init
;
2767 alg
->cra_type
= &crypto_ablkcipher_type
;
2768 alg
->cra_ablkcipher
.setkey
= ablkcipher_setkey
;
2769 alg
->cra_ablkcipher
.encrypt
= ablkcipher_encrypt
;
2770 alg
->cra_ablkcipher
.decrypt
= ablkcipher_decrypt
;
2771 alg
->cra_ablkcipher
.geniv
= "eseqiv";
2773 case CRYPTO_ALG_TYPE_AEAD
:
2774 alg
= &t_alg
->algt
.alg
.aead
.base
;
2775 t_alg
->algt
.alg
.aead
.init
= talitos_cra_init_aead
;
2776 t_alg
->algt
.alg
.aead
.setkey
= aead_setkey
;
2777 t_alg
->algt
.alg
.aead
.encrypt
= aead_encrypt
;
2778 t_alg
->algt
.alg
.aead
.decrypt
= aead_decrypt
;
2780 case CRYPTO_ALG_TYPE_AHASH
:
2781 alg
= &t_alg
->algt
.alg
.hash
.halg
.base
;
2782 alg
->cra_init
= talitos_cra_init_ahash
;
2783 alg
->cra_type
= &crypto_ahash_type
;
2784 t_alg
->algt
.alg
.hash
.init
= ahash_init
;
2785 t_alg
->algt
.alg
.hash
.update
= ahash_update
;
2786 t_alg
->algt
.alg
.hash
.final
= ahash_final
;
2787 t_alg
->algt
.alg
.hash
.finup
= ahash_finup
;
2788 t_alg
->algt
.alg
.hash
.digest
= ahash_digest
;
2789 t_alg
->algt
.alg
.hash
.setkey
= ahash_setkey
;
2791 if (!(priv
->features
& TALITOS_FTR_HMAC_OK
) &&
2792 !strncmp(alg
->cra_name
, "hmac", 4)) {
2794 return ERR_PTR(-ENOTSUPP
);
2796 if (!(priv
->features
& TALITOS_FTR_SHA224_HWINIT
) &&
2797 (!strcmp(alg
->cra_name
, "sha224") ||
2798 !strcmp(alg
->cra_name
, "hmac(sha224)"))) {
2799 t_alg
->algt
.alg
.hash
.init
= ahash_init_sha224_swinit
;
2800 t_alg
->algt
.desc_hdr_template
=
2801 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU
|
2802 DESC_HDR_SEL0_MDEUA
|
2803 DESC_HDR_MODE0_MDEU_SHA256
;
2807 dev_err(dev
, "unknown algorithm type %d\n", t_alg
->algt
.type
);
2809 return ERR_PTR(-EINVAL
);
2812 alg
->cra_module
= THIS_MODULE
;
2813 alg
->cra_priority
= TALITOS_CRA_PRIORITY
;
2814 alg
->cra_alignmask
= 0;
2815 alg
->cra_ctxsize
= sizeof(struct talitos_ctx
);
2816 alg
->cra_flags
|= CRYPTO_ALG_KERN_DRIVER_ONLY
;
2823 static int talitos_probe_irq(struct platform_device
*ofdev
)
2825 struct device
*dev
= &ofdev
->dev
;
2826 struct device_node
*np
= ofdev
->dev
.of_node
;
2827 struct talitos_private
*priv
= dev_get_drvdata(dev
);
2829 bool is_sec1
= has_ftr_sec1(priv
);
2831 priv
->irq
[0] = irq_of_parse_and_map(np
, 0);
2832 if (!priv
->irq
[0]) {
2833 dev_err(dev
, "failed to map irq\n");
2837 err
= request_irq(priv
->irq
[0], talitos1_interrupt_4ch
, 0,
2838 dev_driver_string(dev
), dev
);
2842 priv
->irq
[1] = irq_of_parse_and_map(np
, 1);
2844 /* get the primary irq line */
2845 if (!priv
->irq
[1]) {
2846 err
= request_irq(priv
->irq
[0], talitos2_interrupt_4ch
, 0,
2847 dev_driver_string(dev
), dev
);
2851 err
= request_irq(priv
->irq
[0], talitos2_interrupt_ch0_2
, 0,
2852 dev_driver_string(dev
), dev
);
2856 /* get the secondary irq line */
2857 err
= request_irq(priv
->irq
[1], talitos2_interrupt_ch1_3
, 0,
2858 dev_driver_string(dev
), dev
);
2860 dev_err(dev
, "failed to request secondary irq\n");
2861 irq_dispose_mapping(priv
->irq
[1]);
2869 dev_err(dev
, "failed to request primary irq\n");
2870 irq_dispose_mapping(priv
->irq
[0]);
2877 static int talitos_probe(struct platform_device
*ofdev
)
2879 struct device
*dev
= &ofdev
->dev
;
2880 struct device_node
*np
= ofdev
->dev
.of_node
;
2881 struct talitos_private
*priv
;
2882 const unsigned int *prop
;
2886 priv
= kzalloc(sizeof(struct talitos_private
), GFP_KERNEL
);
2890 INIT_LIST_HEAD(&priv
->alg_list
);
2892 dev_set_drvdata(dev
, priv
);
2894 priv
->ofdev
= ofdev
;
2896 spin_lock_init(&priv
->reg_lock
);
2898 priv
->reg
= of_iomap(np
, 0);
2900 dev_err(dev
, "failed to of_iomap\n");
2905 /* get SEC version capabilities from device tree */
2906 prop
= of_get_property(np
, "fsl,num-channels", NULL
);
2908 priv
->num_channels
= *prop
;
2910 prop
= of_get_property(np
, "fsl,channel-fifo-len", NULL
);
2912 priv
->chfifo_len
= *prop
;
2914 prop
= of_get_property(np
, "fsl,exec-units-mask", NULL
);
2916 priv
->exec_units
= *prop
;
2918 prop
= of_get_property(np
, "fsl,descriptor-types-mask", NULL
);
2920 priv
->desc_types
= *prop
;
2922 if (!is_power_of_2(priv
->num_channels
) || !priv
->chfifo_len
||
2923 !priv
->exec_units
|| !priv
->desc_types
) {
2924 dev_err(dev
, "invalid property data in device tree node\n");
2929 if (of_device_is_compatible(np
, "fsl,sec3.0"))
2930 priv
->features
|= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
;
2932 if (of_device_is_compatible(np
, "fsl,sec2.1"))
2933 priv
->features
|= TALITOS_FTR_HW_AUTH_CHECK
|
2934 TALITOS_FTR_SHA224_HWINIT
|
2935 TALITOS_FTR_HMAC_OK
;
2937 if (of_device_is_compatible(np
, "fsl,sec1.0"))
2938 priv
->features
|= TALITOS_FTR_SEC1
;
2940 if (of_device_is_compatible(np
, "fsl,sec1.2")) {
2941 priv
->reg_deu
= priv
->reg
+ TALITOS12_DEU
;
2942 priv
->reg_aesu
= priv
->reg
+ TALITOS12_AESU
;
2943 priv
->reg_mdeu
= priv
->reg
+ TALITOS12_MDEU
;
2944 stride
= TALITOS1_CH_STRIDE
;
2945 } else if (of_device_is_compatible(np
, "fsl,sec1.0")) {
2946 priv
->reg_deu
= priv
->reg
+ TALITOS10_DEU
;
2947 priv
->reg_aesu
= priv
->reg
+ TALITOS10_AESU
;
2948 priv
->reg_mdeu
= priv
->reg
+ TALITOS10_MDEU
;
2949 priv
->reg_afeu
= priv
->reg
+ TALITOS10_AFEU
;
2950 priv
->reg_rngu
= priv
->reg
+ TALITOS10_RNGU
;
2951 priv
->reg_pkeu
= priv
->reg
+ TALITOS10_PKEU
;
2952 stride
= TALITOS1_CH_STRIDE
;
2954 priv
->reg_deu
= priv
->reg
+ TALITOS2_DEU
;
2955 priv
->reg_aesu
= priv
->reg
+ TALITOS2_AESU
;
2956 priv
->reg_mdeu
= priv
->reg
+ TALITOS2_MDEU
;
2957 priv
->reg_afeu
= priv
->reg
+ TALITOS2_AFEU
;
2958 priv
->reg_rngu
= priv
->reg
+ TALITOS2_RNGU
;
2959 priv
->reg_pkeu
= priv
->reg
+ TALITOS2_PKEU
;
2960 priv
->reg_keu
= priv
->reg
+ TALITOS2_KEU
;
2961 priv
->reg_crcu
= priv
->reg
+ TALITOS2_CRCU
;
2962 stride
= TALITOS2_CH_STRIDE
;
2965 err
= talitos_probe_irq(ofdev
);
2969 if (of_device_is_compatible(np
, "fsl,sec1.0")) {
2970 tasklet_init(&priv
->done_task
[0], talitos1_done_4ch
,
2971 (unsigned long)dev
);
2973 if (!priv
->irq
[1]) {
2974 tasklet_init(&priv
->done_task
[0], talitos2_done_4ch
,
2975 (unsigned long)dev
);
2977 tasklet_init(&priv
->done_task
[0], talitos2_done_ch0_2
,
2978 (unsigned long)dev
);
2979 tasklet_init(&priv
->done_task
[1], talitos2_done_ch1_3
,
2980 (unsigned long)dev
);
2984 priv
->chan
= kzalloc(sizeof(struct talitos_channel
) *
2985 priv
->num_channels
, GFP_KERNEL
);
2987 dev_err(dev
, "failed to allocate channel management space\n");
2992 priv
->fifo_len
= roundup_pow_of_two(priv
->chfifo_len
);
2994 for (i
= 0; i
< priv
->num_channels
; i
++) {
2995 priv
->chan
[i
].reg
= priv
->reg
+ stride
* (i
+ 1);
2996 if (!priv
->irq
[1] || !(i
& 1))
2997 priv
->chan
[i
].reg
+= TALITOS_CH_BASE_OFFSET
;
2999 spin_lock_init(&priv
->chan
[i
].head_lock
);
3000 spin_lock_init(&priv
->chan
[i
].tail_lock
);
3002 priv
->chan
[i
].fifo
= kzalloc(sizeof(struct talitos_request
) *
3003 priv
->fifo_len
, GFP_KERNEL
);
3004 if (!priv
->chan
[i
].fifo
) {
3005 dev_err(dev
, "failed to allocate request fifo %d\n", i
);
3010 atomic_set(&priv
->chan
[i
].submit_count
,
3011 -(priv
->chfifo_len
- 1));
3014 dma_set_mask(dev
, DMA_BIT_MASK(36));
3016 /* reset and initialize the h/w */
3017 err
= init_device(dev
);
3019 dev_err(dev
, "failed to initialize device\n");
3023 /* register the RNG, if available */
3024 if (hw_supports(dev
, DESC_HDR_SEL0_RNG
)) {
3025 err
= talitos_register_rng(dev
);
3027 dev_err(dev
, "failed to register hwrng: %d\n", err
);
3030 dev_info(dev
, "hwrng\n");
3033 /* register crypto algorithms the device supports */
3034 for (i
= 0; i
< ARRAY_SIZE(driver_algs
); i
++) {
3035 if (hw_supports(dev
, driver_algs
[i
].desc_hdr_template
)) {
3036 struct talitos_crypto_alg
*t_alg
;
3037 struct crypto_alg
*alg
= NULL
;
3039 t_alg
= talitos_alg_alloc(dev
, &driver_algs
[i
]);
3040 if (IS_ERR(t_alg
)) {
3041 err
= PTR_ERR(t_alg
);
3042 if (err
== -ENOTSUPP
)
3047 switch (t_alg
->algt
.type
) {
3048 case CRYPTO_ALG_TYPE_ABLKCIPHER
:
3049 err
= crypto_register_alg(
3050 &t_alg
->algt
.alg
.crypto
);
3051 alg
= &t_alg
->algt
.alg
.crypto
;
3054 case CRYPTO_ALG_TYPE_AEAD
:
3055 err
= crypto_register_aead(
3056 &t_alg
->algt
.alg
.aead
);
3057 alg
= &t_alg
->algt
.alg
.aead
.base
;
3060 case CRYPTO_ALG_TYPE_AHASH
:
3061 err
= crypto_register_ahash(
3062 &t_alg
->algt
.alg
.hash
);
3063 alg
= &t_alg
->algt
.alg
.hash
.halg
.base
;
3067 dev_err(dev
, "%s alg registration failed\n",
3068 alg
->cra_driver_name
);
3071 list_add_tail(&t_alg
->entry
, &priv
->alg_list
);
3074 if (!list_empty(&priv
->alg_list
))
3075 dev_info(dev
, "%s algorithms registered in /proc/crypto\n",
3076 (char *)of_get_property(np
, "compatible", NULL
));
3081 talitos_remove(ofdev
);
3086 static const struct of_device_id talitos_match
[] = {
3087 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3089 .compatible
= "fsl,sec1.0",
3092 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3094 .compatible
= "fsl,sec2.0",
3099 MODULE_DEVICE_TABLE(of
, talitos_match
);
3101 static struct platform_driver talitos_driver
= {
3104 .of_match_table
= talitos_match
,
3106 .probe
= talitos_probe
,
3107 .remove
= talitos_remove
,
3110 module_platform_driver(talitos_driver
);
3112 MODULE_LICENSE("GPL");
3113 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3114 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");