/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *talitos_ptr, dma_addr_t dma_addr)
{
	talitos_ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	talitos_ptr->eptr = upper_32_bits(dma_addr);
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *talitos_ptr,
				   unsigned short len, void *data,
				   unsigned char extent,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);

	talitos_ptr->len = cpu_to_be16(len);
	to_talitos_ptr(talitos_ptr, dma_addr);
	talitos_ptr->j_extent = extent;
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *talitos_ptr,
				     enum dma_data_direction dir)
{
	dma_unmap_single(dev, be32_to_cpu(talitos_ptr->ptr),
			 be16_to_cpu(talitos_ptr->len), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->chan[ch].reg + TALITOS_CCCR, TALITOS_CCCR_RESET);

	while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) & TALITOS_CCCR_RESET)
	       && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	u32 mcr = TALITOS_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & TALITOS_MCR_SWR)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	setbits32(priv->reg + TALITOS_IMR, TALITOS_IMR_INIT);
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg + TALITOS_MDEUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
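
/*
 * Illustrative sketch, not part of the original driver: a minimal caller of
 * talitos_submit().  The descriptor must already hold dma-mapped pointers;
 * -EINPROGRESS means the request was queued and the callback reports the
 * final status.  The names below are hypothetical and for illustration only.
 */
#if 0
static void example_done(struct device *dev, struct talitos_desc *desc,
			 void *context, int error)
{
	/* check error and the DONE/feedback bits in desc->hdr here */
}

static int example_submit_one(struct device *dev, struct talitos_desc *desc)
{
	/* submit on channel 0 with no caller context */
	return talitos_submit(dev, 0, desc, example_done, NULL);
}
#endif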
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if ((request->desc->hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 sizeof(struct talitos_desc),
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS_DONE(name, ch_done_mask)				\
static void talitos_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */ \
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}
DEF_TALITOS_DONE(4ch, TALITOS_ISR_4CHDONE)
DEF_TALITOS_DONE(ch0_2, TALITOS_ISR_CH_0_2_DONE)
DEF_TALITOS_DONE(ch1_3, TALITOS_ISR_CH_1_3_DONE)
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AFEUISR),
			in_be32(priv->reg + TALITOS_AFEUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_DEUISR),
			in_be32(priv->reg + TALITOS_DEUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_RNGUISR),
			in_be32(priv->reg + TALITOS_RNGUISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_PKEUISR),
			in_be32(priv->reg + TALITOS_PKEUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_AESUISR),
			in_be32(priv->reg + TALITOS_AESUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_KEUISR),
			in_be32(priv->reg + TALITOS_KEUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_MDEUISR),
			in_be32(priv->reg + TALITOS_MDEUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg + TALITOS_CRCUISR),
			in_be32(priv->reg + TALITOS_CRCUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0, reset_ch = 0;
	u32 v, v_lo;

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (!(isr & (1 << (ch * 2 + 1))))
			continue;

		error = -EINVAL;

		v = in_be32(priv->chan[ch].reg + TALITOS_CCPSR);
		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, "invalid execution unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (v_lo & TALITOS_CCPSR_LO_GB)
			dev_err(dev, "gather boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_GRL)
			dev_err(dev, "gather return/length error\n");
		if (v_lo & TALITOS_CCPSR_LO_SB)
			dev_err(dev, "scatter boundary error\n");
		if (v_lo & TALITOS_CCPSR_LO_SRL)
			dev_err(dev, "scatter return/length error\n");

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || isr & ~TALITOS_ISR_4CHERR || isr_lo) {
		dev_err(dev, "done overflow, internal time out, or rngu error: "
			"ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}

#define DEF_TALITOS_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	\
static irqreturn_t talitos_interrupt_##name(int irq, void *data)	\
{									\
	struct device *dev = data;					\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	u32 isr, isr_lo;						\
	unsigned long flags;						\
									\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	isr = in_be32(priv->reg + TALITOS_ISR);				\
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			\
	/* Acknowledge interrupt */					\
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			\
									\
	if (unlikely(isr & ch_err_mask || isr_lo)) {			\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
		talitos_error(dev, isr & ch_err_mask, isr_lo);		\
	}								\
	else {								\
		if (likely(isr & ch_done_mask)) {			\
			/* mask further done interrupts. */		\
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask); \
			/* done_task will unmask done interrupts at exit */ \
			tasklet_schedule(&priv->done_task[tlet]);	\
		}							\
		spin_unlock_irqrestore(&priv->reg_lock, flags);		\
	}								\
									\
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED : \
								IRQ_NONE; \
}
DEF_TALITOS_INTERRUPT(4ch, TALITOS_ISR_4CHDONE, TALITOS_ISR_4CHERR, 0)
DEF_TALITOS_INTERRUPT(ch0_2, TALITOS_ISR_CH_0_2_DONE, TALITOS_ISR_CH_0_2_ERR, 0)
DEF_TALITOS_INTERRUPT(ch1_3, TALITOS_ISR_CH_1_3_DONE, TALITOS_ISR_CH_1_3_ERR, 1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg + TALITOS_RNGUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO);
	*data = in_be32(priv->reg + TALITOS_RNGU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg + TALITOS_RNGURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg + TALITOS_RNGUSR_LO) & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg + TALITOS_RNGUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	return hwrng_register(&priv->rng);
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	hwrng_unregister(&priv->rng);
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
#define TALITOS_MAX_KEY_SIZE		96
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

#define MD5_BLOCK_SIZE    64

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
	unsigned int authsize;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	u64 nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

static int aead_setauthsize(struct crypto_aead *authenc,
			    unsigned int authsize)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);

	ctx->authsize = authsize;

	return 0;
}

static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * talitos_edesc - s/w-extended descriptor
 * @assoc_nents: number of segments in associated data scatterlist
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @assoc_chained: whether assoc is chained or not
 * @src_chained: whether src is chained or not
 * @dst_chained: whether dst is chained or not
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int assoc_nents;
	int src_nents;
	int dst_nents;
	bool assoc_chained;
	bool src_chained;
	bool dst_chained;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	struct talitos_ptr link_tbl[0];
};
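
/*
 * Note added for clarity (not in the original source): link_tbl[] is a
 * flexible array holding the input and output h/w link tables, two extra
 * entries used for the ICV, and the stashed ICV bytes themselves; dma_len
 * records how much of it gets dma-mapped.  See talitos_edesc_alloc() below
 * for the exact sizing.
 */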
static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
			  unsigned int nents, enum dma_data_direction dir,
			  bool chained)
{
	if (unlikely(chained))
		while (sg) {
			dma_map_sg(dev, sg, 1, dir);
			sg = scatterwalk_sg_next(sg);
		}
	else
		dma_map_sg(dev, sg, nents, dir);
	return nents;
}

static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
				   enum dma_data_direction dir)
{
	while (sg) {
		dma_unmap_sg(dev, sg, 1, dir);
		sg = scatterwalk_sg_next(sg);
	}
}

static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst)
{
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (src != dst) {
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_TO_DEVICE);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst) {
			if (edesc->dst_chained)
				talitos_unmap_sg_chain(dev, dst,
						       DMA_FROM_DEVICE);
			else
				dma_unmap_sg(dev, dst, dst_nents,
					     DMA_FROM_DEVICE);
		}
	} else
		if (edesc->src_chained)
			talitos_unmap_sg_chain(dev, src, DMA_BIDIRECTIONAL);
		else
			dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6], DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	if (edesc->assoc_chained)
		talitos_unmap_sg_chain(dev, areq->assoc, DMA_TO_DEVICE);
	else if (areq->assoclen)
		/* assoc_nents counts also for IV in non-contiguous cases */
		dma_unmap_sg(dev, areq->assoc,
			     edesc->assoc_nents ? edesc->assoc_nents - 1 : 1,
			     DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->dst_nents) {
		icvdata = &edesc->link_tbl[edesc->src_nents +
					   edesc->dst_nents + 2 +
					   edesc->assoc_nents];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - ctx->authsize,
		       icvdata, ctx->authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		if (edesc->dma_len)
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2 +
						   edesc->assoc_nents];
		else
			icvdata = &edesc->link_tbl[0];

		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		err = memcmp(icvdata, (char *)sg_virt(sg) + sg->length -
			     ctx->authsize, ctx->authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
			  int cryptlen, struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;

	while (n_sg--) {
		to_talitos_ptr(link_tbl_ptr, sg_dma_address(sg));
		link_tbl_ptr->len = cpu_to_be16(sg_dma_len(sg));
		link_tbl_ptr->j_extent = 0;
		link_tbl_ptr++;
		cryptlen -= sg_dma_len(sg);
		sg = scatterwalk_sg_next(sg);
	}

	/* adjust (decrease) last one (or two) entry's len to cryptlen */
	link_tbl_ptr--;
	while (be16_to_cpu(link_tbl_ptr->len) <= (-cryptlen)) {
		/* Empty this entry, and move to previous one */
		cryptlen += be16_to_cpu(link_tbl_ptr->len);
		link_tbl_ptr->len = 0;
		sg_count--;
		link_tbl_ptr--;
	}
	be16_add_cpu(&link_tbl_ptr->len, cryptlen);

	/* tag end of link table */
	link_tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

	return sg_count;
}
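
/*
 * Worked example added for clarity (not in the original source): with two
 * dma-mapped segments of 64 bytes each but cryptlen == 100, the loop in
 * sg_to_link_tbl() emits entries of length 64 and 64, leaving
 * cryptlen == -28; the fix-up then shortens the last entry to 36 bytes via
 * be16_add_cpu() and tags it with DESC_PTR_LNKTBL_RETURN so the SEC stops
 * reading there.
 */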
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     u64 seq, void (*callback) (struct device *dev,
						struct talitos_desc *desc,
						void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int authsize = ctx->authsize;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int sg_count, ret;
	int sg_link_tbl_len;

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       0, DMA_TO_DEVICE);

	/* hmac data */
	desc->ptr[1].len = cpu_to_be16(areq->assoclen + ivsize);
	if (edesc->assoc_nents) {
		int tbl_off = edesc->src_nents + edesc->dst_nents + 2;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
			       sizeof(struct talitos_ptr));
		desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;

		/* assoc_nents - 1 entries for assoc, 1 for IV */
		sg_count = sg_to_link_tbl(areq->assoc, edesc->assoc_nents - 1,
					  areq->assoclen, tbl_ptr);

		/* add IV to link table */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		to_talitos_ptr(tbl_ptr, edesc->iv_dma);
		tbl_ptr->len = cpu_to_be16(ivsize);
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;

		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	} else {
		if (areq->assoclen)
			to_talitos_ptr(&desc->ptr[1],
				       sg_dma_address(areq->assoc));
		else
			to_talitos_ptr(&desc->ptr[1], edesc->iv_dma);
		desc->ptr[1].j_extent = 0;
	}

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[2], edesc->iv_dma);
	desc->ptr[2].len = cpu_to_be16(ivsize);
	desc->ptr[2].j_extent = 0;
	/* Sync needed for the aead_givencrypt case */
	dma_sync_single_for_device(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
			       (char *)&ctx->key + ctx->authkeylen, 0,
			       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	desc->ptr[4].len = cpu_to_be16(cryptlen);
	desc->ptr[4].j_extent = authsize;

	sg_count = talitos_map_sg(dev, areq->src, edesc->src_nents ? : 1,
				  (areq->src == areq->dst) ? DMA_BIDIRECTIONAL
							   : DMA_TO_DEVICE,
				  edesc->src_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src));
	} else {
		sg_link_tbl_len = cryptlen;

		if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
			sg_link_tbl_len = cryptlen + authsize;

		sg_count = sg_to_link_tbl(areq->src, sg_count, sg_link_tbl_len,
					  &edesc->link_tbl[0]);
		if (sg_count > 1) {
			desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
			to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl);
			dma_sync_single_for_device(dev, edesc->dma_link_tbl,
						   edesc->dma_len,
						   DMA_BIDIRECTIONAL);
		} else {
			/* Only one segment now, so no link tbl needed */
			to_talitos_ptr(&desc->ptr[4],
				       sg_dma_address(areq->src));
		}
	}

	/* cipher out */
	desc->ptr[5].len = cpu_to_be16(cryptlen);
	desc->ptr[5].j_extent = authsize;

	if (areq->src != areq->dst)
		sg_count = talitos_map_sg(dev, areq->dst,
					  edesc->dst_nents ? : 1,
					  DMA_FROM_DEVICE, edesc->dst_chained);

	if (sg_count == 1) {
		to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst));
	} else {
		int tbl_off = edesc->src_nents + 1;
		struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];

		to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
			       tbl_off * sizeof(struct talitos_ptr));
		sg_count = sg_to_link_tbl(areq->dst, sg_count, cryptlen,
					  tbl_ptr);

		/* Add an entry to the link table for ICV data */
		tbl_ptr += sg_count - 1;
		tbl_ptr->j_extent = 0;
		tbl_ptr++;
		tbl_ptr->j_extent = DESC_PTR_LNKTBL_RETURN;
		tbl_ptr->len = cpu_to_be16(authsize);

		/* icv data follows link tables */
		to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl +
			       (tbl_off + edesc->dst_nents + 1 +
				edesc->assoc_nents) *
			       sizeof(struct talitos_ptr));
		desc->ptr[5].j_extent |= DESC_PTR_LNKTBL_JUMP;
		dma_sync_single_for_device(ctx->dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);
	}

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv, 0,
			       DMA_FROM_DEVICE);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
1094 static int sg_count(struct scatterlist
*sg_list
, int nbytes
, bool *chained
)
1096 struct scatterlist
*sg
= sg_list
;
1100 while (nbytes
> 0) {
1102 nbytes
-= sg
->length
;
1103 if (!sg_is_last(sg
) && (sg
+ 1)->length
== 0)
1105 sg
= scatterwalk_sg_next(sg
);
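
/*
 * Note added for clarity (not in the original source): a zero-length entry
 * that is not the last one marks a scatterlist that was chained with
 * scatterwalk_sg_chain(); *chained is reported so talitos_map_sg() above can
 * map each chunk of the chain separately.
 */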
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *assoc,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int assoc_nents = 0, src_nents, dst_nents, alloc_len, dma_len;
	bool assoc_chained = false, src_chained = false, dst_chained = false;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;

	if (cryptlen + authsize > TALITOS_MAX_DATA_LEN) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (assoclen) {
		/*
		 * Currently it is assumed that iv is provided whenever assoc
		 * is.
		 */
		BUG_ON(!iv);

		assoc_nents = sg_count(assoc, assoclen, &assoc_chained);
		talitos_map_sg(dev, assoc, assoc_nents, DMA_TO_DEVICE,
			       assoc_chained);
		assoc_nents = (assoc_nents == 1) ? 0 : assoc_nents;

		if (assoc_nents || sg_dma_address(assoc) + assoclen != iv_dma)
			assoc_nents = assoc_nents ? assoc_nents + 1 : 2;
	}

	if (!dst || dst == src) {
		src_nents = sg_count(src, cryptlen + authsize, &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
	} else { /* dst && dst != src*/
		src_nents = sg_count(src, cryptlen + (encrypt ? 0 : authsize),
				     &src_chained);
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = sg_count(dst, cryptlen + (encrypt ? authsize : 0),
				     &dst_chained);
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for ICV and generated ICV (+ 2),
	 * and the ICV data itself
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (assoc_nents || src_nents || dst_nents) {
		dma_len = (src_nents + dst_nents + 2 + assoc_nents) *
			  sizeof(struct talitos_ptr) + authsize;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		if (assoc_chained)
			talitos_unmap_sg_chain(dev, assoc, DMA_TO_DEVICE);
		else if (assoclen)
			dma_unmap_sg(dev, assoc,
				     assoc_nents ? assoc_nents - 1 : 1,
				     DMA_TO_DEVICE);

		if (iv_dma)
			dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);

		dev_err(dev, "could not allocate edescriptor\n");
		return ERR_PTR(-ENOMEM);
	}

	edesc->assoc_nents = assoc_nents;
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->assoc_chained = assoc_chained;
	edesc->src_chained = src_chained;
	edesc->dst_chained = dst_chained;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len)
		edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);

	return edesc;
}
1218 static struct talitos_edesc
*aead_edesc_alloc(struct aead_request
*areq
, u8
*iv
,
1219 int icv_stashing
, bool encrypt
)
1221 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1222 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1223 unsigned int ivsize
= crypto_aead_ivsize(authenc
);
1225 return talitos_edesc_alloc(ctx
->dev
, areq
->assoc
, areq
->src
, areq
->dst
,
1226 iv
, areq
->assoclen
, areq
->cryptlen
,
1227 ctx
->authsize
, ivsize
, icv_stashing
,
1228 areq
->base
.flags
, encrypt
);
1231 static int aead_encrypt(struct aead_request
*req
)
1233 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1234 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1235 struct talitos_edesc
*edesc
;
1237 /* allocate extended descriptor */
1238 edesc
= aead_edesc_alloc(req
, req
->iv
, 0, true);
1240 return PTR_ERR(edesc
);
1243 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1245 return ipsec_esp(edesc
, req
, 0, ipsec_esp_encrypt_done
);
1248 static int aead_decrypt(struct aead_request
*req
)
1250 struct crypto_aead
*authenc
= crypto_aead_reqtfm(req
);
1251 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1252 unsigned int authsize
= ctx
->authsize
;
1253 struct talitos_private
*priv
= dev_get_drvdata(ctx
->dev
);
1254 struct talitos_edesc
*edesc
;
1255 struct scatterlist
*sg
;
1258 req
->cryptlen
-= authsize
;
1260 /* allocate extended descriptor */
1261 edesc
= aead_edesc_alloc(req
, req
->iv
, 1, false);
1263 return PTR_ERR(edesc
);
1265 if ((priv
->features
& TALITOS_FTR_HW_AUTH_CHECK
) &&
1266 ((!edesc
->src_nents
&& !edesc
->dst_nents
) ||
1267 priv
->features
& TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT
)) {
1269 /* decrypt and check the ICV */
1270 edesc
->desc
.hdr
= ctx
->desc_hdr_template
|
1271 DESC_HDR_DIR_INBOUND
|
1272 DESC_HDR_MODE1_MDEU_CICV
;
1274 /* reset integrity check result bits */
1275 edesc
->desc
.hdr_lo
= 0;
1277 return ipsec_esp(edesc
, req
, 0, ipsec_esp_decrypt_hwauth_done
);
1280 /* Have to check the ICV with software */
1281 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1283 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1285 icvdata
= &edesc
->link_tbl
[edesc
->src_nents
+
1286 edesc
->dst_nents
+ 2 +
1287 edesc
->assoc_nents
];
1289 icvdata
= &edesc
->link_tbl
[0];
1291 sg
= sg_last(req
->src
, edesc
->src_nents
? : 1);
1293 memcpy(icvdata
, (char *)sg_virt(sg
) + sg
->length
- ctx
->authsize
,
1296 return ipsec_esp(edesc
, req
, 0, ipsec_esp_decrypt_swauth_done
);
1299 static int aead_givencrypt(struct aead_givcrypt_request
*req
)
1301 struct aead_request
*areq
= &req
->areq
;
1302 struct crypto_aead
*authenc
= crypto_aead_reqtfm(areq
);
1303 struct talitos_ctx
*ctx
= crypto_aead_ctx(authenc
);
1304 struct talitos_edesc
*edesc
;
1306 /* allocate extended descriptor */
1307 edesc
= aead_edesc_alloc(areq
, req
->giv
, 0, true);
1309 return PTR_ERR(edesc
);
1312 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1314 memcpy(req
->giv
, ctx
->iv
, crypto_aead_ivsize(authenc
));
1315 /* avoid consecutive packets going out with same IV */
1316 *(__be64
*)req
->giv
^= cpu_to_be64(req
->seq
);
1318 return ipsec_esp(edesc
, areq
, req
->seq
, ipsec_esp_encrypt_done
);
1321 static int ablkcipher_setkey(struct crypto_ablkcipher
*cipher
,
1322 const u8
*key
, unsigned int keylen
)
1324 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1326 memcpy(&ctx
->key
, key
, keylen
);
1327 ctx
->keylen
= keylen
;
1332 static void common_nonsnoop_unmap(struct device
*dev
,
1333 struct talitos_edesc
*edesc
,
1334 struct ablkcipher_request
*areq
)
1336 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1337 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2], DMA_TO_DEVICE
);
1338 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1], DMA_TO_DEVICE
);
1340 talitos_sg_unmap(dev
, edesc
, areq
->src
, areq
->dst
);
1343 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1347 static void ablkcipher_done(struct device
*dev
,
1348 struct talitos_desc
*desc
, void *context
,
1351 struct ablkcipher_request
*areq
= context
;
1352 struct talitos_edesc
*edesc
;
1354 edesc
= container_of(desc
, struct talitos_edesc
, desc
);
1356 common_nonsnoop_unmap(dev
, edesc
, areq
);
1360 areq
->base
.complete(&areq
->base
, err
);
1363 static int common_nonsnoop(struct talitos_edesc
*edesc
,
1364 struct ablkcipher_request
*areq
,
1365 void (*callback
) (struct device
*dev
,
1366 struct talitos_desc
*desc
,
1367 void *context
, int error
))
1369 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1370 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1371 struct device
*dev
= ctx
->dev
;
1372 struct talitos_desc
*desc
= &edesc
->desc
;
1373 unsigned int cryptlen
= areq
->nbytes
;
1374 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1377 /* first DWORD empty */
1378 desc
->ptr
[0].len
= 0;
1379 to_talitos_ptr(&desc
->ptr
[0], 0);
1380 desc
->ptr
[0].j_extent
= 0;
1383 to_talitos_ptr(&desc
->ptr
[1], edesc
->iv_dma
);
1384 desc
->ptr
[1].len
= cpu_to_be16(ivsize
);
1385 desc
->ptr
[1].j_extent
= 0;
1388 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1389 (char *)&ctx
->key
, 0, DMA_TO_DEVICE
);
1394 desc
->ptr
[3].len
= cpu_to_be16(cryptlen
);
1395 desc
->ptr
[3].j_extent
= 0;
1397 sg_count
= talitos_map_sg(dev
, areq
->src
, edesc
->src_nents
? : 1,
1398 (areq
->src
== areq
->dst
) ? DMA_BIDIRECTIONAL
1400 edesc
->src_chained
);
1402 if (sg_count
== 1) {
1403 to_talitos_ptr(&desc
->ptr
[3], sg_dma_address(areq
->src
));
1405 sg_count
= sg_to_link_tbl(areq
->src
, sg_count
, cryptlen
,
1406 &edesc
->link_tbl
[0]);
1408 to_talitos_ptr(&desc
->ptr
[3], edesc
->dma_link_tbl
);
1409 desc
->ptr
[3].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1410 dma_sync_single_for_device(dev
, edesc
->dma_link_tbl
,
1414 /* Only one segment now, so no link tbl needed */
1415 to_talitos_ptr(&desc
->ptr
[3],
1416 sg_dma_address(areq
->src
));
1421 desc
->ptr
[4].len
= cpu_to_be16(cryptlen
);
1422 desc
->ptr
[4].j_extent
= 0;
1424 if (areq
->src
!= areq
->dst
)
1425 sg_count
= talitos_map_sg(dev
, areq
->dst
,
1426 edesc
->dst_nents
? : 1,
1427 DMA_FROM_DEVICE
, edesc
->dst_chained
);
1429 if (sg_count
== 1) {
1430 to_talitos_ptr(&desc
->ptr
[4], sg_dma_address(areq
->dst
));
1432 struct talitos_ptr
*link_tbl_ptr
=
1433 &edesc
->link_tbl
[edesc
->src_nents
+ 1];
1435 to_talitos_ptr(&desc
->ptr
[4], edesc
->dma_link_tbl
+
1436 (edesc
->src_nents
+ 1) *
1437 sizeof(struct talitos_ptr
));
1438 desc
->ptr
[4].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1439 sg_count
= sg_to_link_tbl(areq
->dst
, sg_count
, cryptlen
,
1441 dma_sync_single_for_device(ctx
->dev
, edesc
->dma_link_tbl
,
1442 edesc
->dma_len
, DMA_BIDIRECTIONAL
);
1446 map_single_talitos_ptr(dev
, &desc
->ptr
[5], ivsize
, ctx
->iv
, 0,
1449 /* last DWORD empty */
1450 desc
->ptr
[6].len
= 0;
1451 to_talitos_ptr(&desc
->ptr
[6], 0);
1452 desc
->ptr
[6].j_extent
= 0;
1454 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1455 if (ret
!= -EINPROGRESS
) {
1456 common_nonsnoop_unmap(dev
, edesc
, areq
);
1462 static struct talitos_edesc
*ablkcipher_edesc_alloc(struct ablkcipher_request
*
1465 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1466 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1467 unsigned int ivsize
= crypto_ablkcipher_ivsize(cipher
);
1469 return talitos_edesc_alloc(ctx
->dev
, NULL
, areq
->src
, areq
->dst
,
1470 areq
->info
, 0, areq
->nbytes
, 0, ivsize
, 0,
1471 areq
->base
.flags
, encrypt
);
1474 static int ablkcipher_encrypt(struct ablkcipher_request
*areq
)
1476 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1477 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1478 struct talitos_edesc
*edesc
;
1480 /* allocate extended descriptor */
1481 edesc
= ablkcipher_edesc_alloc(areq
, true);
1483 return PTR_ERR(edesc
);
1486 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_MODE0_ENCRYPT
;
1488 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1491 static int ablkcipher_decrypt(struct ablkcipher_request
*areq
)
1493 struct crypto_ablkcipher
*cipher
= crypto_ablkcipher_reqtfm(areq
);
1494 struct talitos_ctx
*ctx
= crypto_ablkcipher_ctx(cipher
);
1495 struct talitos_edesc
*edesc
;
1497 /* allocate extended descriptor */
1498 edesc
= ablkcipher_edesc_alloc(areq
, false);
1500 return PTR_ERR(edesc
);
1502 edesc
->desc
.hdr
= ctx
->desc_hdr_template
| DESC_HDR_DIR_INBOUND
;
1504 return common_nonsnoop(edesc
, areq
, ablkcipher_done
);
1507 static void common_nonsnoop_hash_unmap(struct device
*dev
,
1508 struct talitos_edesc
*edesc
,
1509 struct ahash_request
*areq
)
1511 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1513 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[5], DMA_FROM_DEVICE
);
1515 /* When using hashctx-in, must unmap it. */
1516 if (edesc
->desc
.ptr
[1].len
)
1517 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[1],
1520 if (edesc
->desc
.ptr
[2].len
)
1521 unmap_single_talitos_ptr(dev
, &edesc
->desc
.ptr
[2],
1524 talitos_sg_unmap(dev
, edesc
, req_ctx
->psrc
, NULL
);
1527 dma_unmap_single(dev
, edesc
->dma_link_tbl
, edesc
->dma_len
,
1532 static void ahash_done(struct device
*dev
,
1533 struct talitos_desc
*desc
, void *context
,
1536 struct ahash_request
*areq
= context
;
1537 struct talitos_edesc
*edesc
=
1538 container_of(desc
, struct talitos_edesc
, desc
);
1539 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1541 if (!req_ctx
->last
&& req_ctx
->to_hash_later
) {
1542 /* Position any partial block for next update/final/finup */
1543 memcpy(req_ctx
->buf
, req_ctx
->bufnext
, req_ctx
->to_hash_later
);
1544 req_ctx
->nbuf
= req_ctx
->to_hash_later
;
1546 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1550 areq
->base
.complete(&areq
->base
, err
);
1553 static int common_nonsnoop_hash(struct talitos_edesc
*edesc
,
1554 struct ahash_request
*areq
, unsigned int length
,
1555 void (*callback
) (struct device
*dev
,
1556 struct talitos_desc
*desc
,
1557 void *context
, int error
))
1559 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1560 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1561 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1562 struct device
*dev
= ctx
->dev
;
1563 struct talitos_desc
*desc
= &edesc
->desc
;
1566 /* first DWORD empty */
1567 desc
->ptr
[0] = zero_entry
;
1569 /* hash context in */
1570 if (!req_ctx
->first
|| req_ctx
->swinit
) {
1571 map_single_talitos_ptr(dev
, &desc
->ptr
[1],
1572 req_ctx
->hw_context_size
,
1573 (char *)req_ctx
->hw_context
, 0,
1575 req_ctx
->swinit
= 0;
1577 desc
->ptr
[1] = zero_entry
;
1578 /* Indicate next op is not the first. */
1584 map_single_talitos_ptr(dev
, &desc
->ptr
[2], ctx
->keylen
,
1585 (char *)&ctx
->key
, 0, DMA_TO_DEVICE
);
1587 desc
->ptr
[2] = zero_entry
;
1592 desc
->ptr
[3].len
= cpu_to_be16(length
);
1593 desc
->ptr
[3].j_extent
= 0;
1595 sg_count
= talitos_map_sg(dev
, req_ctx
->psrc
,
1596 edesc
->src_nents
? : 1,
1597 DMA_TO_DEVICE
, edesc
->src_chained
);
1599 if (sg_count
== 1) {
1600 to_talitos_ptr(&desc
->ptr
[3], sg_dma_address(req_ctx
->psrc
));
1602 sg_count
= sg_to_link_tbl(req_ctx
->psrc
, sg_count
, length
,
1603 &edesc
->link_tbl
[0]);
1605 desc
->ptr
[3].j_extent
|= DESC_PTR_LNKTBL_JUMP
;
1606 to_talitos_ptr(&desc
->ptr
[3], edesc
->dma_link_tbl
);
1607 dma_sync_single_for_device(ctx
->dev
,
1608 edesc
->dma_link_tbl
,
1612 /* Only one segment now, so no link tbl needed */
1613 to_talitos_ptr(&desc
->ptr
[3],
1614 sg_dma_address(req_ctx
->psrc
));
1618 /* fifth DWORD empty */
1619 desc
->ptr
[4] = zero_entry
;
1621 /* hash/HMAC out -or- hash context out */
1623 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1624 crypto_ahash_digestsize(tfm
),
1625 areq
->result
, 0, DMA_FROM_DEVICE
);
1627 map_single_talitos_ptr(dev
, &desc
->ptr
[5],
1628 req_ctx
->hw_context_size
,
1629 req_ctx
->hw_context
, 0, DMA_FROM_DEVICE
);
1631 /* last DWORD empty */
1632 desc
->ptr
[6] = zero_entry
;
1634 ret
= talitos_submit(dev
, ctx
->ch
, desc
, callback
, areq
);
1635 if (ret
!= -EINPROGRESS
) {
1636 common_nonsnoop_hash_unmap(dev
, edesc
, areq
);
1642 static struct talitos_edesc
*ahash_edesc_alloc(struct ahash_request
*areq
,
1643 unsigned int nbytes
)
1645 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1646 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1647 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1649 return talitos_edesc_alloc(ctx
->dev
, NULL
, req_ctx
->psrc
, NULL
, NULL
, 0,
1650 nbytes
, 0, 0, 0, areq
->base
.flags
, false);
1653 static int ahash_init(struct ahash_request
*areq
)
1655 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1656 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1658 /* Initialize the context */
1660 req_ctx
->first
= 1; /* first indicates h/w must init its context */
1661 req_ctx
->swinit
= 0; /* assume h/w init of context */
1662 req_ctx
->hw_context_size
=
1663 (crypto_ahash_digestsize(tfm
) <= SHA256_DIGEST_SIZE
)
1664 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1665 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
;
1671 * on h/w without explicit sha224 support, we initialize h/w context
1672 * manually with sha224 constants, and tell it to run sha256.
1674 static int ahash_init_sha224_swinit(struct ahash_request
*areq
)
1676 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1679 req_ctx
->swinit
= 1;/* prevent h/w initting context with sha256 values*/
1681 req_ctx
->hw_context
[0] = SHA224_H0
;
1682 req_ctx
->hw_context
[1] = SHA224_H1
;
1683 req_ctx
->hw_context
[2] = SHA224_H2
;
1684 req_ctx
->hw_context
[3] = SHA224_H3
;
1685 req_ctx
->hw_context
[4] = SHA224_H4
;
1686 req_ctx
->hw_context
[5] = SHA224_H5
;
1687 req_ctx
->hw_context
[6] = SHA224_H6
;
1688 req_ctx
->hw_context
[7] = SHA224_H7
;
1690 /* init 64-bit count */
1691 req_ctx
->hw_context
[8] = 0;
1692 req_ctx
->hw_context
[9] = 0;
1697 static int ahash_process_req(struct ahash_request
*areq
, unsigned int nbytes
)
1699 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(areq
);
1700 struct talitos_ctx
*ctx
= crypto_ahash_ctx(tfm
);
1701 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1702 struct talitos_edesc
*edesc
;
1703 unsigned int blocksize
=
1704 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1705 unsigned int nbytes_to_hash
;
1706 unsigned int to_hash_later
;
1710 if (!req_ctx
->last
&& (nbytes
+ req_ctx
->nbuf
<= blocksize
)) {
1711 /* Buffer up to one whole block */
1712 sg_copy_to_buffer(areq
->src
,
1713 sg_count(areq
->src
, nbytes
, &chained
),
1714 req_ctx
->buf
+ req_ctx
->nbuf
, nbytes
);
1715 req_ctx
->nbuf
+= nbytes
;
1719 /* At least (blocksize + 1) bytes are available to hash */
1720 nbytes_to_hash
= nbytes
+ req_ctx
->nbuf
;
1721 to_hash_later
= nbytes_to_hash
& (blocksize
- 1);
1725 else if (to_hash_later
)
1726 /* There is a partial block. Hash the full block(s) now */
1727 nbytes_to_hash
-= to_hash_later
;
1729 /* Keep one block buffered */
1730 nbytes_to_hash
-= blocksize
;
1731 to_hash_later
= blocksize
;
1734 /* Chain in any previously buffered data */
1735 if (req_ctx
->nbuf
) {
1736 nsg
= (req_ctx
->nbuf
< nbytes_to_hash
) ? 2 : 1;
1737 sg_init_table(req_ctx
->bufsl
, nsg
);
1738 sg_set_buf(req_ctx
->bufsl
, req_ctx
->buf
, req_ctx
->nbuf
);
1740 scatterwalk_sg_chain(req_ctx
->bufsl
, 2, areq
->src
);
1741 req_ctx
->psrc
= req_ctx
->bufsl
;
1743 req_ctx
->psrc
= areq
->src
;
1745 if (to_hash_later
) {
1746 int nents
= sg_count(areq
->src
, nbytes
, &chained
);
1747 sg_pcopy_to_buffer(areq
->src
, nents
,
1750 nbytes
- to_hash_later
);
1752 req_ctx
->to_hash_later
= to_hash_later
;
1754 /* Allocate extended descriptor */
1755 edesc
= ahash_edesc_alloc(areq
, nbytes_to_hash
);
1757 return PTR_ERR(edesc
);
1759 edesc
->desc
.hdr
= ctx
->desc_hdr_template
;
1761 /* On last one, request SEC to pad; otherwise continue */
1763 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_PAD
;
1765 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_CONT
;
1767 /* request SEC to INIT hash. */
1768 if (req_ctx
->first
&& !req_ctx
->swinit
)
1769 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_INIT
;
1771 /* When the tfm context has a keylen, it's an HMAC.
1772 * A first or last (ie. not middle) descriptor must request HMAC.
1774 if (ctx
->keylen
&& (req_ctx
->first
|| req_ctx
->last
))
1775 edesc
->desc
.hdr
|= DESC_HDR_MODE0_MDEU_HMAC
;
1777 return common_nonsnoop_hash(edesc
, areq
, nbytes_to_hash
,
1781 static int ahash_update(struct ahash_request
*areq
)
1783 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1787 return ahash_process_req(areq
, areq
->nbytes
);
1790 static int ahash_final(struct ahash_request
*areq
)
1792 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1796 return ahash_process_req(areq
, 0);
1799 static int ahash_finup(struct ahash_request
*areq
)
1801 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1805 return ahash_process_req(areq
, areq
->nbytes
);
1808 static int ahash_digest(struct ahash_request
*areq
)
1810 struct talitos_ahash_req_ctx
*req_ctx
= ahash_request_ctx(areq
);
1811 struct crypto_ahash
*ahash
= crypto_ahash_reqtfm(areq
);
1816 return ahash_process_req(areq
, areq
->nbytes
);
1819 struct keyhash_result
{
1820 struct completion completion
;
1824 static void keyhash_complete(struct crypto_async_request
*req
, int err
)
1826 struct keyhash_result
*res
= req
->data
;
1828 if (err
== -EINPROGRESS
)
1832 complete(&res
->completion
);
1835 static int keyhash(struct crypto_ahash
*tfm
, const u8
*key
, unsigned int keylen
,
1838 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1840 struct scatterlist sg
[1];
1841 struct ahash_request
*req
;
1842 struct keyhash_result hresult
;
1845 init_completion(&hresult
.completion
);
1847 req
= ahash_request_alloc(tfm
, GFP_KERNEL
);
1851 /* Keep tfm keylen == 0 during hash of the long key */
1853 ahash_request_set_callback(req
, CRYPTO_TFM_REQ_MAY_BACKLOG
,
1854 keyhash_complete
, &hresult
);
1856 sg_init_one(&sg
[0], key
, keylen
);
1858 ahash_request_set_crypt(req
, sg
, hash
, keylen
);
1859 ret
= crypto_ahash_digest(req
);
1865 ret
= wait_for_completion_interruptible(
1866 &hresult
.completion
);
1873 ahash_request_free(req
);
1878 static int ahash_setkey(struct crypto_ahash
*tfm
, const u8
*key
,
1879 unsigned int keylen
)
1881 struct talitos_ctx
*ctx
= crypto_tfm_ctx(crypto_ahash_tfm(tfm
));
1882 unsigned int blocksize
=
1883 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm
));
1884 unsigned int digestsize
= crypto_ahash_digestsize(tfm
);
1885 unsigned int keysize
= keylen
;
1886 u8 hash
[SHA512_DIGEST_SIZE
];
1889 if (keylen
<= blocksize
)
1890 memcpy(ctx
->key
, key
, keysize
);
1892 /* Must get the hash of the long key */
1893 ret
= keyhash(tfm
, key
, keylen
, hash
);
1896 crypto_ahash_set_flags(tfm
, CRYPTO_TFM_RES_BAD_KEY_LEN
);
1900 keysize
= digestsize
;
1901 memcpy(ctx
->key
, hash
, digestsize
);
1904 ctx
->keylen
= keysize
;
1910 struct talitos_alg_template
{
1913 struct crypto_alg crypto
;
1914 struct ahash_alg hash
;
1916 __be32 desc_hdr_template
;
1919 static struct talitos_alg_template driver_algs
[] = {
1920 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
1921 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1923 .cra_name
= "authenc(hmac(sha1),cbc(aes))",
1924 .cra_driver_name
= "authenc-hmac-sha1-cbc-aes-talitos",
1925 .cra_blocksize
= AES_BLOCK_SIZE
,
1926 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1928 .ivsize
= AES_BLOCK_SIZE
,
1929 .maxauthsize
= SHA1_DIGEST_SIZE
,
1932 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
1933 DESC_HDR_SEL0_AESU
|
1934 DESC_HDR_MODE0_AESU_CBC
|
1935 DESC_HDR_SEL1_MDEUA
|
1936 DESC_HDR_MODE1_MDEU_INIT
|
1937 DESC_HDR_MODE1_MDEU_PAD
|
1938 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
1940 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1942 .cra_name
= "authenc(hmac(sha1),cbc(des3_ede))",
1943 .cra_driver_name
= "authenc-hmac-sha1-cbc-3des-talitos",
1944 .cra_blocksize
= DES3_EDE_BLOCK_SIZE
,
1945 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
1947 .ivsize
= DES3_EDE_BLOCK_SIZE
,
1948 .maxauthsize
= SHA1_DIGEST_SIZE
,
1951 .desc_hdr_template
= DESC_HDR_TYPE_IPSEC_ESP
|
1953 DESC_HDR_MODE0_DEU_CBC
|
1954 DESC_HDR_MODE0_DEU_3DES
|
1955 DESC_HDR_SEL1_MDEUA
|
1956 DESC_HDR_MODE1_MDEU_INIT
|
1957 DESC_HDR_MODE1_MDEU_PAD
|
1958 DESC_HDR_MODE1_MDEU_SHA1_HMAC
,
1960 { .type
= CRYPTO_ALG_TYPE_AEAD
,
1962 .cra_name
= "authenc(hmac(sha224),cbc(aes))",
1963 .cra_driver_name
= "authenc-hmac-sha224-cbc-aes-talitos",
1964 .cra_blocksize
= AES_BLOCK_SIZE
,
1965 .cra_flags
= CRYPTO_ALG_TYPE_AEAD
| CRYPTO_ALG_ASYNC
,
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha224),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha224-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA224_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA224_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha256),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha256-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA256_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_SHA256_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha384),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha384-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA384_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(aes))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(sha512),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-sha512-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = SHA512_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUB |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(aes))",
			.cra_driver_name = "authenc-hmac-md5-cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = AES_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	{	.type = CRYPTO_ALG_TYPE_AEAD,
		.alg.crypto = {
			.cra_name = "authenc(hmac(md5),cbc(des3_ede))",
			.cra_driver_name = "authenc-hmac-md5-cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
			.cra_aead = {
				.ivsize = DES3_EDE_BLOCK_SIZE,
				.maxauthsize = MD5_DIGEST_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES |
				     DESC_HDR_SEL1_MDEUA |
				     DESC_HDR_MODE1_MDEU_INIT |
				     DESC_HDR_MODE1_MDEU_PAD |
				     DESC_HDR_MODE1_MDEU_MD5_HMAC,
	},
	/* ABLKCIPHER algorithms. */
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-talitos",
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = AES_MIN_KEY_SIZE,
				.max_keysize = AES_MAX_KEY_SIZE,
				.ivsize = AES_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_AESU |
				     DESC_HDR_MODE0_AESU_CBC,
	},
	{	.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.alg.crypto = {
			.cra_name = "cbc(des3_ede)",
			.cra_driver_name = "cbc-3des-talitos",
			.cra_blocksize = DES3_EDE_BLOCK_SIZE,
			.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				     CRYPTO_ALG_ASYNC,
			.cra_ablkcipher = {
				.min_keysize = DES3_EDE_KEY_SIZE,
				.max_keysize = DES3_EDE_KEY_SIZE,
				.ivsize = DES3_EDE_BLOCK_SIZE,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_DEU |
				     DESC_HDR_MODE0_DEU_CBC |
				     DESC_HDR_MODE0_DEU_3DES,
	},
	/* AHASH algorithms. */
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "md5",
				.cra_driver_name = "md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha1",
				.cra_driver_name = "sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha224",
				.cra_driver_name = "sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha256",
				.cra_driver_name = "sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha384",
				.cra_driver_name = "sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "sha512",
				.cra_driver_name = "sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = MD5_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(md5)",
				.cra_driver_name = "hmac-md5-talitos",
				.cra_blocksize = MD5_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_MD5,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA1_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha1)",
				.cra_driver_name = "hmac-sha1-talitos",
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA1,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA224_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha224)",
				.cra_driver_name = "hmac-sha224-talitos",
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA224,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA256_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha256)",
				.cra_driver_name = "hmac-sha256-talitos",
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUA |
				     DESC_HDR_MODE0_MDEU_SHA256,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA384_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha384)",
				.cra_driver_name = "hmac-sha384-talitos",
				.cra_blocksize = SHA384_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA384,
	},
	{	.type = CRYPTO_ALG_TYPE_AHASH,
		.alg.hash = {
			.halg.digestsize = SHA512_DIGEST_SIZE,
			.halg.base = {
				.cra_name = "hmac(sha512)",
				.cra_driver_name = "hmac-sha512-talitos",
				.cra_blocksize = SHA512_BLOCK_SIZE,
				.cra_flags = CRYPTO_ALG_TYPE_AHASH |
					     CRYPTO_ALG_ASYNC,
			}
		},
		.desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
				     DESC_HDR_SEL0_MDEUB |
				     DESC_HDR_MODE0_MDEUB_SHA512,
	},
};

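/*
 * per-registration bookkeeping: links an algorithm instantiated from
 * driver_algs[] back to the device that services it.
 */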
struct talitos_crypto_alg {
	struct list_head entry;
	struct device *dev;
	struct talitos_alg_template algt;
};

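/*
 * common transform init: bind the tfm to its device, pick a SEC channel
 * round-robin (num_channels is a power of two, so the mask wraps), and
 * cache the descriptor header template with done notification enabled.
 */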
static int talitos_cra_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct talitos_crypto_alg *talitos_alg;
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
	struct talitos_private *priv;

	if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
		talitos_alg = container_of(__crypto_ahash_alg(alg),
					   struct talitos_crypto_alg,
					   algt.alg.hash);
	else
		talitos_alg = container_of(alg, struct talitos_crypto_alg,
					   algt.alg.crypto);

	/* update context with ptr to dev */
	ctx->dev = talitos_alg->dev;

	/* assign SEC channel to tfm in round-robin fashion */
	priv = dev_get_drvdata(ctx->dev);
	ctx->ch = atomic_inc_return(&priv->last_chan) &
		  (priv->num_channels - 1);

	/* copy descriptor header template value */
	ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;

	/* select done notification */
	ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;

	return 0;
}

static int talitos_cra_init_aead(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	/* random first IV */
	get_random_bytes(ctx->iv, TALITOS_MAX_IV_LENGTH);

	return 0;
}

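/* ahash tfms additionally reserve room for per-request hash state */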
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
{
	struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);

	talitos_cra_init(tfm);

	ctx->keylen = 0;
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct talitos_ahash_req_ctx));

	return 0;
}

/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
static int hw_supports(struct device *dev, __be32 desc_hdr_template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ret;

	ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
	      (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);

	if (SECONDARY_EU(desc_hdr_template))
		ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
			      & priv->exec_units);

	return ret;
}

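/*
 * undo talitos_probe(): unregister algorithms and the RNG, then free
 * channel FIFOs, release IRQs, kill tasklets and unmap the registers.
 */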
static int talitos_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg, *n;
	int i;

	list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
		switch (t_alg->algt.type) {
		case CRYPTO_ALG_TYPE_ABLKCIPHER:
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_alg(&t_alg->algt.alg.crypto);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&t_alg->algt.alg.hash);
			break;
		}
		list_del(&t_alg->entry);
		kfree(t_alg);
	}

	if (hw_supports(dev, DESC_HDR_SEL0_RNG))
		talitos_unregister_rng(dev);

	for (i = 0; i < priv->num_channels; i++)
		kfree(priv->chan[i].fifo);

	kfree(priv->chan);

	for (i = 0; i < 2; i++)
		if (priv->irq[i]) {
			free_irq(priv->irq[i], dev);
			irq_dispose_mapping(priv->irq[i]);
		}

	tasklet_kill(&priv->done_task[0]);
	if (priv->irq[1])
		tasklet_kill(&priv->done_task[1]);

	iounmap(priv->reg);

	dev_set_drvdata(dev, NULL);

	kfree(priv);

	return 0;
}

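/*
 * instantiate a driver_algs[] template: copy it, wire up the type-specific
 * entry points, and apply feature-dependent fixups (e.g. SHA-224 software
 * init on parts without TALITOS_FTR_SHA224_HWINIT).
 */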
static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
				struct talitos_alg_template *template)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_crypto_alg *t_alg;
	struct crypto_alg *alg;

	t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	t_alg->algt = *template;

	switch (t_alg->algt.type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init;
		alg->cra_type = &crypto_ablkcipher_type;
		alg->cra_ablkcipher.setkey = ablkcipher_setkey;
		alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
		alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
		alg->cra_ablkcipher.geniv = "eseqiv";
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		alg = &t_alg->algt.alg.crypto;
		alg->cra_init = talitos_cra_init_aead;
		alg->cra_type = &crypto_aead_type;
		alg->cra_aead.setkey = aead_setkey;
		alg->cra_aead.setauthsize = aead_setauthsize;
		alg->cra_aead.encrypt = aead_encrypt;
		alg->cra_aead.decrypt = aead_decrypt;
		alg->cra_aead.givencrypt = aead_givencrypt;
		alg->cra_aead.geniv = "<built-in>";
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		alg = &t_alg->algt.alg.hash.halg.base;
		alg->cra_init = talitos_cra_init_ahash;
		alg->cra_type = &crypto_ahash_type;
		t_alg->algt.alg.hash.init = ahash_init;
		t_alg->algt.alg.hash.update = ahash_update;
		t_alg->algt.alg.hash.final = ahash_final;
		t_alg->algt.alg.hash.finup = ahash_finup;
		t_alg->algt.alg.hash.digest = ahash_digest;
		t_alg->algt.alg.hash.setkey = ahash_setkey;

		if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
		    !strncmp(alg->cra_name, "hmac", 4)) {
			kfree(t_alg);
			return ERR_PTR(-ENOTSUPP);
		}
		if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
		    (!strcmp(alg->cra_name, "sha224") ||
		     !strcmp(alg->cra_name, "hmac(sha224)"))) {
			t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
			t_alg->algt.desc_hdr_template =
					DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
					DESC_HDR_SEL0_MDEUA |
					DESC_HDR_MODE0_MDEU_SHA256;
		}
		break;
	default:
		dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
		kfree(t_alg);	/* don't leak the template copy on error */
		return ERR_PTR(-EINVAL);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_priority = TALITOS_CRA_PRIORITY;
	alg->cra_alignmask = 0;
	alg->cra_ctxsize = sizeof(struct talitos_ctx);
	alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_alg->dev = dev;

	return t_alg;
}

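/*
 * map and request the interrupt line(s) described in the device tree node:
 * parts with a single IRQ service all four channels from it, parts with
 * two IRQs split channels 0/2 and 1/3 between them.
 */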
static int talitos_probe_irq(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->irq[0] = irq_of_parse_and_map(np, 0);
	if (!priv->irq[0]) {
		dev_err(dev, "failed to map irq\n");
		return -EINVAL;
	}

	priv->irq[1] = irq_of_parse_and_map(np, 1);

	/* get the primary irq line */
	if (!priv->irq[1]) {
		err = request_irq(priv->irq[0], talitos_interrupt_4ch, 0,
				  dev_driver_string(dev), dev);
		goto primary_out;
	}

	err = request_irq(priv->irq[0], talitos_interrupt_ch0_2, 0,
			  dev_driver_string(dev), dev);
	if (err)
		goto primary_out;

	/* get the secondary irq line */
	err = request_irq(priv->irq[1], talitos_interrupt_ch1_3, 0,
			  dev_driver_string(dev), dev);
	if (err) {
		dev_err(dev, "failed to request secondary irq\n");
		irq_dispose_mapping(priv->irq[1]);
		priv->irq[1] = 0;
	}

	return err;

primary_out:
	if (err) {
		dev_err(dev, "failed to request primary irq\n");
		irq_dispose_mapping(priv->irq[0]);
		priv->irq[0] = 0;
	}

	return err;
}

static int talitos_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct talitos_private *priv;
	const unsigned int *prop;
	int i, err;

	priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);

	priv->ofdev = ofdev;

	spin_lock_init(&priv->reg_lock);

	err = talitos_probe_irq(ofdev);
	if (err)
		goto err_out;

	if (!priv->irq[1]) {
		tasklet_init(&priv->done_task[0], talitos_done_4ch,
			     (unsigned long)dev);
	} else {
		tasklet_init(&priv->done_task[0], talitos_done_ch0_2,
			     (unsigned long)dev);
		tasklet_init(&priv->done_task[1], talitos_done_ch1_3,
			     (unsigned long)dev);
	}

	INIT_LIST_HEAD(&priv->alg_list);

	priv->reg = of_iomap(np, 0);
	if (!priv->reg) {
		dev_err(dev, "failed to of_iomap\n");
		err = -ENOMEM;
		goto err_out;
	}

	/* get SEC version capabilities from device tree */
	prop = of_get_property(np, "fsl,num-channels", NULL);
	if (prop)
		priv->num_channels = *prop;

	prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
	if (prop)
		priv->chfifo_len = *prop;

	prop = of_get_property(np, "fsl,exec-units-mask", NULL);
	if (prop)
		priv->exec_units = *prop;

	prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
	if (prop)
		priv->desc_types = *prop;

	if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
	    !priv->exec_units || !priv->desc_types) {
		dev_err(dev, "invalid property data in device tree node\n");
		err = -EINVAL;
		goto err_out;
	}

	if (of_device_is_compatible(np, "fsl,sec3.0"))
		priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;

	if (of_device_is_compatible(np, "fsl,sec2.1"))
		priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
				  TALITOS_FTR_SHA224_HWINIT |
				  TALITOS_FTR_HMAC_OK;

	priv->chan = kzalloc(sizeof(struct talitos_channel) *
			     priv->num_channels, GFP_KERNEL);
	if (!priv->chan) {
		dev_err(dev, "failed to allocate channel management space\n");
		err = -ENOMEM;
		goto err_out;
	}

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].reg = priv->reg + TALITOS_CH_STRIDE * (i + 1);
		if (!priv->irq[1] || !(i & 1))
			priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
	}

	for (i = 0; i < priv->num_channels; i++) {
		spin_lock_init(&priv->chan[i].head_lock);
		spin_lock_init(&priv->chan[i].tail_lock);
	}

	priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);

	for (i = 0; i < priv->num_channels; i++) {
		priv->chan[i].fifo = kzalloc(sizeof(struct talitos_request) *
					     priv->fifo_len, GFP_KERNEL);
		if (!priv->chan[i].fifo) {
			dev_err(dev, "failed to allocate request fifo %d\n", i);
			err = -ENOMEM;
			goto err_out;
		}
	}

	for (i = 0; i < priv->num_channels; i++)
		atomic_set(&priv->chan[i].submit_count,
			   -(priv->chfifo_len - 1));

	dma_set_mask(dev, DMA_BIT_MASK(36));

	/* reset and initialize the h/w */
	err = init_device(dev);
	if (err) {
		dev_err(dev, "failed to initialize device\n");
		goto err_out;
	}

	/* register the RNG, if available */
	if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
		err = talitos_register_rng(dev);
		if (err) {
			dev_err(dev, "failed to register hwrng: %d\n", err);
			goto err_out;
		} else
			dev_info(dev, "hwrng\n");
	}

	/* register crypto algorithms the device supports */
	for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
		if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
			struct talitos_crypto_alg *t_alg;
			char *name = NULL;

			t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
			if (IS_ERR(t_alg)) {
				err = PTR_ERR(t_alg);
				if (err == -ENOTSUPP)
					continue;
				goto err_out;
			}

			switch (t_alg->algt.type) {
			case CRYPTO_ALG_TYPE_ABLKCIPHER:
			case CRYPTO_ALG_TYPE_AEAD:
				err = crypto_register_alg(
						&t_alg->algt.alg.crypto);
				name = t_alg->algt.alg.crypto.cra_driver_name;
				break;
			case CRYPTO_ALG_TYPE_AHASH:
				err = crypto_register_ahash(
						&t_alg->algt.alg.hash);
				name =
				 t_alg->algt.alg.hash.halg.base.cra_driver_name;
				break;
			}
			if (err) {
				dev_err(dev, "%s alg registration failed\n",
					name);
				kfree(t_alg);
			} else
				list_add_tail(&t_alg->entry, &priv->alg_list);
		}
	}
	if (!list_empty(&priv->alg_list))
		dev_info(dev, "%s algorithms registered in /proc/crypto\n",
			 (char *)of_get_property(np, "compatible", NULL));

	return 0;

err_out:
	talitos_remove(ofdev);

	return err;
}

static const struct of_device_id talitos_match[] = {
	{
		.compatible = "fsl,sec2.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, talitos_match);

static struct platform_driver talitos_driver = {
	.driver = {
		.name = "talitos",
		.owner = THIS_MODULE,
		.of_match_table = talitos_match,
	},
	.probe = talitos_probe,
	.remove = talitos_remove,
};

module_platform_driver(talitos_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");