/* drivers/crypto/talitos.c */
/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
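
/*
 * Note on the two pointer formats handled by the helpers below: SEC1
 * descriptor pointers are plain 32-bit addresses with a separate 16-bit
 * len1 field, while SEC2+ parts add an eptr byte carrying the upper bits
 * of a 36-bit bus address and a j_extent byte used for link-table
 * JUMP/RETURN flags and extent lengths.  The is_sec1 flag selects
 * between the two layouts.
 */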
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (!is_sec1)
		ptr->eptr = upper_32_bits(dma_addr);
}

static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (!is_sec1)
		dst_ptr->eptr = src_ptr->eptr;
}

static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
			       bool is_sec1)
{
	if (is_sec1) {
		ptr->res = 0;
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
	}
}

static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}

static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}

/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr(ptr, dma_addr, is_sec1);
	to_talitos_ptr_ext_set(ptr, 0, is_sec1);
}

/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}

static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}

static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}

/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}

/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @ch:		the SEC device channel to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		desc->next_desc = 0;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
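
/*
 * Illustrative sketch (not part of the driver): callers treat
 * -EINPROGRESS as successful submission and anything else as failure,
 * e.g.
 *
 *	ret = talitos_submit(dev, ch, desc, my_done, my_req);
 *	if (ret != -EINPROGRESS) {
 *		// undo DMA mappings, free the extended descriptor
 *	}
 *
 * where my_done() and my_req are caller-supplied names; see ipsec_esp()
 * below for the real pattern used in this driver.
 */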
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		hdr = is_sec1 ? request->desc->hdr1 : request->desc->hdr;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}

/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)

#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (priv->num_channels == 1)					\
		goto out;						\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
out:									\
	/* At this point, all completed channels have been processed */	\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
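
/*
 * The ch0_2/ch1_3 variants above exist because some SEC2+ devices split
 * channel-done reporting across two IRQ lines (channels 0/2 on one line,
 * 1/3 on the other, cf. priv->irq[1] in reset_device()); single-IRQ
 * parts use the 4ch handler for all channels.
 */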
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	return priv->chan[ch].fifo[iter].desc->hdr;
}

/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}

/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
				TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)

#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}

static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}

static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}

static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */

struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512

struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[HASH_MAX_BLOCK_SIZE];
	u8 bufnext[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
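
/*
 * The key blob parsed above follows the generic authenc key format: an
 * rtattr-wrapped struct crypto_authenc_key_param giving enckeylen,
 * followed by the authentication key and then the encryption key.
 * crypto_authenc_extractkeys() does the parsing; the driver simply
 * stores the two keys back to back in ctx->key.
 */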
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}

static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);

	if (edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[3], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[0], DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!(edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}

/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	kfree(edesc);

	aead_request_complete(areq, err);
}

static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
					       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}

static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}

/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, 0);
		to_talitos_ptr_len(link_tbl_ptr + count, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}
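
/*
 * Each link-table entry built above is a pointer/length pair; the
 * j_extent byte of the last entry is stamped DESC_PTR_LNKTBL_RETURN so
 * the engine knows where the gather list ends, while a descriptor
 * pointer that refers into a table carries DESC_PTR_LNKTBL_JUMP (see
 * talitos_sg_map_ext() below).
 */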
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		*ptr = zero_entry;
		return 1;
	}

	to_talitos_ptr_len(ptr, len, is_sec1);
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);

	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}
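
/*
 * Summary of the cases above: a single DMA segment is referenced
 * directly; on SEC1 a multi-segment request has already been linearized
 * into the bounce buffer behind dma_link_tbl, which is referenced
 * instead; otherwise a link table is built and the descriptor pointer
 * is tagged with DESC_PTR_LNKTBL_JUMP so the engine gathers through it.
 */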
static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}

/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* hmac key */
	map_single_talitos_ptr(dev, &desc->ptr[0], ctx->authkeylen, &ctx->key,
			       DMA_TO_DEVICE);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
		to_talitos_ptr(&desc->ptr[2], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[2], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[2], 0, is_sec1);
	} else {
		to_talitos_ptr(&desc->ptr[3], edesc->iv_dma, is_sec1);
		to_talitos_ptr_len(&desc->ptr[3], ivsize, is_sec1);
		to_talitos_ptr_ext_set(&desc->ptr[3], 0, is_sec1);
	}

	/* cipher key */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[3], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->enckeylen,
				       (char *)&ctx->key + ctx->authkeylen,
				       DMA_TO_DEVICE);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if ((desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) &&
	    (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);
			to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
			to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
		}
	} else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
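
/*
 * Pointer-slot layout used by ipsec_esp() above: ptr[0] HMAC key,
 * ptr[1] associated data, ptr[2]/ptr[3] cipher IV and cipher key (the
 * two slots swap between the IPSEC_ESP and HSNA descriptor types),
 * ptr[4] cipher in, ptr[5] cipher out, ptr[6] ICV and/or IV out
 * depending on the descriptor type.
 */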
1328 * allocate and map the extended descriptor
1330 static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
1331 struct scatterlist *src,
1332 struct scatterlist *dst,
1333 u8 *iv,
1334 unsigned int assoclen,
1335 unsigned int cryptlen,
1336 unsigned int authsize,
1337 unsigned int ivsize,
1338 int icv_stashing,
1339 u32 cryptoflags,
1340 bool encrypt)
1342 struct talitos_edesc *edesc;
1343 int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
1344 dma_addr_t iv_dma = 0;
1345 gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
1346 GFP_ATOMIC;
1347 struct talitos_private *priv = dev_get_drvdata(dev);
1348 bool is_sec1 = has_ftr_sec1(priv);
1349 int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
1351 if (cryptlen + authsize > max_len) {
1352 dev_err(dev, "length exceeds h/w max limit\n");
1353 return ERR_PTR(-EINVAL);
1356 if (!dst || dst == src) {
1357 src_len = assoclen + cryptlen + authsize;
1358 src_nents = sg_nents_for_len(src, src_len);
1359 if (src_nents < 0) {
1360 dev_err(dev, "Invalid number of src SG.\n");
1361 return ERR_PTR(-EINVAL);
1363 src_nents = (src_nents == 1) ? 0 : src_nents;
1364 dst_nents = dst ? src_nents : 0;
1365 dst_len = 0;
1366 } else { /* dst && dst != src*/
1367 src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
1368 src_nents = sg_nents_for_len(src, src_len);
1369 if (src_nents < 0) {
1370 dev_err(dev, "Invalid number of src SG.\n");
1371 return ERR_PTR(-EINVAL);
1373 src_nents = (src_nents == 1) ? 0 : src_nents;
1374 dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
1375 dst_nents = sg_nents_for_len(dst, dst_len);
1376 if (dst_nents < 0) {
1377 dev_err(dev, "Invalid number of dst SG.\n");
1378 return ERR_PTR(-EINVAL);
1380 dst_nents = (dst_nents == 1) ? 0 : dst_nents;
1384 * allocate space for base edesc plus the link tables,
1385 * allowing for two separate entries for AD and generated ICV (+ 2),
1386 * and space for two sets of ICVs (stashed and generated)
1388 alloc_len = sizeof(struct talitos_edesc);
1389 if (src_nents || dst_nents) {
1390 if (is_sec1)
1391 dma_len = (src_nents ? src_len : 0) +
1392 (dst_nents ? dst_len : 0);
1393 else
1394 dma_len = (src_nents + dst_nents + 2) *
1395 sizeof(struct talitos_ptr) + authsize * 2;
1396 alloc_len += dma_len;
1397 } else {
1398 dma_len = 0;
1399 alloc_len += icv_stashing ? authsize : 0;
1401 alloc_len += ivsize;
1403 edesc = kmalloc(alloc_len, GFP_DMA | flags);
1404 if (!edesc)
1405 return ERR_PTR(-ENOMEM);
1406 if (ivsize) {
1407 iv = memcpy(((u8 *)edesc) + alloc_len - ivsize, iv, ivsize);
1408 iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);
1411 edesc->src_nents = src_nents;
1412 edesc->dst_nents = dst_nents;
1413 edesc->iv_dma = iv_dma;
1414 edesc->dma_len = dma_len;
1415 if (dma_len)
1416 edesc->dma_link_tbl = dma_map_single(dev, &edesc->link_tbl[0],
1417 edesc->dma_len,
1418 DMA_BIDIRECTIONAL);
1420 return edesc;
1423 static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
1424 int icv_stashing, bool encrypt)
1426 struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
1427 unsigned int authsize = crypto_aead_authsize(authenc);
1428 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1429 unsigned int ivsize = crypto_aead_ivsize(authenc);
1431 return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
1432 iv, areq->assoclen, areq->cryptlen,
1433 authsize, ivsize, icv_stashing,
1434 areq->base.flags, encrypt);
1437 static int aead_encrypt(struct aead_request *req)
1439 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1440 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1441 struct talitos_edesc *edesc;
1443 /* allocate extended descriptor */
1444 edesc = aead_edesc_alloc(req, req->iv, 0, true);
1445 if (IS_ERR(edesc))
1446 return PTR_ERR(edesc);
1448 /* set encrypt */
1449 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;
1451 return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
1454 static int aead_decrypt(struct aead_request *req)
1456 struct crypto_aead *authenc = crypto_aead_reqtfm(req);
1457 unsigned int authsize = crypto_aead_authsize(authenc);
1458 struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
1459 struct talitos_private *priv = dev_get_drvdata(ctx->dev);
1460 struct talitos_edesc *edesc;
1461 struct scatterlist *sg;
1462 void *icvdata;
1464 req->cryptlen -= authsize;
1466 /* allocate extended descriptor */
1467 edesc = aead_edesc_alloc(req, req->iv, 1, false);
1468 if (IS_ERR(edesc))
1469 return PTR_ERR(edesc);
1471 if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
1472 ((!edesc->src_nents && !edesc->dst_nents) ||
1473 priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {
1475 /* decrypt and check the ICV */
1476 edesc->desc.hdr = ctx->desc_hdr_template |
1477 DESC_HDR_DIR_INBOUND |
1478 DESC_HDR_MODE1_MDEU_CICV;
1480 /* reset integrity check result bits */
1481 edesc->desc.hdr_lo = 0;
1483 return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
1486 /* Have to check the ICV with software */
1487 edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;
1489 /* stash incoming ICV for later cmp with ICV generated by the h/w */
1490 if (edesc->dma_len)
1491 icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
1492 edesc->dst_nents + 2];
1493 else
1494 icvdata = &edesc->link_tbl[0];
1496 sg = sg_last(req->src, edesc->src_nents ? : 1);
1498 memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);
1500 return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_WEAK_KEY) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2], DMA_TO_DEVICE);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, is_sec1);
	to_talitos_ptr_len(&desc->ptr[1], ivsize, is_sec1);
	to_talitos_ptr_ext_set(&desc->ptr[1], 0, is_sec1);

	/* cipher key */
	map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
			       (char *)&ctx->key, DMA_TO_DEVICE);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}

static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);

	if (from_talitos_ptr_len(&edesc->desc.ptr[2], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[2],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}

static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		memcpy(req_ctx->buf, req_ctx->bufnext, req_ctx->to_hash_later);
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}

/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourselves and submit a padded block
 */
void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
			       struct talitos_edesc *edesc,
			       struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
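
/*
 * padded_hash[] above is simply the standard Merkle-Damgard padding of
 * an empty message for 64-byte-block algorithms (MD5/SHA-1/SHA-256):
 * a 0x80 terminator followed by zeros and a zero 64-bit length field,
 * submitted with the engine's own padding (MDEU_PAD) disabled.
 */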
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */
	desc->ptr[0] = zero_entry;

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	} else {
		desc->ptr[1] = zero_entry;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		map_single_talitos_ptr(dev, &desc->ptr[2], ctx->keylen,
				       (char *)&ctx->key, DMA_TO_DEVICE);
	else
		desc->ptr[2] = zero_entry;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(req_ctx->psrc, sg_count, edesc->buf, length);
	else
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* fifth DWORD empty */
	desc->ptr[4] = zero_entry;

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */
	desc->ptr[6] = zero_entry;

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}

static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}

static int ahash_init(struct ahash_request *areq)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	/* Initialize the context */
	req_ctx->nbuf = 0;
	req_ctx->first = 1; /* first indicates h/w must init its context */
	req_ctx->swinit = 0; /* assume h/w init of context */
	req_ctx->hw_context_size =
		(crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
			? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
			: TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;

	return 0;
}

/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
static int ahash_init_sha224_swinit(struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	ahash_init(areq);
	req_ctx->swinit = 1;/* prevent h/w initting context with sha256 values*/

	req_ctx->hw_context[0] = SHA224_H0;
	req_ctx->hw_context[1] = SHA224_H1;
	req_ctx->hw_context[2] = SHA224_H2;
	req_ctx->hw_context[3] = SHA224_H3;
	req_ctx->hw_context[4] = SHA224_H4;
	req_ctx->hw_context[5] = SHA224_H5;
	req_ctx->hw_context[6] = SHA224_H6;
	req_ctx->hw_context[7] = SHA224_H7;

	/* init 64-bit count */
	req_ctx->hw_context[8] = 0;
	req_ctx->hw_context[9] = 0;

	return 0;
}
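
/*
 * SHA224_H0..SHA224_H7 are the SHA-224 initial hash values from
 * FIPS 180; SHA-224 is SHA-256 with a different IV and a truncated
 * digest, so loading this context and running the MDEU in SHA-256 mode
 * yields correct SHA-224 output.
 */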
1888 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1890 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1891 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1892 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1893 struct talitos_edesc *edesc;
1894 unsigned int blocksize =
1895 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1896 unsigned int nbytes_to_hash;
1897 unsigned int to_hash_later;
1898 unsigned int nsg;
1899 int nents;
1901 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1902 /* Buffer up to one whole block */
1903 nents = sg_nents_for_len(areq->src, nbytes);
1904 if (nents < 0) {
1905 dev_err(ctx->dev, "Invalid number of src SG.\n");
1906 return nents;
1908 sg_copy_to_buffer(areq->src, nents,
1909 req_ctx->buf + req_ctx->nbuf, nbytes);
1910 req_ctx->nbuf += nbytes;
1911 return 0;
1914 /* At least (blocksize + 1) bytes are available to hash */
1915 nbytes_to_hash = nbytes + req_ctx->nbuf;
1916 to_hash_later = nbytes_to_hash & (blocksize - 1);
1918 if (req_ctx->last)
1919 to_hash_later = 0;
1920 else if (to_hash_later)
1921 /* There is a partial block. Hash the full block(s) now */
1922 nbytes_to_hash -= to_hash_later;
1923 else {
1924 /* Keep one block buffered */
1925 nbytes_to_hash -= blocksize;
1926 to_hash_later = blocksize;
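/*
 * Editor's note -- worked example, not in the original source: with
 * blocksize = 64, req_ctx->nbuf = 10 and nbytes = 120, nbytes_to_hash
 * is 130 and to_hash_later = 130 & 63 = 2, so 128 bytes are hashed now
 * and 2 are buffered.  Were the total an exact multiple of the block
 * size (say 128) on a non-final update, a whole block (64) would be
 * held back instead, guaranteeing ahash_final() still has data to pad.
 */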
1929 /* Chain in any previously buffered data */
1930 if (req_ctx->nbuf) {
1931 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
1932 sg_init_table(req_ctx->bufsl, nsg);
1933 sg_set_buf(req_ctx->bufsl, req_ctx->buf, req_ctx->nbuf);
1934 if (nsg > 1)
1935 sg_chain(req_ctx->bufsl, 2, areq->src);
1936 req_ctx->psrc = req_ctx->bufsl;
1937 } else
1938 req_ctx->psrc = areq->src;
1940 if (to_hash_later) {
1941 nents = sg_nents_for_len(areq->src, nbytes);
1942 if (nents < 0) {
1943 dev_err(ctx->dev, "Invalid number of src SG.\n");
1944 return nents;
1946 sg_pcopy_to_buffer(areq->src, nents,
1947 req_ctx->bufnext,
1948 to_hash_later,
1949 nbytes - to_hash_later);
1951 req_ctx->to_hash_later = to_hash_later;
1953 /* Allocate extended descriptor */
1954 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
1955 if (IS_ERR(edesc))
1956 return PTR_ERR(edesc);
1958 edesc->desc.hdr = ctx->desc_hdr_template;
1960 /* On last one, request SEC to pad; otherwise continue */
1961 if (req_ctx->last)
1962 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
1963 else
1964 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
1966 /* request SEC to INIT hash. */
1967 if (req_ctx->first && !req_ctx->swinit)
1968 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
1970 /* When the tfm context has a keylen, it's an HMAC.
1971 * A first or last (i.e. not middle) descriptor must request HMAC.
1972 */
1973 if (ctx->keylen && (req_ctx->first || req_ctx->last))
1974 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
1976 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash,
1977 ahash_done);
1980 static int ahash_update(struct ahash_request *areq)
1982 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1984 req_ctx->last = 0;
1986 return ahash_process_req(areq, areq->nbytes);
1989 static int ahash_final(struct ahash_request *areq)
1991 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1993 req_ctx->last = 1;
1995 return ahash_process_req(areq, 0);
1998 static int ahash_finup(struct ahash_request *areq)
2000 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2002 req_ctx->last = 1;
2004 return ahash_process_req(areq, areq->nbytes);
2007 static int ahash_digest(struct ahash_request *areq)
2009 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2010 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2012 ahash->init(areq);
2013 req_ctx->last = 1;
2015 return ahash_process_req(areq, areq->nbytes);
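/*
 * Editor's note (not in the original source): .digest is .init plus a
 * one-shot pass -- the request state is re-initialized and then handed
 * to ahash_process_req() with ->last set, so padding and the final
 * digest are produced from a single submission.
 */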
2018 static int ahash_export(struct ahash_request *areq, void *out)
2020 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2021 struct talitos_export_state *export = out;
2023 memcpy(export->hw_context, req_ctx->hw_context,
2024 req_ctx->hw_context_size);
2025 memcpy(export->buf, req_ctx->buf, req_ctx->nbuf);
2026 export->swinit = req_ctx->swinit;
2027 export->first = req_ctx->first;
2028 export->last = req_ctx->last;
2029 export->to_hash_later = req_ctx->to_hash_later;
2030 export->nbuf = req_ctx->nbuf;
2032 return 0;
2035 static int ahash_import(struct ahash_request *areq, const void *in)
2037 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2038 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2039 const struct talitos_export_state *export = in;
2041 memset(req_ctx, 0, sizeof(*req_ctx));
2042 req_ctx->hw_context_size =
2043 (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2044 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2045 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2046 memcpy(req_ctx->hw_context, export->hw_context,
2047 req_ctx->hw_context_size);
2048 memcpy(req_ctx->buf, export->buf, export->nbuf);
2049 req_ctx->swinit = export->swinit;
2050 req_ctx->first = export->first;
2051 req_ctx->last = export->last;
2052 req_ctx->to_hash_later = export->to_hash_later;
2053 req_ctx->nbuf = export->nbuf;
2055 return 0;
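/*
 * Editor's note (usage sketch, not in the original source): the crypto
 * API calls the export/import pair above to checkpoint and resume a
 * partial hash, e.g. crypto_ahash_export(req, state) now and
 * crypto_ahash_import(req2, state) later on a request of the same tfm;
 * halg.statesize, set to sizeof(struct talitos_export_state) in the
 * templates below, tells callers how big that opaque state is.
 */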
2058 struct keyhash_result {
2059 struct completion completion;
2060 int err;
2063 static void keyhash_complete(struct crypto_async_request *req, int err)
2065 struct keyhash_result *res = req->data;
2067 if (err == -EINPROGRESS)
2068 return;
2070 res->err = err;
2071 complete(&res->completion);
2074 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2075 u8 *hash)
2077 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2079 struct scatterlist sg[1];
2080 struct ahash_request *req;
2081 struct keyhash_result hresult;
2082 int ret;
2084 init_completion(&hresult.completion);
2086 req = ahash_request_alloc(tfm, GFP_KERNEL);
2087 if (!req)
2088 return -ENOMEM;
2090 /* Keep tfm keylen == 0 during hash of the long key */
2091 ctx->keylen = 0;
2092 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2093 keyhash_complete, &hresult);
2095 sg_init_one(&sg[0], key, keylen);
2097 ahash_request_set_crypt(req, sg, hash, keylen);
2098 ret = crypto_ahash_digest(req);
2099 switch (ret) {
2100 case 0:
2101 break;
2102 case -EINPROGRESS:
2103 case -EBUSY:
2104 ret = wait_for_completion_interruptible(
2105 &hresult.completion);
2106 if (!ret)
2107 ret = hresult.err;
2108 break;
2109 default:
2110 break;
2112 ahash_request_free(req);
2114 return ret;
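/*
 * Editor's note (pattern summary, not in the original source): keyhash()
 * runs this driver's own asynchronous ahash synchronously: it submits
 * via crypto_ahash_digest(), and if that returns -EINPROGRESS or -EBUSY
 * it sleeps on hresult.completion until keyhash_complete() reports the
 * final status from the done callback.
 */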
2117 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2118 unsigned int keylen)
2120 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2121 unsigned int blocksize =
2122 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2123 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2124 unsigned int keysize = keylen;
2125 u8 hash[SHA512_DIGEST_SIZE];
2126 int ret;
2128 if (keylen <= blocksize)
2129 memcpy(ctx->key, key, keysize);
2130 else {
2131 /* Must get the hash of the long key */
2132 ret = keyhash(tfm, key, keylen, hash);
2134 if (ret) {
2135 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2136 return -EINVAL;
2139 keysize = digestsize;
2140 memcpy(ctx->key, hash, digestsize);
2143 ctx->keylen = keysize;
2145 return 0;
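/*
 * Editor's note -- worked example, not in the original source: per the
 * HMAC definition (RFC 2104), a key longer than the hash block size is
 * first hashed and its digest used as the effective key.  For
 * hmac(sha256), blocksize = 64 and digestsize = 32, so a 100-byte key
 * is replaced by its 32-byte SHA-256 digest before being copied into
 * ctx->key.
 */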
2149 struct talitos_alg_template {
2150 u32 type;
2151 u32 priority;
2152 union {
2153 struct crypto_alg crypto;
2154 struct ahash_alg hash;
2155 struct aead_alg aead;
2156 } alg;
2157 __be32 desc_hdr_template;
2160 static struct talitos_alg_template driver_algs[] = {
2161 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2162 { .type = CRYPTO_ALG_TYPE_AEAD,
2163 .alg.aead = {
2164 .base = {
2165 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2166 .cra_driver_name = "authenc-hmac-sha1-"
2167 "cbc-aes-talitos",
2168 .cra_blocksize = AES_BLOCK_SIZE,
2169 .cra_flags = CRYPTO_ALG_ASYNC,
2171 .ivsize = AES_BLOCK_SIZE,
2172 .maxauthsize = SHA1_DIGEST_SIZE,
2174 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2175 DESC_HDR_SEL0_AESU |
2176 DESC_HDR_MODE0_AESU_CBC |
2177 DESC_HDR_SEL1_MDEUA |
2178 DESC_HDR_MODE1_MDEU_INIT |
2179 DESC_HDR_MODE1_MDEU_PAD |
2180 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2182 { .type = CRYPTO_ALG_TYPE_AEAD,
2183 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2184 .alg.aead = {
2185 .base = {
2186 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2187 .cra_driver_name = "authenc-hmac-sha1-"
2188 "cbc-aes-talitos",
2189 .cra_blocksize = AES_BLOCK_SIZE,
2190 .cra_flags = CRYPTO_ALG_ASYNC,
2192 .ivsize = AES_BLOCK_SIZE,
2193 .maxauthsize = SHA1_DIGEST_SIZE,
2195 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2196 DESC_HDR_SEL0_AESU |
2197 DESC_HDR_MODE0_AESU_CBC |
2198 DESC_HDR_SEL1_MDEUA |
2199 DESC_HDR_MODE1_MDEU_INIT |
2200 DESC_HDR_MODE1_MDEU_PAD |
2201 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2203 { .type = CRYPTO_ALG_TYPE_AEAD,
2204 .alg.aead = {
2205 .base = {
2206 .cra_name = "authenc(hmac(sha1),"
2207 "cbc(des3_ede))",
2208 .cra_driver_name = "authenc-hmac-sha1-"
2209 "cbc-3des-talitos",
2210 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2211 .cra_flags = CRYPTO_ALG_ASYNC,
2213 .ivsize = DES3_EDE_BLOCK_SIZE,
2214 .maxauthsize = SHA1_DIGEST_SIZE,
2216 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2217 DESC_HDR_SEL0_DEU |
2218 DESC_HDR_MODE0_DEU_CBC |
2219 DESC_HDR_MODE0_DEU_3DES |
2220 DESC_HDR_SEL1_MDEUA |
2221 DESC_HDR_MODE1_MDEU_INIT |
2222 DESC_HDR_MODE1_MDEU_PAD |
2223 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2225 { .type = CRYPTO_ALG_TYPE_AEAD,
2226 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2227 .alg.aead = {
2228 .base = {
2229 .cra_name = "authenc(hmac(sha1),"
2230 "cbc(des3_ede))",
2231 .cra_driver_name = "authenc-hmac-sha1-"
2232 "cbc-3des-talitos",
2233 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2234 .cra_flags = CRYPTO_ALG_ASYNC,
2236 .ivsize = DES3_EDE_BLOCK_SIZE,
2237 .maxauthsize = SHA1_DIGEST_SIZE,
2239 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2240 DESC_HDR_SEL0_DEU |
2241 DESC_HDR_MODE0_DEU_CBC |
2242 DESC_HDR_MODE0_DEU_3DES |
2243 DESC_HDR_SEL1_MDEUA |
2244 DESC_HDR_MODE1_MDEU_INIT |
2245 DESC_HDR_MODE1_MDEU_PAD |
2246 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2248 { .type = CRYPTO_ALG_TYPE_AEAD,
2249 .alg.aead = {
2250 .base = {
2251 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2252 .cra_driver_name = "authenc-hmac-sha224-"
2253 "cbc-aes-talitos",
2254 .cra_blocksize = AES_BLOCK_SIZE,
2255 .cra_flags = CRYPTO_ALG_ASYNC,
2257 .ivsize = AES_BLOCK_SIZE,
2258 .maxauthsize = SHA224_DIGEST_SIZE,
2260 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2261 DESC_HDR_SEL0_AESU |
2262 DESC_HDR_MODE0_AESU_CBC |
2263 DESC_HDR_SEL1_MDEUA |
2264 DESC_HDR_MODE1_MDEU_INIT |
2265 DESC_HDR_MODE1_MDEU_PAD |
2266 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2268 { .type = CRYPTO_ALG_TYPE_AEAD,
2269 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2270 .alg.aead = {
2271 .base = {
2272 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2273 .cra_driver_name = "authenc-hmac-sha224-"
2274 "cbc-aes-talitos",
2275 .cra_blocksize = AES_BLOCK_SIZE,
2276 .cra_flags = CRYPTO_ALG_ASYNC,
2278 .ivsize = AES_BLOCK_SIZE,
2279 .maxauthsize = SHA224_DIGEST_SIZE,
2281 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2282 DESC_HDR_SEL0_AESU |
2283 DESC_HDR_MODE0_AESU_CBC |
2284 DESC_HDR_SEL1_MDEUA |
2285 DESC_HDR_MODE1_MDEU_INIT |
2286 DESC_HDR_MODE1_MDEU_PAD |
2287 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2289 { .type = CRYPTO_ALG_TYPE_AEAD,
2290 .alg.aead = {
2291 .base = {
2292 .cra_name = "authenc(hmac(sha224),"
2293 "cbc(des3_ede))",
2294 .cra_driver_name = "authenc-hmac-sha224-"
2295 "cbc-3des-talitos",
2296 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2297 .cra_flags = CRYPTO_ALG_ASYNC,
2299 .ivsize = DES3_EDE_BLOCK_SIZE,
2300 .maxauthsize = SHA224_DIGEST_SIZE,
2302 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2303 DESC_HDR_SEL0_DEU |
2304 DESC_HDR_MODE0_DEU_CBC |
2305 DESC_HDR_MODE0_DEU_3DES |
2306 DESC_HDR_SEL1_MDEUA |
2307 DESC_HDR_MODE1_MDEU_INIT |
2308 DESC_HDR_MODE1_MDEU_PAD |
2309 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2311 { .type = CRYPTO_ALG_TYPE_AEAD,
2312 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2313 .alg.aead = {
2314 .base = {
2315 .cra_name = "authenc(hmac(sha224),"
2316 "cbc(des3_ede))",
2317 .cra_driver_name = "authenc-hmac-sha224-"
2318 "cbc-3des-talitos",
2319 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2320 .cra_flags = CRYPTO_ALG_ASYNC,
2322 .ivsize = DES3_EDE_BLOCK_SIZE,
2323 .maxauthsize = SHA224_DIGEST_SIZE,
2325 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2326 DESC_HDR_SEL0_DEU |
2327 DESC_HDR_MODE0_DEU_CBC |
2328 DESC_HDR_MODE0_DEU_3DES |
2329 DESC_HDR_SEL1_MDEUA |
2330 DESC_HDR_MODE1_MDEU_INIT |
2331 DESC_HDR_MODE1_MDEU_PAD |
2332 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2334 { .type = CRYPTO_ALG_TYPE_AEAD,
2335 .alg.aead = {
2336 .base = {
2337 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2338 .cra_driver_name = "authenc-hmac-sha256-"
2339 "cbc-aes-talitos",
2340 .cra_blocksize = AES_BLOCK_SIZE,
2341 .cra_flags = CRYPTO_ALG_ASYNC,
2343 .ivsize = AES_BLOCK_SIZE,
2344 .maxauthsize = SHA256_DIGEST_SIZE,
2346 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2347 DESC_HDR_SEL0_AESU |
2348 DESC_HDR_MODE0_AESU_CBC |
2349 DESC_HDR_SEL1_MDEUA |
2350 DESC_HDR_MODE1_MDEU_INIT |
2351 DESC_HDR_MODE1_MDEU_PAD |
2352 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2354 { .type = CRYPTO_ALG_TYPE_AEAD,
2355 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2356 .alg.aead = {
2357 .base = {
2358 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2359 .cra_driver_name = "authenc-hmac-sha256-"
2360 "cbc-aes-talitos",
2361 .cra_blocksize = AES_BLOCK_SIZE,
2362 .cra_flags = CRYPTO_ALG_ASYNC,
2364 .ivsize = AES_BLOCK_SIZE,
2365 .maxauthsize = SHA256_DIGEST_SIZE,
2367 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2368 DESC_HDR_SEL0_AESU |
2369 DESC_HDR_MODE0_AESU_CBC |
2370 DESC_HDR_SEL1_MDEUA |
2371 DESC_HDR_MODE1_MDEU_INIT |
2372 DESC_HDR_MODE1_MDEU_PAD |
2373 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2375 { .type = CRYPTO_ALG_TYPE_AEAD,
2376 .alg.aead = {
2377 .base = {
2378 .cra_name = "authenc(hmac(sha256),"
2379 "cbc(des3_ede))",
2380 .cra_driver_name = "authenc-hmac-sha256-"
2381 "cbc-3des-talitos",
2382 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2383 .cra_flags = CRYPTO_ALG_ASYNC,
2385 .ivsize = DES3_EDE_BLOCK_SIZE,
2386 .maxauthsize = SHA256_DIGEST_SIZE,
2388 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2389 DESC_HDR_SEL0_DEU |
2390 DESC_HDR_MODE0_DEU_CBC |
2391 DESC_HDR_MODE0_DEU_3DES |
2392 DESC_HDR_SEL1_MDEUA |
2393 DESC_HDR_MODE1_MDEU_INIT |
2394 DESC_HDR_MODE1_MDEU_PAD |
2395 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2397 { .type = CRYPTO_ALG_TYPE_AEAD,
2398 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2399 .alg.aead = {
2400 .base = {
2401 .cra_name = "authenc(hmac(sha256),"
2402 "cbc(des3_ede))",
2403 .cra_driver_name = "authenc-hmac-sha256-"
2404 "cbc-3des-talitos",
2405 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2406 .cra_flags = CRYPTO_ALG_ASYNC,
2408 .ivsize = DES3_EDE_BLOCK_SIZE,
2409 .maxauthsize = SHA256_DIGEST_SIZE,
2411 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2412 DESC_HDR_SEL0_DEU |
2413 DESC_HDR_MODE0_DEU_CBC |
2414 DESC_HDR_MODE0_DEU_3DES |
2415 DESC_HDR_SEL1_MDEUA |
2416 DESC_HDR_MODE1_MDEU_INIT |
2417 DESC_HDR_MODE1_MDEU_PAD |
2418 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2420 { .type = CRYPTO_ALG_TYPE_AEAD,
2421 .alg.aead = {
2422 .base = {
2423 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2424 .cra_driver_name = "authenc-hmac-sha384-"
2425 "cbc-aes-talitos",
2426 .cra_blocksize = AES_BLOCK_SIZE,
2427 .cra_flags = CRYPTO_ALG_ASYNC,
2429 .ivsize = AES_BLOCK_SIZE,
2430 .maxauthsize = SHA384_DIGEST_SIZE,
2432 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2433 DESC_HDR_SEL0_AESU |
2434 DESC_HDR_MODE0_AESU_CBC |
2435 DESC_HDR_SEL1_MDEUB |
2436 DESC_HDR_MODE1_MDEU_INIT |
2437 DESC_HDR_MODE1_MDEU_PAD |
2438 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2440 { .type = CRYPTO_ALG_TYPE_AEAD,
2441 .alg.aead = {
2442 .base = {
2443 .cra_name = "authenc(hmac(sha384),"
2444 "cbc(des3_ede))",
2445 .cra_driver_name = "authenc-hmac-sha384-"
2446 "cbc-3des-talitos",
2447 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2448 .cra_flags = CRYPTO_ALG_ASYNC,
2450 .ivsize = DES3_EDE_BLOCK_SIZE,
2451 .maxauthsize = SHA384_DIGEST_SIZE,
2453 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2454 DESC_HDR_SEL0_DEU |
2455 DESC_HDR_MODE0_DEU_CBC |
2456 DESC_HDR_MODE0_DEU_3DES |
2457 DESC_HDR_SEL1_MDEUB |
2458 DESC_HDR_MODE1_MDEU_INIT |
2459 DESC_HDR_MODE1_MDEU_PAD |
2460 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2462 { .type = CRYPTO_ALG_TYPE_AEAD,
2463 .alg.aead = {
2464 .base = {
2465 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2466 .cra_driver_name = "authenc-hmac-sha512-"
2467 "cbc-aes-talitos",
2468 .cra_blocksize = AES_BLOCK_SIZE,
2469 .cra_flags = CRYPTO_ALG_ASYNC,
2471 .ivsize = AES_BLOCK_SIZE,
2472 .maxauthsize = SHA512_DIGEST_SIZE,
2474 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2475 DESC_HDR_SEL0_AESU |
2476 DESC_HDR_MODE0_AESU_CBC |
2477 DESC_HDR_SEL1_MDEUB |
2478 DESC_HDR_MODE1_MDEU_INIT |
2479 DESC_HDR_MODE1_MDEU_PAD |
2480 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2482 { .type = CRYPTO_ALG_TYPE_AEAD,
2483 .alg.aead = {
2484 .base = {
2485 .cra_name = "authenc(hmac(sha512),"
2486 "cbc(des3_ede))",
2487 .cra_driver_name = "authenc-hmac-sha512-"
2488 "cbc-3des-talitos",
2489 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2490 .cra_flags = CRYPTO_ALG_ASYNC,
2492 .ivsize = DES3_EDE_BLOCK_SIZE,
2493 .maxauthsize = SHA512_DIGEST_SIZE,
2495 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2496 DESC_HDR_SEL0_DEU |
2497 DESC_HDR_MODE0_DEU_CBC |
2498 DESC_HDR_MODE0_DEU_3DES |
2499 DESC_HDR_SEL1_MDEUB |
2500 DESC_HDR_MODE1_MDEU_INIT |
2501 DESC_HDR_MODE1_MDEU_PAD |
2502 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2504 { .type = CRYPTO_ALG_TYPE_AEAD,
2505 .alg.aead = {
2506 .base = {
2507 .cra_name = "authenc(hmac(md5),cbc(aes))",
2508 .cra_driver_name = "authenc-hmac-md5-"
2509 "cbc-aes-talitos",
2510 .cra_blocksize = AES_BLOCK_SIZE,
2511 .cra_flags = CRYPTO_ALG_ASYNC,
2513 .ivsize = AES_BLOCK_SIZE,
2514 .maxauthsize = MD5_DIGEST_SIZE,
2516 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2517 DESC_HDR_SEL0_AESU |
2518 DESC_HDR_MODE0_AESU_CBC |
2519 DESC_HDR_SEL1_MDEUA |
2520 DESC_HDR_MODE1_MDEU_INIT |
2521 DESC_HDR_MODE1_MDEU_PAD |
2522 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2524 { .type = CRYPTO_ALG_TYPE_AEAD,
2525 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2526 .alg.aead = {
2527 .base = {
2528 .cra_name = "authenc(hmac(md5),cbc(aes))",
2529 .cra_driver_name = "authenc-hmac-md5-"
2530 "cbc-aes-talitos",
2531 .cra_blocksize = AES_BLOCK_SIZE,
2532 .cra_flags = CRYPTO_ALG_ASYNC,
2534 .ivsize = AES_BLOCK_SIZE,
2535 .maxauthsize = MD5_DIGEST_SIZE,
2537 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2538 DESC_HDR_SEL0_AESU |
2539 DESC_HDR_MODE0_AESU_CBC |
2540 DESC_HDR_SEL1_MDEUA |
2541 DESC_HDR_MODE1_MDEU_INIT |
2542 DESC_HDR_MODE1_MDEU_PAD |
2543 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2545 { .type = CRYPTO_ALG_TYPE_AEAD,
2546 .alg.aead = {
2547 .base = {
2548 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2549 .cra_driver_name = "authenc-hmac-md5-"
2550 "cbc-3des-talitos",
2551 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2552 .cra_flags = CRYPTO_ALG_ASYNC,
2554 .ivsize = DES3_EDE_BLOCK_SIZE,
2555 .maxauthsize = MD5_DIGEST_SIZE,
2557 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2558 DESC_HDR_SEL0_DEU |
2559 DESC_HDR_MODE0_DEU_CBC |
2560 DESC_HDR_MODE0_DEU_3DES |
2561 DESC_HDR_SEL1_MDEUA |
2562 DESC_HDR_MODE1_MDEU_INIT |
2563 DESC_HDR_MODE1_MDEU_PAD |
2564 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2566 { .type = CRYPTO_ALG_TYPE_AEAD,
2567 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2568 .alg.aead = {
2569 .base = {
2570 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2571 .cra_driver_name = "authenc-hmac-md5-"
2572 "cbc-3des-talitos",
2573 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2574 .cra_flags = CRYPTO_ALG_ASYNC,
2576 .ivsize = DES3_EDE_BLOCK_SIZE,
2577 .maxauthsize = MD5_DIGEST_SIZE,
2579 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2580 DESC_HDR_SEL0_DEU |
2581 DESC_HDR_MODE0_DEU_CBC |
2582 DESC_HDR_MODE0_DEU_3DES |
2583 DESC_HDR_SEL1_MDEUA |
2584 DESC_HDR_MODE1_MDEU_INIT |
2585 DESC_HDR_MODE1_MDEU_PAD |
2586 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2588 /* ABLKCIPHER algorithms. */
2589 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2590 .alg.crypto = {
2591 .cra_name = "ecb(aes)",
2592 .cra_driver_name = "ecb-aes-talitos",
2593 .cra_blocksize = AES_BLOCK_SIZE,
2594 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2595 CRYPTO_ALG_ASYNC,
2596 .cra_ablkcipher = {
2597 .min_keysize = AES_MIN_KEY_SIZE,
2598 .max_keysize = AES_MAX_KEY_SIZE,
2599 .ivsize = AES_BLOCK_SIZE,
2602 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2603 DESC_HDR_SEL0_AESU,
2605 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2606 .alg.crypto = {
2607 .cra_name = "cbc(aes)",
2608 .cra_driver_name = "cbc-aes-talitos",
2609 .cra_blocksize = AES_BLOCK_SIZE,
2610 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2611 CRYPTO_ALG_ASYNC,
2612 .cra_ablkcipher = {
2613 .min_keysize = AES_MIN_KEY_SIZE,
2614 .max_keysize = AES_MAX_KEY_SIZE,
2615 .ivsize = AES_BLOCK_SIZE,
2618 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2619 DESC_HDR_SEL0_AESU |
2620 DESC_HDR_MODE0_AESU_CBC,
2622 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2623 .alg.crypto = {
2624 .cra_name = "ctr(aes)",
2625 .cra_driver_name = "ctr-aes-talitos",
2626 .cra_blocksize = AES_BLOCK_SIZE,
2627 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2628 CRYPTO_ALG_ASYNC,
2629 .cra_ablkcipher = {
2630 .min_keysize = AES_MIN_KEY_SIZE,
2631 .max_keysize = AES_MAX_KEY_SIZE,
2632 .ivsize = AES_BLOCK_SIZE,
2635 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2636 DESC_HDR_SEL0_AESU |
2637 DESC_HDR_MODE0_AESU_CTR,
2639 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2640 .alg.crypto = {
2641 .cra_name = "ecb(des)",
2642 .cra_driver_name = "ecb-des-talitos",
2643 .cra_blocksize = DES_BLOCK_SIZE,
2644 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2645 CRYPTO_ALG_ASYNC,
2646 .cra_ablkcipher = {
2647 .min_keysize = DES_KEY_SIZE,
2648 .max_keysize = DES_KEY_SIZE,
2649 .ivsize = DES_BLOCK_SIZE,
2652 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2653 DESC_HDR_SEL0_DEU,
2655 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2656 .alg.crypto = {
2657 .cra_name = "cbc(des)",
2658 .cra_driver_name = "cbc-des-talitos",
2659 .cra_blocksize = DES_BLOCK_SIZE,
2660 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2661 CRYPTO_ALG_ASYNC,
2662 .cra_ablkcipher = {
2663 .min_keysize = DES_KEY_SIZE,
2664 .max_keysize = DES_KEY_SIZE,
2665 .ivsize = DES_BLOCK_SIZE,
2668 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2669 DESC_HDR_SEL0_DEU |
2670 DESC_HDR_MODE0_DEU_CBC,
2672 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2673 .alg.crypto = {
2674 .cra_name = "ecb(des3_ede)",
2675 .cra_driver_name = "ecb-3des-talitos",
2676 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2677 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2678 CRYPTO_ALG_ASYNC,
2679 .cra_ablkcipher = {
2680 .min_keysize = DES3_EDE_KEY_SIZE,
2681 .max_keysize = DES3_EDE_KEY_SIZE,
2682 .ivsize = DES3_EDE_BLOCK_SIZE,
2685 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2686 DESC_HDR_SEL0_DEU |
2687 DESC_HDR_MODE0_DEU_3DES,
2689 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2690 .alg.crypto = {
2691 .cra_name = "cbc(des3_ede)",
2692 .cra_driver_name = "cbc-3des-talitos",
2693 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2694 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2695 CRYPTO_ALG_ASYNC,
2696 .cra_ablkcipher = {
2697 .min_keysize = DES3_EDE_KEY_SIZE,
2698 .max_keysize = DES3_EDE_KEY_SIZE,
2699 .ivsize = DES3_EDE_BLOCK_SIZE,
2702 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2703 DESC_HDR_SEL0_DEU |
2704 DESC_HDR_MODE0_DEU_CBC |
2705 DESC_HDR_MODE0_DEU_3DES,
2707 /* AHASH algorithms. */
2708 { .type = CRYPTO_ALG_TYPE_AHASH,
2709 .alg.hash = {
2710 .halg.digestsize = MD5_DIGEST_SIZE,
2711 .halg.statesize = sizeof(struct talitos_export_state),
2712 .halg.base = {
2713 .cra_name = "md5",
2714 .cra_driver_name = "md5-talitos",
2715 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2716 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2717 CRYPTO_ALG_ASYNC,
2720 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2721 DESC_HDR_SEL0_MDEUA |
2722 DESC_HDR_MODE0_MDEU_MD5,
2724 { .type = CRYPTO_ALG_TYPE_AHASH,
2725 .alg.hash = {
2726 .halg.digestsize = SHA1_DIGEST_SIZE,
2727 .halg.statesize = sizeof(struct talitos_export_state),
2728 .halg.base = {
2729 .cra_name = "sha1",
2730 .cra_driver_name = "sha1-talitos",
2731 .cra_blocksize = SHA1_BLOCK_SIZE,
2732 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2733 CRYPTO_ALG_ASYNC,
2736 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2737 DESC_HDR_SEL0_MDEUA |
2738 DESC_HDR_MODE0_MDEU_SHA1,
2740 { .type = CRYPTO_ALG_TYPE_AHASH,
2741 .alg.hash = {
2742 .halg.digestsize = SHA224_DIGEST_SIZE,
2743 .halg.statesize = sizeof(struct talitos_export_state),
2744 .halg.base = {
2745 .cra_name = "sha224",
2746 .cra_driver_name = "sha224-talitos",
2747 .cra_blocksize = SHA224_BLOCK_SIZE,
2748 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2749 CRYPTO_ALG_ASYNC,
2752 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2753 DESC_HDR_SEL0_MDEUA |
2754 DESC_HDR_MODE0_MDEU_SHA224,
2756 { .type = CRYPTO_ALG_TYPE_AHASH,
2757 .alg.hash = {
2758 .halg.digestsize = SHA256_DIGEST_SIZE,
2759 .halg.statesize = sizeof(struct talitos_export_state),
2760 .halg.base = {
2761 .cra_name = "sha256",
2762 .cra_driver_name = "sha256-talitos",
2763 .cra_blocksize = SHA256_BLOCK_SIZE,
2764 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2765 CRYPTO_ALG_ASYNC,
2768 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2769 DESC_HDR_SEL0_MDEUA |
2770 DESC_HDR_MODE0_MDEU_SHA256,
2772 { .type = CRYPTO_ALG_TYPE_AHASH,
2773 .alg.hash = {
2774 .halg.digestsize = SHA384_DIGEST_SIZE,
2775 .halg.statesize = sizeof(struct talitos_export_state),
2776 .halg.base = {
2777 .cra_name = "sha384",
2778 .cra_driver_name = "sha384-talitos",
2779 .cra_blocksize = SHA384_BLOCK_SIZE,
2780 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2781 CRYPTO_ALG_ASYNC,
2784 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2785 DESC_HDR_SEL0_MDEUB |
2786 DESC_HDR_MODE0_MDEUB_SHA384,
2788 { .type = CRYPTO_ALG_TYPE_AHASH,
2789 .alg.hash = {
2790 .halg.digestsize = SHA512_DIGEST_SIZE,
2791 .halg.statesize = sizeof(struct talitos_export_state),
2792 .halg.base = {
2793 .cra_name = "sha512",
2794 .cra_driver_name = "sha512-talitos",
2795 .cra_blocksize = SHA512_BLOCK_SIZE,
2796 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2797 CRYPTO_ALG_ASYNC,
2800 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2801 DESC_HDR_SEL0_MDEUB |
2802 DESC_HDR_MODE0_MDEUB_SHA512,
2804 { .type = CRYPTO_ALG_TYPE_AHASH,
2805 .alg.hash = {
2806 .halg.digestsize = MD5_DIGEST_SIZE,
2807 .halg.statesize = sizeof(struct talitos_export_state),
2808 .halg.base = {
2809 .cra_name = "hmac(md5)",
2810 .cra_driver_name = "hmac-md5-talitos",
2811 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2812 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2813 CRYPTO_ALG_ASYNC,
2816 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2817 DESC_HDR_SEL0_MDEUA |
2818 DESC_HDR_MODE0_MDEU_MD5,
2820 { .type = CRYPTO_ALG_TYPE_AHASH,
2821 .alg.hash = {
2822 .halg.digestsize = SHA1_DIGEST_SIZE,
2823 .halg.statesize = sizeof(struct talitos_export_state),
2824 .halg.base = {
2825 .cra_name = "hmac(sha1)",
2826 .cra_driver_name = "hmac-sha1-talitos",
2827 .cra_blocksize = SHA1_BLOCK_SIZE,
2828 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2829 CRYPTO_ALG_ASYNC,
2832 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2833 DESC_HDR_SEL0_MDEUA |
2834 DESC_HDR_MODE0_MDEU_SHA1,
2836 { .type = CRYPTO_ALG_TYPE_AHASH,
2837 .alg.hash = {
2838 .halg.digestsize = SHA224_DIGEST_SIZE,
2839 .halg.statesize = sizeof(struct talitos_export_state),
2840 .halg.base = {
2841 .cra_name = "hmac(sha224)",
2842 .cra_driver_name = "hmac-sha224-talitos",
2843 .cra_blocksize = SHA224_BLOCK_SIZE,
2844 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2845 CRYPTO_ALG_ASYNC,
2848 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2849 DESC_HDR_SEL0_MDEUA |
2850 DESC_HDR_MODE0_MDEU_SHA224,
2852 { .type = CRYPTO_ALG_TYPE_AHASH,
2853 .alg.hash = {
2854 .halg.digestsize = SHA256_DIGEST_SIZE,
2855 .halg.statesize = sizeof(struct talitos_export_state),
2856 .halg.base = {
2857 .cra_name = "hmac(sha256)",
2858 .cra_driver_name = "hmac-sha256-talitos",
2859 .cra_blocksize = SHA256_BLOCK_SIZE,
2860 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2861 CRYPTO_ALG_ASYNC,
2864 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2865 DESC_HDR_SEL0_MDEUA |
2866 DESC_HDR_MODE0_MDEU_SHA256,
2868 { .type = CRYPTO_ALG_TYPE_AHASH,
2869 .alg.hash = {
2870 .halg.digestsize = SHA384_DIGEST_SIZE,
2871 .halg.statesize = sizeof(struct talitos_export_state),
2872 .halg.base = {
2873 .cra_name = "hmac(sha384)",
2874 .cra_driver_name = "hmac-sha384-talitos",
2875 .cra_blocksize = SHA384_BLOCK_SIZE,
2876 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2877 CRYPTO_ALG_ASYNC,
2880 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2881 DESC_HDR_SEL0_MDEUB |
2882 DESC_HDR_MODE0_MDEUB_SHA384,
2884 { .type = CRYPTO_ALG_TYPE_AHASH,
2885 .alg.hash = {
2886 .halg.digestsize = SHA512_DIGEST_SIZE,
2887 .halg.statesize = sizeof(struct talitos_export_state),
2888 .halg.base = {
2889 .cra_name = "hmac(sha512)",
2890 .cra_driver_name = "hmac-sha512-talitos",
2891 .cra_blocksize = SHA512_BLOCK_SIZE,
2892 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2893 CRYPTO_ALG_ASYNC,
2896 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2897 DESC_HDR_SEL0_MDEUB |
2898 DESC_HDR_MODE0_MDEUB_SHA512,
2902 struct talitos_crypto_alg {
2903 struct list_head entry;
2904 struct device *dev;
2905 struct talitos_alg_template algt;
2908 static int talitos_init_common(struct talitos_ctx *ctx,
2909 struct talitos_crypto_alg *talitos_alg)
2911 struct talitos_private *priv;
2913 /* update context with ptr to dev */
2914 ctx->dev = talitos_alg->dev;
2916 /* assign SEC channel to tfm in round-robin fashion */
2917 priv = dev_get_drvdata(ctx->dev);
2918 ctx->ch = atomic_inc_return(&priv->last_chan) &
2919 (priv->num_channels - 1);
2921 /* copy descriptor header template value */
2922 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2924 /* select done notification */
2925 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2927 return 0;
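/*
 * Editor's note -- worked example, not in the original source: with
 * num_channels = 4 (talitos_probe() requires a power of two), the mask
 * above makes successive tfm initializations land on channels
 * 1, 2, 3, 0, 1, ... as atomic_inc_return(&priv->last_chan) advances.
 */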
2930 static int talitos_cra_init(struct crypto_tfm *tfm)
2932 struct crypto_alg *alg = tfm->__crt_alg;
2933 struct talitos_crypto_alg *talitos_alg;
2934 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2936 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
2937 talitos_alg = container_of(__crypto_ahash_alg(alg),
2938 struct talitos_crypto_alg,
2939 algt.alg.hash);
2940 else
2941 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2942 algt.alg.crypto);
2944 return talitos_init_common(ctx, talitos_alg);
2947 static int talitos_cra_init_aead(struct crypto_aead *tfm)
2949 struct aead_alg *alg = crypto_aead_alg(tfm);
2950 struct talitos_crypto_alg *talitos_alg;
2951 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
2953 talitos_alg = container_of(alg, struct talitos_crypto_alg,
2954 algt.alg.aead);
2956 return talitos_init_common(ctx, talitos_alg);
2959 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
2961 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
2963 talitos_cra_init(tfm);
2965 ctx->keylen = 0;
2966 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
2967 sizeof(struct talitos_ahash_req_ctx));
2969 return 0;
2972 /*
2973 * given the alg's descriptor header template, determine whether descriptor
2974 * type and primary/secondary execution units required match the hw
2975 * capabilities description provided in the device tree node.
2976 */
2977 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
2979 struct talitos_private *priv = dev_get_drvdata(dev);
2980 int ret;
2982 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
2983 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
2985 if (SECONDARY_EU(desc_hdr_template))
2986 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
2987 & priv->exec_units);
2989 return ret;
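/*
 * Editor's note -- worked example, not in the original source: for the
 * sha256 ahash template (DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
 * DESC_HDR_SEL0_MDEUA | DESC_HDR_MODE0_MDEU_SHA256), DESC_TYPE() and
 * PRIMARY_EU() pick the descriptor-type and execution-unit indices out
 * of the header; the algorithm is registered only if the corresponding
 * bits are set in the "fsl,descriptor-types-mask" and
 * "fsl,exec-units-mask" device tree properties read by talitos_probe().
 */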
2992 static int talitos_remove(struct platform_device *ofdev)
2994 struct device *dev = &ofdev->dev;
2995 struct talitos_private *priv = dev_get_drvdata(dev);
2996 struct talitos_crypto_alg *t_alg, *n;
2997 int i;
2999 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
3000 switch (t_alg->algt.type) {
/* unregister each type the way talitos_probe() registered it */
case CRYPTO_ALG_TYPE_ABLKCIPHER:
crypto_unregister_alg(&t_alg->algt.alg.crypto);
break;
case CRYPTO_ALG_TYPE_AEAD:
crypto_unregister_aead(&t_alg->algt.alg.aead);
break;
case CRYPTO_ALG_TYPE_AHASH:
crypto_unregister_ahash(&t_alg->algt.alg.hash);
break;
3009 list_del(&t_alg->entry);
3010 kfree(t_alg);
3013 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3014 talitos_unregister_rng(dev);
3016 for (i = 0; priv->chan && i < priv->num_channels; i++)
3017 kfree(priv->chan[i].fifo);
3019 kfree(priv->chan);
3021 for (i = 0; i < 2; i++)
3022 if (priv->irq[i]) {
3023 free_irq(priv->irq[i], dev);
3024 irq_dispose_mapping(priv->irq[i]);
3027 tasklet_kill(&priv->done_task[0]);
3028 if (priv->irq[1])
3029 tasklet_kill(&priv->done_task[1]);
3031 iounmap(priv->reg);
3033 kfree(priv);
3035 return 0;
3038 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3039 struct talitos_alg_template
3040 *template)
3042 struct talitos_private *priv = dev_get_drvdata(dev);
3043 struct talitos_crypto_alg *t_alg;
3044 struct crypto_alg *alg;
3046 t_alg = kzalloc(sizeof(struct talitos_crypto_alg), GFP_KERNEL);
3047 if (!t_alg)
3048 return ERR_PTR(-ENOMEM);
3050 t_alg->algt = *template;
3052 switch (t_alg->algt.type) {
3053 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3054 alg = &t_alg->algt.alg.crypto;
3055 alg->cra_init = talitos_cra_init;
3056 alg->cra_type = &crypto_ablkcipher_type;
3057 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3058 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3059 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3060 alg->cra_ablkcipher.geniv = "eseqiv";
3061 break;
3062 case CRYPTO_ALG_TYPE_AEAD:
3063 alg = &t_alg->algt.alg.aead.base;
3064 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3065 t_alg->algt.alg.aead.setkey = aead_setkey;
3066 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3067 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3068 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3069 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3070 kfree(t_alg);
3071 return ERR_PTR(-ENOTSUPP);
3073 break;
3074 case CRYPTO_ALG_TYPE_AHASH:
3075 alg = &t_alg->algt.alg.hash.halg.base;
3076 alg->cra_init = talitos_cra_init_ahash;
3077 alg->cra_type = &crypto_ahash_type;
3078 t_alg->algt.alg.hash.init = ahash_init;
3079 t_alg->algt.alg.hash.update = ahash_update;
3080 t_alg->algt.alg.hash.final = ahash_final;
3081 t_alg->algt.alg.hash.finup = ahash_finup;
3082 t_alg->algt.alg.hash.digest = ahash_digest;
3083 if (!strncmp(alg->cra_name, "hmac", 4))
3084 t_alg->algt.alg.hash.setkey = ahash_setkey;
3085 t_alg->algt.alg.hash.import = ahash_import;
3086 t_alg->algt.alg.hash.export = ahash_export;
3088 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3089 !strncmp(alg->cra_name, "hmac", 4)) {
3090 kfree(t_alg);
3091 return ERR_PTR(-ENOTSUPP);
3093 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3094 (!strcmp(alg->cra_name, "sha224") ||
3095 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3096 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3097 t_alg->algt.desc_hdr_template =
3098 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3099 DESC_HDR_SEL0_MDEUA |
3100 DESC_HDR_MODE0_MDEU_SHA256;
3102 break;
3103 default:
3104 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3105 kfree(t_alg);
3106 return ERR_PTR(-EINVAL);
3109 alg->cra_module = THIS_MODULE;
3110 if (t_alg->algt.priority)
3111 alg->cra_priority = t_alg->algt.priority;
3112 else
3113 alg->cra_priority = TALITOS_CRA_PRIORITY;
3114 alg->cra_alignmask = 0;
3115 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3116 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3118 t_alg->dev = dev;
3120 return t_alg;
3123 static int talitos_probe_irq(struct platform_device *ofdev)
3125 struct device *dev = &ofdev->dev;
3126 struct device_node *np = ofdev->dev.of_node;
3127 struct talitos_private *priv = dev_get_drvdata(dev);
3128 int err;
3129 bool is_sec1 = has_ftr_sec1(priv);
3131 priv->irq[0] = irq_of_parse_and_map(np, 0);
3132 if (!priv->irq[0]) {
3133 dev_err(dev, "failed to map irq\n");
3134 return -EINVAL;
3136 if (is_sec1) {
3137 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3138 dev_driver_string(dev), dev);
3139 goto primary_out;
3142 priv->irq[1] = irq_of_parse_and_map(np, 1);
3144 /* get the primary irq line */
3145 if (!priv->irq[1]) {
3146 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3147 dev_driver_string(dev), dev);
3148 goto primary_out;
3151 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3152 dev_driver_string(dev), dev);
3153 if (err)
3154 goto primary_out;
3156 /* get the secondary irq line */
3157 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3158 dev_driver_string(dev), dev);
3159 if (err) {
3160 dev_err(dev, "failed to request secondary irq\n");
3161 irq_dispose_mapping(priv->irq[1]);
3162 priv->irq[1] = 0;
3165 return err;
3167 primary_out:
3168 if (err) {
3169 dev_err(dev, "failed to request primary irq\n");
3170 irq_dispose_mapping(priv->irq[0]);
3171 priv->irq[0] = 0;
3174 return err;
3177 static int talitos_probe(struct platform_device *ofdev)
3179 struct device *dev = &ofdev->dev;
3180 struct device_node *np = ofdev->dev.of_node;
3181 struct talitos_private *priv;
3182 const unsigned int *prop;
3183 int i, err;
3184 int stride;
3186 priv = kzalloc(sizeof(struct talitos_private), GFP_KERNEL);
3187 if (!priv)
3188 return -ENOMEM;
3190 INIT_LIST_HEAD(&priv->alg_list);
3192 dev_set_drvdata(dev, priv);
3194 priv->ofdev = ofdev;
3196 spin_lock_init(&priv->reg_lock);
3198 priv->reg = of_iomap(np, 0);
3199 if (!priv->reg) {
3200 dev_err(dev, "failed to of_iomap\n");
3201 err = -ENOMEM;
3202 goto err_out;
3205 /* get SEC version capabilities from device tree */
3206 prop = of_get_property(np, "fsl,num-channels", NULL);
3207 if (prop)
3208 priv->num_channels = *prop;
3210 prop = of_get_property(np, "fsl,channel-fifo-len", NULL);
3211 if (prop)
3212 priv->chfifo_len = *prop;
3214 prop = of_get_property(np, "fsl,exec-units-mask", NULL);
3215 if (prop)
3216 priv->exec_units = *prop;
3218 prop = of_get_property(np, "fsl,descriptor-types-mask", NULL);
3219 if (prop)
3220 priv->desc_types = *prop;
3222 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3223 !priv->exec_units || !priv->desc_types) {
3224 dev_err(dev, "invalid property data in device tree node\n");
3225 err = -EINVAL;
3226 goto err_out;
3229 if (of_device_is_compatible(np, "fsl,sec3.0"))
3230 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3232 if (of_device_is_compatible(np, "fsl,sec2.1"))
3233 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3234 TALITOS_FTR_SHA224_HWINIT |
3235 TALITOS_FTR_HMAC_OK;
3237 if (of_device_is_compatible(np, "fsl,sec1.0"))
3238 priv->features |= TALITOS_FTR_SEC1;
3240 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3241 priv->reg_deu = priv->reg + TALITOS12_DEU;
3242 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3243 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3244 stride = TALITOS1_CH_STRIDE;
3245 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3246 priv->reg_deu = priv->reg + TALITOS10_DEU;
3247 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3248 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3249 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3250 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3251 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3252 stride = TALITOS1_CH_STRIDE;
3253 } else {
3254 priv->reg_deu = priv->reg + TALITOS2_DEU;
3255 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3256 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3257 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3258 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3259 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3260 priv->reg_keu = priv->reg + TALITOS2_KEU;
3261 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3262 stride = TALITOS2_CH_STRIDE;
3265 err = talitos_probe_irq(ofdev);
3266 if (err)
3267 goto err_out;
3269 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3270 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3271 (unsigned long)dev);
3272 } else {
3273 if (!priv->irq[1]) {
3274 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3275 (unsigned long)dev);
3276 } else {
3277 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3278 (unsigned long)dev);
3279 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3280 (unsigned long)dev);
3284 priv->chan = kcalloc(priv->num_channels,
3285 sizeof(struct talitos_channel), GFP_KERNEL);
3286 if (!priv->chan) {
3287 dev_err(dev, "failed to allocate channel management space\n");
3288 err = -ENOMEM;
3289 goto err_out;
3292 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3294 for (i = 0; i < priv->num_channels; i++) {
3295 priv->chan[i].reg = priv->reg + stride * (i + 1);
3296 if (!priv->irq[1] || !(i & 1))
3297 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3299 spin_lock_init(&priv->chan[i].head_lock);
3300 spin_lock_init(&priv->chan[i].tail_lock);
3302 priv->chan[i].fifo = kcalloc(priv->fifo_len,
3303 sizeof(struct talitos_request), GFP_KERNEL);
3304 if (!priv->chan[i].fifo) {
3305 dev_err(dev, "failed to allocate request fifo %d\n", i);
3306 err = -ENOMEM;
3307 goto err_out;
3310 atomic_set(&priv->chan[i].submit_count,
3311 -(priv->chfifo_len - 1));
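/*
 * Editor's note (inferred from the submit path, not in the original
 * source): biasing submit_count to -(chfifo_len - 1) lets the submit
 * code use a bare "atomic_inc_return() > 0" test for a full channel,
 * so each channel accepts up to chfifo_len - 1 in-flight requests
 * without a separate occupancy counter.
 */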
3314 dma_set_mask(dev, DMA_BIT_MASK(36));
3316 /* reset and initialize the h/w */
3317 err = init_device(dev);
3318 if (err) {
3319 dev_err(dev, "failed to initialize device\n");
3320 goto err_out;
3323 /* register the RNG, if available */
3324 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3325 err = talitos_register_rng(dev);
3326 if (err) {
3327 dev_err(dev, "failed to register hwrng: %d\n", err);
3328 goto err_out;
3329 } else
3330 dev_info(dev, "hwrng\n");
3333 /* register crypto algorithms the device supports */
3334 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3335 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3336 struct talitos_crypto_alg *t_alg;
3337 struct crypto_alg *alg = NULL;
3339 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3340 if (IS_ERR(t_alg)) {
3341 err = PTR_ERR(t_alg);
3342 if (err == -ENOTSUPP)
3343 continue;
3344 goto err_out;
3347 switch (t_alg->algt.type) {
3348 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3349 err = crypto_register_alg(
3350 &t_alg->algt.alg.crypto);
3351 alg = &t_alg->algt.alg.crypto;
3352 break;
3354 case CRYPTO_ALG_TYPE_AEAD:
3355 err = crypto_register_aead(
3356 &t_alg->algt.alg.aead);
3357 alg = &t_alg->algt.alg.aead.base;
3358 break;
3360 case CRYPTO_ALG_TYPE_AHASH:
3361 err = crypto_register_ahash(
3362 &t_alg->algt.alg.hash);
3363 alg = &t_alg->algt.alg.hash.halg.base;
3364 break;
3366 if (err) {
3367 dev_err(dev, "%s alg registration failed\n",
3368 alg->cra_driver_name);
3369 kfree(t_alg);
3370 } else
3371 list_add_tail(&t_alg->entry, &priv->alg_list);
3374 if (!list_empty(&priv->alg_list))
3375 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3376 (char *)of_get_property(np, "compatible", NULL));
3378 return 0;
3380 err_out:
3381 talitos_remove(ofdev);
3383 return err;
3386 static const struct of_device_id talitos_match[] = {
3387 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3388 {
3389 .compatible = "fsl,sec1.0",
3390 },
3391 #endif
3392 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3393 {
3394 .compatible = "fsl,sec2.0",
3395 },
3396 #endif
3397 { },
3398 };
3399 MODULE_DEVICE_TABLE(of, talitos_match);
3401 static struct platform_driver talitos_driver = {
3402 .driver = {
3403 .name = "talitos",
3404 .of_match_table = talitos_match,
3405 },
3406 .probe = talitos_probe,
3407 .remove = talitos_remove,
3408 };
3410 module_platform_driver(talitos_driver);
3412 MODULE_LICENSE("GPL");
3413 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3414 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");