/*
 * talitos - Freescale Integrated Security Engine (SEC) device driver
 *
 * Copyright (c) 2008-2011 Freescale Semiconductor, Inc.
 *
 * Scatterlist Crypto API glue code copied from files with the following:
 * Copyright (c) 2006-2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Crypto algorithm registration code copied from hifn driver:
 * 2007+ Copyright (c) Evgeniy Polyakov <johnpol@2ka.mipt.ru>
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/crypto.h>
#include <linux/hw_random.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/des.h>
#include <crypto/sha.h>
#include <crypto/md5.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/skcipher.h>
#include <crypto/hash.h>
#include <crypto/internal/hash.h>
#include <crypto/scatterwalk.h>

#include "talitos.h"
static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
			   unsigned int len, bool is_sec1)
{
	ptr->ptr = cpu_to_be32(lower_32_bits(dma_addr));
	if (is_sec1) {
		ptr->len1 = cpu_to_be16(len);
	} else {
		ptr->len = cpu_to_be16(len);
		ptr->eptr = upper_32_bits(dma_addr);
	}
}
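
/*
 * Note (editorial comment, added for clarity): on SEC2+ the descriptor
 * pointer carries a 36-bit bus address, split into the 32-bit ptr word
 * plus the upper bits in eptr; e.g. a bus address of 0x9_2345_6780 is
 * stored as ptr = cpu_to_be32(0x23456780) and eptr = 0x9 (cf. the
 * TALITOS_CCCR_LO_EAE "36-bit addressing" setup in reset_channel()).
 * SEC1 pointers are 32-bit only, and the 16-bit length lives in len1.
 */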
static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
			     struct talitos_ptr *src_ptr, bool is_sec1)
{
	dst_ptr->ptr = src_ptr->ptr;
	if (is_sec1) {
		dst_ptr->len1 = src_ptr->len1;
	} else {
		dst_ptr->len = src_ptr->len;
		dst_ptr->eptr = src_ptr->eptr;
	}
}
static unsigned short from_talitos_ptr_len(struct talitos_ptr *ptr,
					   bool is_sec1)
{
	if (is_sec1)
		return be16_to_cpu(ptr->len1);
	else
		return be16_to_cpu(ptr->len);
}
static void to_talitos_ptr_ext_set(struct talitos_ptr *ptr, u8 val,
				   bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent = val;
}

static void to_talitos_ptr_ext_or(struct talitos_ptr *ptr, u8 val, bool is_sec1)
{
	if (!is_sec1)
		ptr->j_extent |= val;
}
/*
 * map virtual single (contiguous) pointer to h/w descriptor pointer
 */
static void map_single_talitos_ptr(struct device *dev,
				   struct talitos_ptr *ptr,
				   unsigned int len, void *data,
				   enum dma_data_direction dir)
{
	dma_addr_t dma_addr = dma_map_single(dev, data, len, dir);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	to_talitos_ptr(ptr, dma_addr, len, is_sec1);
}
/*
 * unmap bus single (contiguous) h/w descriptor pointer
 */
static void unmap_single_talitos_ptr(struct device *dev,
				     struct talitos_ptr *ptr,
				     enum dma_data_direction dir)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	dma_unmap_single(dev, be32_to_cpu(ptr->ptr),
			 from_talitos_ptr_len(ptr, is_sec1), dir);
}
static int reset_channel(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1) {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS1_CCCR_LO_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR_LO) &
			TALITOS1_CCCR_LO_RESET) && --timeout)
			cpu_relax();
	} else {
		setbits32(priv->chan[ch].reg + TALITOS_CCCR,
			  TALITOS2_CCCR_RESET);

		while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			TALITOS2_CCCR_RESET) && --timeout)
			cpu_relax();
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset channel %d\n", ch);
		return -EIO;
	}

	/* set 36-bit addressing, done writeback enable and done IRQ enable */
	setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, TALITOS_CCCR_LO_EAE |
		  TALITOS_CCCR_LO_CDWE | TALITOS_CCCR_LO_CDIE);
	/* enable chaining descriptors */
	if (is_sec1)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_NE);

	/* and ICCR writeback, if available */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO,
			  TALITOS_CCCR_LO_IWSE);

	return 0;
}
static int reset_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	bool is_sec1 = has_ftr_sec1(priv);
	u32 mcr = is_sec1 ? TALITOS1_MCR_SWR : TALITOS2_MCR_SWR;

	setbits32(priv->reg + TALITOS_MCR, mcr);

	while ((in_be32(priv->reg + TALITOS_MCR) & mcr)
	       && --timeout)
		cpu_relax();

	if (priv->irq[1]) {
		mcr = TALITOS_MCR_RCA1 | TALITOS_MCR_RCA3;
		setbits32(priv->reg + TALITOS_MCR, mcr);
	}

	if (timeout == 0) {
		dev_err(dev, "failed to reset device\n");
		return -EIO;
	}

	return 0;
}
/*
 * Reset and initialize the device
 */
static int init_device(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int ch, err;
	bool is_sec1 = has_ftr_sec1(priv);

	/*
	 * Master reset
	 * errata documentation: warning: certain SEC interrupts
	 * are not fully cleared by writing the MCR:SWR bit,
	 * set bit twice to completely reset
	 */
	err = reset_device(dev);
	if (err)
		return err;

	err = reset_device(dev);
	if (err)
		return err;

	/* reset channels */
	for (ch = 0; ch < priv->num_channels; ch++) {
		err = reset_channel(dev, ch);
		if (err)
			return err;
	}

	/* enable channel done and error interrupts */
	if (is_sec1) {
		clrbits32(priv->reg + TALITOS_IMR, TALITOS1_IMR_INIT);
		clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);
		/* disable parity error check in DEU (erroneous? test vect.) */
		setbits32(priv->reg_deu + TALITOS_EUICR, TALITOS1_DEUICR_KPE);
	} else {
		setbits32(priv->reg + TALITOS_IMR, TALITOS2_IMR_INIT);
		setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);
	}

	/* disable integrity check error interrupts (use writeback instead) */
	if (priv->features & TALITOS_FTR_HW_AUTH_CHECK)
		setbits32(priv->reg_mdeu + TALITOS_EUICR_LO,
			  TALITOS_MDEUICR_LO_ICE);

	return 0;
}
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev: the SEC device to be used
 * @ch: the SEC device channel to be used
 * @desc: the descriptor to be processed by the device
 * @callback: whom to call when processing is complete
 * @context: a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
int talitos_submit(struct device *dev, int ch, struct talitos_desc *desc,
		   void (*callback)(struct device *dev,
				    struct talitos_desc *desc,
				    void *context, int error),
		   void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags;
	int head;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].head_lock, flags);

	if (!atomic_inc_not_zero(&priv->chan[ch].submit_count)) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);
		return -EAGAIN;
	}

	head = priv->chan[ch].head;
	request = &priv->chan[ch].fifo[head];

	/* map descriptor and save caller data */
	if (is_sec1) {
		desc->hdr1 = desc->hdr;
		request->dma_desc = dma_map_single(dev, &desc->hdr1,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	} else {
		request->dma_desc = dma_map_single(dev, desc,
						   TALITOS_DESC_SIZE,
						   DMA_BIDIRECTIONAL);
	}
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->chan[ch].head = (priv->chan[ch].head + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->chan[ch].reg + TALITOS_FF,
		 upper_32_bits(request->dma_desc));
	out_be32(priv->chan[ch].reg + TALITOS_FF_LO,
		 lower_32_bits(request->dma_desc));

	spin_unlock_irqrestore(&priv->chan[ch].head_lock, flags);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(talitos_submit);
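
#if 0
/*
 * Illustrative usage sketch of talitos_submit() (editorial example, not
 * part of the driver). It assumes the caller has already built a
 * dma-mapped descriptor; the callback/return handling mirrors how this
 * file's own callers (e.g. common_nonsnoop() below) pair a descriptor
 * with its request. Names here are local to the example.
 */
static void example_done(struct device *dev, struct talitos_desc *desc,
			 void *context, int err)
{
	/* callback must check err (and header feedback) for device status */
	pr_info("descriptor completed with status %d\n", err);
}

static int example_submit(struct device *dev, int ch,
			  struct talitos_desc *desc)
{
	int ret = talitos_submit(dev, ch, desc, example_done, NULL);

	/* -EINPROGRESS means queued; -EAGAIN means the channel fifo is full */
	if (ret != -EINPROGRESS)
		return ret;
	return 0;
}
#endif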
/*
 * process what was done, notify callback of error if not
 */
static void flush_channel(struct device *dev, int ch, int error, int reset_ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request, saved_req;
	unsigned long flags;
	int tail, status;
	bool is_sec1 = has_ftr_sec1(priv);

	spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);

	tail = priv->chan[ch].tail;
	while (priv->chan[ch].fifo[tail].desc) {
		__be32 hdr;

		request = &priv->chan[ch].fifo[tail];

		/* descriptors with their done bits set don't get the error */
		rmb();
		if (!is_sec1)
			hdr = request->desc->hdr;
		else if (request->desc->next_desc)
			hdr = (request->desc + 1)->hdr1;
		else
			hdr = request->desc->hdr1;

		if ((hdr & DESC_HDR_DONE) == DESC_HDR_DONE)
			status = 0;
		else
			if (!error)
				break;
			else
				status = error;

		dma_unmap_single(dev, request->dma_desc,
				 TALITOS_DESC_SIZE,
				 DMA_BIDIRECTIONAL);

		/* copy entries so we can call callback outside lock */
		saved_req.desc = request->desc;
		saved_req.callback = request->callback;
		saved_req.context = request->context;

		/* release request entry in fifo */
		smp_wmb();
		request->desc = NULL;

		/* increment fifo tail */
		priv->chan[ch].tail = (tail + 1) & (priv->fifo_len - 1);

		spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);

		atomic_dec(&priv->chan[ch].submit_count);

		saved_req.callback(dev, saved_req.desc, saved_req.context,
				   status);
		/* channel may resume processing in single desc error case */
		if (error && !reset_ch && status == error)
			return;
		spin_lock_irqsave(&priv->chan[ch].tail_lock, flags);
		tail = priv->chan[ch].tail;
	}

	spin_unlock_irqrestore(&priv->chan[ch].tail_lock, flags);
}
/*
 * process completed requests for channels that have done status
 */
#define DEF_TALITOS1_DONE(name, ch_done_mask)				\
static void talitos1_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 0x10000000)					\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & 0x40000000)					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & 0x00010000)					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & 0x00040000)					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	clrbits32(priv->reg + TALITOS_IMR_LO, TALITOS1_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS1_DONE(4ch, TALITOS1_ISR_4CHDONE)
DEF_TALITOS1_DONE(ch0, TALITOS1_ISR_CH_0_DONE)
#define DEF_TALITOS2_DONE(name, ch_done_mask)				\
static void talitos2_done_##name(unsigned long data)			\
{									\
	struct device *dev = (struct device *)data;			\
	struct talitos_private *priv = dev_get_drvdata(dev);		\
	unsigned long flags;						\
									\
	if (ch_done_mask & 1)						\
		flush_channel(dev, 0, 0, 0);				\
	if (ch_done_mask & (1 << 2))					\
		flush_channel(dev, 1, 0, 0);				\
	if (ch_done_mask & (1 << 4))					\
		flush_channel(dev, 2, 0, 0);				\
	if (ch_done_mask & (1 << 6))					\
		flush_channel(dev, 3, 0, 0);				\
									\
	/* At this point, all completed channels have been processed */\
	/* Unmask done interrupts for channels completed later on. */	\
	spin_lock_irqsave(&priv->reg_lock, flags);			\
	setbits32(priv->reg + TALITOS_IMR, ch_done_mask);		\
	setbits32(priv->reg + TALITOS_IMR_LO, TALITOS2_IMR_LO_INIT);	\
	spin_unlock_irqrestore(&priv->reg_lock, flags);			\
}

DEF_TALITOS2_DONE(4ch, TALITOS2_ISR_4CHDONE)
DEF_TALITOS2_DONE(ch0, TALITOS2_ISR_CH_0_DONE)
DEF_TALITOS2_DONE(ch0_2, TALITOS2_ISR_CH_0_2_DONE)
DEF_TALITOS2_DONE(ch1_3, TALITOS2_ISR_CH_1_3_DONE)
/*
 * locate current (offending) descriptor
 */
static u32 current_desc_hdr(struct device *dev, int ch)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int tail, iter;
	dma_addr_t cur_desc;

	cur_desc = ((u64)in_be32(priv->chan[ch].reg + TALITOS_CDPR)) << 32;
	cur_desc |= in_be32(priv->chan[ch].reg + TALITOS_CDPR_LO);

	if (!cur_desc) {
		dev_err(dev, "CDPR is NULL, giving up search for offending descriptor\n");
		return 0;
	}

	tail = priv->chan[ch].tail;

	iter = tail;
	while (priv->chan[ch].fifo[iter].dma_desc != cur_desc &&
	       priv->chan[ch].fifo[iter].desc->next_desc != cur_desc) {
		iter = (iter + 1) & (priv->fifo_len - 1);
		if (iter == tail) {
			dev_err(dev, "couldn't locate current descriptor\n");
			return 0;
		}
	}

	if (priv->chan[ch].fifo[iter].desc->next_desc == cur_desc)
		return (priv->chan[ch].fifo[iter].desc + 1)->hdr;

	return priv->chan[ch].fifo[iter].desc->hdr;
}
/*
 * user diagnostics; report root cause of error based on execution unit status
 */
static void report_eu_error(struct device *dev, int ch, u32 desc_hdr)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int i;

	if (!desc_hdr)
		desc_hdr = in_be32(priv->chan[ch].reg + TALITOS_DESCBUF);

	switch (desc_hdr & DESC_HDR_SEL0_MASK) {
	case DESC_HDR_SEL0_AFEU:
		dev_err(dev, "AFEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_afeu + TALITOS_EUISR),
			in_be32(priv->reg_afeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_DEU:
		dev_err(dev, "DEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_deu + TALITOS_EUISR),
			in_be32(priv->reg_deu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_MDEUA:
	case DESC_HDR_SEL0_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_RNG:
		dev_err(dev, "RNGUISR 0x%08x_%08x\n",
			in_be32(priv->reg_rngu + TALITOS_ISR),
			in_be32(priv->reg_rngu + TALITOS_ISR_LO));
		break;
	case DESC_HDR_SEL0_PKEU:
		dev_err(dev, "PKEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_AESU:
		dev_err(dev, "AESUISR 0x%08x_%08x\n",
			in_be32(priv->reg_aesu + TALITOS_EUISR),
			in_be32(priv->reg_aesu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL0_KEU:
		dev_err(dev, "KEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_pkeu + TALITOS_EUISR),
			in_be32(priv->reg_pkeu + TALITOS_EUISR_LO));
		break;
	}

	switch (desc_hdr & DESC_HDR_SEL1_MASK) {
	case DESC_HDR_SEL1_MDEUA:
	case DESC_HDR_SEL1_MDEUB:
		dev_err(dev, "MDEUISR 0x%08x_%08x\n",
			in_be32(priv->reg_mdeu + TALITOS_EUISR),
			in_be32(priv->reg_mdeu + TALITOS_EUISR_LO));
		break;
	case DESC_HDR_SEL1_CRCU:
		dev_err(dev, "CRCUISR 0x%08x_%08x\n",
			in_be32(priv->reg_crcu + TALITOS_EUISR),
			in_be32(priv->reg_crcu + TALITOS_EUISR_LO));
		break;
	}

	for (i = 0; i < 8; i++)
		dev_err(dev, "DESCBUF 0x%08x_%08x\n",
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF + 8*i),
			in_be32(priv->chan[ch].reg + TALITOS_DESCBUF_LO + 8*i));
}
/*
 * recover from error interrupts
 */
static void talitos_error(struct device *dev, u32 isr, u32 isr_lo)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;
	int ch, error, reset_dev = 0;
	u32 v_lo;
	bool is_sec1 = has_ftr_sec1(priv);
	int reset_ch = is_sec1 ? 1 : 0; /* only SEC2 supports continuation */

	for (ch = 0; ch < priv->num_channels; ch++) {
		/* skip channels without errors */
		if (is_sec1) {
			/* bits 29, 31, 17, 19 */
			if (!(isr & (1 << (29 + (ch & 1) * 2 - (ch & 2) * 6))))
				continue;
		} else {
			if (!(isr & (1 << (ch * 2 + 1))))
				continue;
		}

		error = -EINVAL;

		v_lo = in_be32(priv->chan[ch].reg + TALITOS_CCPSR_LO);

		if (v_lo & TALITOS_CCPSR_LO_DOF) {
			dev_err(dev, "double fetch fifo overflow error\n");
			error = -EAGAIN;
			reset_ch = 1;
		}
		if (v_lo & TALITOS_CCPSR_LO_SOF) {
			/* h/w dropped descriptor */
			dev_err(dev, "single fetch fifo overflow error\n");
			error = -EAGAIN;
		}
		if (v_lo & TALITOS_CCPSR_LO_MDTE)
			dev_err(dev, "master data transfer error\n");
		if (v_lo & TALITOS_CCPSR_LO_SGDLZ)
			dev_err(dev, is_sec1 ? "pointer not complete error\n"
					     : "s/g data length zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_FPZ)
			dev_err(dev, is_sec1 ? "parity error\n"
					     : "fetch pointer zero error\n");
		if (v_lo & TALITOS_CCPSR_LO_IDH)
			dev_err(dev, "illegal descriptor header error\n");
		if (v_lo & TALITOS_CCPSR_LO_IEU)
			dev_err(dev, is_sec1 ? "static assignment error\n"
					     : "invalid exec unit error\n");
		if (v_lo & TALITOS_CCPSR_LO_EU)
			report_eu_error(dev, ch, current_desc_hdr(dev, ch));
		if (!is_sec1) {
			if (v_lo & TALITOS_CCPSR_LO_GB)
				dev_err(dev, "gather boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_GRL)
				dev_err(dev, "gather return/length error\n");
			if (v_lo & TALITOS_CCPSR_LO_SB)
				dev_err(dev, "scatter boundary error\n");
			if (v_lo & TALITOS_CCPSR_LO_SRL)
				dev_err(dev, "scatter return/length error\n");
		}

		flush_channel(dev, ch, error, reset_ch);

		if (reset_ch) {
			reset_channel(dev, ch);
		} else {
			setbits32(priv->chan[ch].reg + TALITOS_CCCR,
				  TALITOS2_CCCR_CONT);
			setbits32(priv->chan[ch].reg + TALITOS_CCCR_LO, 0);
			while ((in_be32(priv->chan[ch].reg + TALITOS_CCCR) &
			       TALITOS2_CCCR_CONT) && --timeout)
				cpu_relax();
			if (timeout == 0) {
				dev_err(dev, "failed to restart channel %d\n",
					ch);
				reset_dev = 1;
			}
		}
	}
	if (reset_dev || (is_sec1 && isr & ~TALITOS1_ISR_4CHERR) ||
	    (!is_sec1 && isr & ~TALITOS2_ISR_4CHERR) || isr_lo) {
		if (is_sec1 && (isr_lo & TALITOS1_ISR_TEA_ERR))
			dev_err(dev, "TEA error: ISR 0x%08x_%08x\n",
				isr, isr_lo);
		else
			dev_err(dev, "done overflow, internal time out, or "
				"rngu error: ISR 0x%08x_%08x\n", isr, isr_lo);

		/* purge request queues */
		for (ch = 0; ch < priv->num_channels; ch++)
			flush_channel(dev, ch, -EIO, 1);

		/* reset and reinitialize the device */
		init_device(dev);
	}
}
#define DEF_TALITOS1_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos1_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo & TALITOS1_IMR_LO_INIT)) {   \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			setbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS1_INTERRUPT(4ch, TALITOS1_ISR_4CHDONE, TALITOS1_ISR_4CHERR, 0)
#define DEF_TALITOS2_INTERRUPT(name, ch_done_mask, ch_err_mask, tlet)	       \
static irqreturn_t talitos2_interrupt_##name(int irq, void *data)	       \
{									       \
	struct device *dev = data;					       \
	struct talitos_private *priv = dev_get_drvdata(dev);		       \
	u32 isr, isr_lo;						       \
	unsigned long flags;						       \
									       \
	spin_lock_irqsave(&priv->reg_lock, flags);			       \
	isr = in_be32(priv->reg + TALITOS_ISR);				       \
	isr_lo = in_be32(priv->reg + TALITOS_ISR_LO);			       \
	/* Acknowledge interrupt */					       \
	out_be32(priv->reg + TALITOS_ICR, isr & (ch_done_mask | ch_err_mask)); \
	out_be32(priv->reg + TALITOS_ICR_LO, isr_lo);			       \
									       \
	if (unlikely(isr & ch_err_mask || isr_lo)) {			       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
		talitos_error(dev, isr & ch_err_mask, isr_lo);		       \
	}								       \
	else {								       \
		if (likely(isr & ch_done_mask)) {			       \
			/* mask further done interrupts. */		       \
			clrbits32(priv->reg + TALITOS_IMR, ch_done_mask);      \
			/* done_task will unmask done interrupts at exit */    \
			tasklet_schedule(&priv->done_task[tlet]);	       \
		}							       \
		spin_unlock_irqrestore(&priv->reg_lock, flags);		       \
	}								       \
									       \
	return (isr & (ch_done_mask | ch_err_mask) || isr_lo) ? IRQ_HANDLED :  \
								IRQ_NONE;      \
}

DEF_TALITOS2_INTERRUPT(4ch, TALITOS2_ISR_4CHDONE, TALITOS2_ISR_4CHERR, 0)
DEF_TALITOS2_INTERRUPT(ch0_2, TALITOS2_ISR_CH_0_2_DONE, TALITOS2_ISR_CH_0_2_ERR,
		       0)
DEF_TALITOS2_INTERRUPT(ch1_3, TALITOS2_ISR_CH_1_3_DONE, TALITOS2_ISR_CH_1_3_ERR,
		       1)
/*
 * hwrng
 */
static int talitos_rng_data_present(struct hwrng *rng, int wait)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	u32 ofl;
	int i;

	for (i = 0; i < 20; i++) {
		ofl = in_be32(priv->reg_rngu + TALITOS_EUSR_LO) &
		      TALITOS_RNGUSR_LO_OFL;
		if (ofl || !wait)
			break;
		udelay(10);
	}

	return !!ofl;
}
static int talitos_rng_data_read(struct hwrng *rng, u32 *data)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);

	/* rng fifo requires 64-bit accesses */
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO);
	*data = in_be32(priv->reg_rngu + TALITOS_EU_FIFO_LO);

	return sizeof(u32);
}
static int talitos_rng_init(struct hwrng *rng)
{
	struct device *dev = (struct device *)rng->priv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	unsigned int timeout = TALITOS_TIMEOUT;

	setbits32(priv->reg_rngu + TALITOS_EURCR_LO, TALITOS_RNGURCR_LO_SR);
	while (!(in_be32(priv->reg_rngu + TALITOS_EUSR_LO)
		 & TALITOS_RNGUSR_LO_RD)
	       && --timeout)
		cpu_relax();
	if (timeout == 0) {
		dev_err(dev, "failed to reset rng hw\n");
		return -ENODEV;
	}

	/* start generating */
	setbits32(priv->reg_rngu + TALITOS_EUDSR_LO, 0);

	return 0;
}
static int talitos_register_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	int err;

	priv->rng.name		= dev_driver_string(dev),
	priv->rng.init		= talitos_rng_init,
	priv->rng.data_present	= talitos_rng_data_present,
	priv->rng.data_read	= talitos_rng_data_read,
	priv->rng.priv		= (unsigned long)dev;

	err = hwrng_register(&priv->rng);
	if (!err)
		priv->rng_registered = true;

	return err;
}

static void talitos_unregister_rng(struct device *dev)
{
	struct talitos_private *priv = dev_get_drvdata(dev);

	if (!priv->rng_registered)
		return;

	hwrng_unregister(&priv->rng);
	priv->rng_registered = false;
}
/*
 * crypto alg
 */
#define TALITOS_CRA_PRIORITY		3000
/*
 * Defines a priority for doing AEAD with descriptors type
 * HMAC_SNOOP_NO_AFEA (HSNA) instead of type IPSEC_ESP
 */
#define TALITOS_CRA_PRIORITY_AEAD_HSNA	(TALITOS_CRA_PRIORITY - 1)
#define TALITOS_MAX_KEY_SIZE		(AES_MAX_KEY_SIZE + SHA512_BLOCK_SIZE)
#define TALITOS_MAX_IV_LENGTH		16 /* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
struct talitos_ctx {
	struct device *dev;
	int ch;
	__be32 desc_hdr_template;
	u8 key[TALITOS_MAX_KEY_SIZE];
	u8 iv[TALITOS_MAX_IV_LENGTH];
	dma_addr_t dma_key;
	unsigned int keylen;
	unsigned int enckeylen;
	unsigned int authkeylen;
};

#define HASH_MAX_BLOCK_SIZE		SHA512_BLOCK_SIZE
#define TALITOS_MDEU_MAX_CONTEXT_SIZE	TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512
struct talitos_ahash_req_ctx {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	unsigned int hw_context_size;
	u8 buf[2][HASH_MAX_BLOCK_SIZE];
	int buf_idx;
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
	struct scatterlist bufsl[2];
	struct scatterlist *psrc;
};

struct talitos_export_state {
	u32 hw_context[TALITOS_MDEU_MAX_CONTEXT_SIZE / sizeof(u32)];
	u8 buf[HASH_MAX_BLOCK_SIZE];
	unsigned int swinit;
	unsigned int first;
	unsigned int last;
	unsigned int to_hash_later;
	unsigned int nbuf;
};
static int aead_setkey(struct crypto_aead *authenc,
		       const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = ctx->dev;
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen + keys.enckeylen > TALITOS_MAX_KEY_SIZE)
		goto badkey;

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(ctx->key, keys.authkey, keys.authkeylen);
	memcpy(&ctx->key[keys.authkeylen], keys.enckey, keys.enckeylen);

	ctx->keylen = keys.authkeylen + keys.enckeylen;
	ctx->enckeylen = keys.enckeylen;
	ctx->authkeylen = keys.authkeylen;
	ctx->dma_key = dma_map_single(dev, ctx->key, ctx->keylen,
				      DMA_TO_DEVICE);

	return 0;

badkey:
	crypto_aead_set_flags(authenc, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * talitos_edesc - s/w-extended descriptor
 * @src_nents: number of segments in input scatterlist
 * @dst_nents: number of segments in output scatterlist
 * @icv_ool: whether ICV is out-of-line
 * @iv_dma: dma address of iv for checking continuity and link table
 * @dma_len: length of dma mapped link_tbl space
 * @dma_link_tbl: bus physical address of link_tbl/buf
 * @desc: h/w descriptor
 * @link_tbl: input and output h/w link tables (if {src,dst}_nents > 1) (SEC2)
 * @buf: input and output buffer (if {src,dst}_nents > 1) (SEC1)
 *
 * if decrypting (with authcheck), or either one of src_nents or dst_nents
 * is greater than 1, an integrity check value is concatenated to the end
 * of link_tbl data
 */
struct talitos_edesc {
	int src_nents;
	int dst_nents;
	bool icv_ool;
	dma_addr_t iv_dma;
	int dma_len;
	dma_addr_t dma_link_tbl;
	struct talitos_desc desc;
	union {
		struct talitos_ptr link_tbl[0];
		u8 buf[0];
	};
};
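
#if 0
/*
 * Illustrative sketch (editorial example, not part of the driver): how
 * the SEC2 link table space behind a talitos_edesc is sized. This mirrors
 * the computation in talitos_edesc_alloc() below; the function name is
 * local to the example.
 */
static int example_sec2_dma_len(int src_nents, int dst_nents,
				unsigned int authsize)
{
	/*
	 * one link table entry per src/dst segment, plus two extra entries
	 * for the associated data and the generated ICV (the "+ 2" in the
	 * comment above), plus room for two ICVs (stashed and generated)
	 * concatenated after the tables.
	 */
	return (src_nents + dst_nents + 2) * sizeof(struct talitos_ptr) +
	       authsize * 2;
}
#endif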
static void talitos_sg_unmap(struct device *dev,
			     struct talitos_edesc *edesc,
			     struct scatterlist *src,
			     struct scatterlist *dst,
			     unsigned int len, unsigned int offset)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	unsigned int src_nents = edesc->src_nents ? : 1;
	unsigned int dst_nents = edesc->dst_nents ? : 1;

	if (is_sec1 && dst && dst_nents > 1) {
		dma_sync_single_for_device(dev, edesc->dma_link_tbl + offset,
					   len, DMA_FROM_DEVICE);
		sg_pcopy_from_buffer(dst, dst_nents, edesc->buf + offset, len,
				     offset);
	}
	if (src != dst) {
		if (src_nents == 1 || !is_sec1)
			dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE);

		if (dst && (dst_nents == 1 || !is_sec1))
			dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE);
	} else if (src_nents == 1 || !is_sec1) {
		dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL);
	}
}
static void ipsec_esp_unmap(struct device *dev,
			    struct talitos_edesc *edesc,
			    struct aead_request *areq)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	unsigned int ivsize = crypto_aead_ivsize(aead);
	bool is_ipsec_esp = edesc->desc.hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &edesc->desc.ptr[is_ipsec_esp ? 2 : 3];

	if (is_ipsec_esp)
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[6],
					 DMA_FROM_DEVICE);
	unmap_single_talitos_ptr(dev, civ_ptr, DMA_TO_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->cryptlen,
			 areq->assoclen);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (!is_ipsec_esp) {
		unsigned int dst_nents = edesc->dst_nents ? : 1;

		sg_pcopy_to_buffer(areq->dst, dst_nents, ctx->iv, ivsize,
				   areq->assoclen + areq->cryptlen - ivsize);
	}
}
/*
 * ipsec_esp descriptor callbacks
 */
static void ipsec_esp_encrypt_done(struct device *dev,
				   struct talitos_desc *desc, void *context,
				   int err)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct aead_request *areq = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, areq);

	/* copy the generated ICV to dst */
	if (edesc->icv_ool) {
		if (is_sec1)
			icvdata = edesc->buf + areq->assoclen + areq->cryptlen;
		else
			icvdata = &edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
		sg = sg_last(areq->dst, edesc->dst_nents);
		memcpy((char *)sg_virt(sg) + sg->length - authsize,
		       icvdata, authsize);
	}

	dma_unmap_single(dev, edesc->iv_dma, ivsize, DMA_TO_DEVICE);

	kfree(edesc);

	aead_request_complete(areq, err);
}
static void ipsec_esp_decrypt_swauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	char *oicv, *icv;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	if (!err) {
		/* auth check */
		sg = sg_last(req->dst, edesc->dst_nents ? : 1);
		icv = (char *)sg_virt(sg) + sg->length - authsize;

		if (edesc->dma_len) {
			if (is_sec1)
				oicv = (char *)&edesc->dma_link_tbl +
				       req->assoclen + req->cryptlen;
			else
				oicv = (char *)
				       &edesc->link_tbl[edesc->src_nents +
							edesc->dst_nents + 2];
			if (edesc->icv_ool)
				icv = oicv + authsize;
		} else
			oicv = (char *)&edesc->link_tbl[0];

		err = crypto_memneq(oicv, icv, authsize) ? -EBADMSG : 0;
	}

	kfree(edesc);

	aead_request_complete(req, err);
}
static void ipsec_esp_decrypt_hwauth_done(struct device *dev,
					  struct talitos_desc *desc,
					  void *context, int err)
{
	struct aead_request *req = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	ipsec_esp_unmap(dev, edesc, req);

	/* check ICV auth status */
	if (!err && ((desc->hdr_lo & DESC_HDR_LO_ICCR1_MASK) !=
		     DESC_HDR_LO_ICCR1_PASS))
		err = -EBADMSG;

	kfree(edesc);

	aead_request_complete(req, err);
}
/*
 * convert scatterlist to SEC h/w link table format
 * stop at cryptlen bytes
 */
static int sg_to_link_tbl_offset(struct scatterlist *sg, int sg_count,
				 unsigned int offset, int cryptlen,
				 struct talitos_ptr *link_tbl_ptr)
{
	int n_sg = sg_count;
	int count = 0;

	while (cryptlen && sg && n_sg--) {
		unsigned int len = sg_dma_len(sg);

		if (offset >= len) {
			offset -= len;
			goto next;
		}

		len -= offset;

		if (len > cryptlen)
			len = cryptlen;

		to_talitos_ptr(link_tbl_ptr + count,
			       sg_dma_address(sg) + offset, len, 0);
		to_talitos_ptr_ext_set(link_tbl_ptr + count, 0, 0);
		count++;
		cryptlen -= len;
		offset = 0;

next:
		sg = sg_next(sg);
	}

	/* tag end of link table */
	if (count > 0)
		to_talitos_ptr_ext_set(link_tbl_ptr + count - 1,
				       DESC_PTR_LNKTBL_RETURN, 0);

	return count;
}
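
#if 0
/*
 * Illustrative sketch (editorial example, not part of the driver):
 * walking a dma-mapped three-segment scatterlist with offset 0 and a
 * cryptlen that spans all three segments yields one link table entry
 * per segment, with only the last entry tagged RETURN. Names below are
 * local to the example.
 */
static void example_link_tbl(struct scatterlist *sg,
			     struct talitos_ptr *tbl)
{
	/* e.g. three 16-byte DMA segments, hashing/ciphering 48 bytes */
	int count = sg_to_link_tbl_offset(sg, 3, 0, 48, tbl);

	/*
	 * here count == 3; tbl[0..2] point at the segments and
	 * tbl[2].j_extent carries DESC_PTR_LNKTBL_RETURN.
	 */
	(void)count;
}
#endif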
static int talitos_sg_map_ext(struct device *dev, struct scatterlist *src,
			      unsigned int len, struct talitos_edesc *edesc,
			      struct talitos_ptr *ptr, int sg_count,
			      unsigned int offset, int tbl_off, int elen)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (!src) {
		to_talitos_ptr(ptr, 0, 0, is_sec1);
		return 1;
	}
	to_talitos_ptr_ext_set(ptr, elen, is_sec1);
	if (sg_count == 1) {
		to_talitos_ptr(ptr, sg_dma_address(src) + offset, len, is_sec1);
		return sg_count;
	}
	if (is_sec1) {
		to_talitos_ptr(ptr, edesc->dma_link_tbl + offset, len, is_sec1);
		return sg_count;
	}
	sg_count = sg_to_link_tbl_offset(src, sg_count, offset, len + elen,
					 &edesc->link_tbl[tbl_off]);
	if (sg_count == 1) {
		/* Only one segment now, so no link tbl needed */
		copy_talitos_ptr(ptr, &edesc->link_tbl[tbl_off], is_sec1);
		return sg_count;
	}
	to_talitos_ptr(ptr, edesc->dma_link_tbl +
			    tbl_off * sizeof(struct talitos_ptr), len, is_sec1);
	to_talitos_ptr_ext_or(ptr, DESC_PTR_LNKTBL_JUMP, is_sec1);

	return sg_count;
}
static int talitos_sg_map(struct device *dev, struct scatterlist *src,
			  unsigned int len, struct talitos_edesc *edesc,
			  struct talitos_ptr *ptr, int sg_count,
			  unsigned int offset, int tbl_off)
{
	return talitos_sg_map_ext(dev, src, len, edesc, ptr, sg_count, offset,
				  tbl_off, 0);
}
/*
 * fill in and submit ipsec_esp descriptor
 */
static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
		     void (*callback)(struct device *dev,
				      struct talitos_desc *desc,
				      void *context, int error))
{
	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(aead);
	struct talitos_ctx *ctx = crypto_aead_ctx(aead);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->cryptlen;
	unsigned int ivsize = crypto_aead_ivsize(aead);
	int tbl_off = 0;
	int sg_count, ret;
	int elen = 0;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	bool is_ipsec_esp = desc->hdr & DESC_HDR_TYPE_IPSEC_ESP;
	struct talitos_ptr *civ_ptr = &desc->ptr[is_ipsec_esp ? 2 : 3];
	struct talitos_ptr *ckey_ptr = &desc->ptr[is_ipsec_esp ? 3 : 2];

	/* hmac key */
	to_talitos_ptr(&desc->ptr[0], ctx->dma_key, ctx->authkeylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  areq->assoclen + cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);

	/* hmac data */
	ret = talitos_sg_map(dev, areq->src, areq->assoclen, edesc,
			     &desc->ptr[1], sg_count, 0, tbl_off);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher iv */
	to_talitos_ptr(civ_ptr, edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(ckey_ptr, ctx->dma_key + ctx->authkeylen,
		       ctx->enckeylen, is_sec1);

	/*
	 * cipher in
	 * map and adjust cipher len to aead request cryptlen.
	 * extent is bytes of HMAC postpended to ciphertext,
	 * typically 12 for ipsec
	 */
	if (is_ipsec_esp && (desc->hdr & DESC_HDR_MODE1_MDEU_CICV))
		elen = authsize;

	ret = talitos_sg_map_ext(dev, areq->src, cryptlen, edesc, &desc->ptr[4],
				 sg_count, areq->assoclen, tbl_off, elen);

	if (ret > 1) {
		tbl_off += ret;
		sync_needed = true;
	}

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
			     sg_count, areq->assoclen, tbl_off);

	if (is_ipsec_esp)
		to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);

	/* ICV data */
	if (ret > 1) {
		tbl_off += ret;
		edesc->icv_ool = true;
		sync_needed = true;

		if (is_ipsec_esp) {
			struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
			int offset = (edesc->src_nents + edesc->dst_nents + 2) *
				     sizeof(struct talitos_ptr) + authsize;

			/* Add an entry to the link table for ICV data */
			to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
			to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
					       is_sec1);

			/* icv data follows link tables */
			to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
				       authsize, is_sec1);
		} else {
			dma_addr_t addr = edesc->dma_link_tbl;

			if (is_sec1)
				addr += areq->assoclen + cryptlen;
			else
				addr += sizeof(struct talitos_ptr) * tbl_off;

			to_talitos_ptr(&desc->ptr[6], addr, authsize, is_sec1);
		}
	} else if (!is_ipsec_esp) {
		ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
				     &desc->ptr[6], sg_count, areq->assoclen +
							      cryptlen,
				     tbl_off);
		if (ret > 1) {
			tbl_off += ret;
			edesc->icv_ool = true;
			sync_needed = true;
		} else {
			edesc->icv_ool = false;
		}
	} else {
		edesc->icv_ool = false;
	}

	/* iv out */
	if (is_ipsec_esp)
		map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
				       DMA_FROM_DEVICE);

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len,
					   DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		ipsec_esp_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
/*
 * allocate and map the extended descriptor
 */
static struct talitos_edesc *talitos_edesc_alloc(struct device *dev,
						 struct scatterlist *src,
						 struct scatterlist *dst,
						 u8 *iv,
						 unsigned int assoclen,
						 unsigned int cryptlen,
						 unsigned int authsize,
						 unsigned int ivsize,
						 int icv_stashing,
						 u32 cryptoflags,
						 bool encrypt)
{
	struct talitos_edesc *edesc;
	int src_nents, dst_nents, alloc_len, dma_len, src_len, dst_len;
	dma_addr_t iv_dma = 0;
	gfp_t flags = cryptoflags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
		      GFP_ATOMIC;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int max_len = is_sec1 ? TALITOS1_MAX_DATA_LEN : TALITOS2_MAX_DATA_LEN;
	void *err;

	if (cryptlen + authsize > max_len) {
		dev_err(dev, "length exceeds h/w max limit\n");
		return ERR_PTR(-EINVAL);
	}

	if (ivsize)
		iv_dma = dma_map_single(dev, iv, ivsize, DMA_TO_DEVICE);

	if (!dst || dst == src) {
		src_len = assoclen + cryptlen + authsize;
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_nents = dst ? src_nents : 0;
		dst_len = 0;
	} else { /* dst && dst != src */
		src_len = assoclen + cryptlen + (encrypt ? 0 : authsize);
		src_nents = sg_nents_for_len(src, src_len);
		if (src_nents < 0) {
			dev_err(dev, "Invalid number of src SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		src_nents = (src_nents == 1) ? 0 : src_nents;
		dst_len = assoclen + cryptlen + (encrypt ? authsize : 0);
		dst_nents = sg_nents_for_len(dst, dst_len);
		if (dst_nents < 0) {
			dev_err(dev, "Invalid number of dst SG.\n");
			err = ERR_PTR(-EINVAL);
			goto error_sg;
		}
		dst_nents = (dst_nents == 1) ? 0 : dst_nents;
	}

	/*
	 * allocate space for base edesc plus the link tables,
	 * allowing for two separate entries for AD and generated ICV (+ 2),
	 * and space for two sets of ICVs (stashed and generated)
	 */
	alloc_len = sizeof(struct talitos_edesc);
	if (src_nents || dst_nents) {
		if (is_sec1)
			dma_len = (src_nents ? src_len : 0) +
				  (dst_nents ? dst_len : 0);
		else
			dma_len = (src_nents + dst_nents + 2) *
				  sizeof(struct talitos_ptr) + authsize * 2;
		alloc_len += dma_len;
	} else {
		dma_len = 0;
		alloc_len += icv_stashing ? authsize : 0;
	}

	/* if it's an ahash, add space for a second desc next to the first one */
	if (is_sec1 && !dst)
		alloc_len += sizeof(struct talitos_desc);

	edesc = kmalloc(alloc_len, GFP_DMA | flags);
	if (!edesc) {
		dev_err(dev, "could not allocate edescriptor\n");
		err = ERR_PTR(-ENOMEM);
		goto error_sg;
	}
	memset(&edesc->desc, 0, sizeof(edesc->desc));

	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->iv_dma = iv_dma;
	edesc->dma_len = dma_len;
	if (dma_len) {
		void *addr = &edesc->link_tbl[0];

		if (is_sec1 && !dst)
			addr += sizeof(struct talitos_desc);
		edesc->dma_link_tbl = dma_map_single(dev, addr,
						     edesc->dma_len,
						     DMA_BIDIRECTIONAL);
	}
	return edesc;
error_sg:
	if (iv_dma)
		dma_unmap_single(dev, iv_dma, ivsize, DMA_TO_DEVICE);
	return err;
}
static struct talitos_edesc *aead_edesc_alloc(struct aead_request *areq, u8 *iv,
					      int icv_stashing, bool encrypt)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(areq);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	unsigned int ivsize = crypto_aead_ivsize(authenc);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   iv, areq->assoclen, areq->cryptlen,
				   authsize, ivsize, icv_stashing,
				   areq->base.flags, encrypt);
}
static int aead_encrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 0, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return ipsec_esp(edesc, req, ipsec_esp_encrypt_done);
}
static int aead_decrypt(struct aead_request *req)
{
	struct crypto_aead *authenc = crypto_aead_reqtfm(req);
	unsigned int authsize = crypto_aead_authsize(authenc);
	struct talitos_ctx *ctx = crypto_aead_ctx(authenc);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	struct talitos_edesc *edesc;
	struct scatterlist *sg;
	void *icvdata;

	req->cryptlen -= authsize;

	/* allocate extended descriptor */
	edesc = aead_edesc_alloc(req, req->iv, 1, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	if ((priv->features & TALITOS_FTR_HW_AUTH_CHECK) &&
	    ((!edesc->src_nents && !edesc->dst_nents) ||
	     priv->features & TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT)) {

		/* decrypt and check the ICV */
		edesc->desc.hdr = ctx->desc_hdr_template |
				  DESC_HDR_DIR_INBOUND |
				  DESC_HDR_MODE1_MDEU_CICV;

		/* reset integrity check result bits */

		return ipsec_esp(edesc, req, ipsec_esp_decrypt_hwauth_done);
	}

	/* Have to check the ICV with software */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	/* stash incoming ICV for later cmp with ICV generated by the h/w */
	if (edesc->dma_len)
		icvdata = (char *)&edesc->link_tbl[edesc->src_nents +
						   edesc->dst_nents + 2];
	else
		icvdata = &edesc->link_tbl[0];

	sg = sg_last(req->src, edesc->src_nents ? : 1);

	memcpy(icvdata, (char *)sg_virt(sg) + sg->length - authsize, authsize);

	return ipsec_esp(edesc, req, ipsec_esp_decrypt_swauth_done);
}
static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
			     const u8 *key, unsigned int keylen)
{
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	u32 tmp[DES_EXPKEY_WORDS];

	if (keylen > TALITOS_MAX_KEY_SIZE) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	if (unlikely(crypto_ablkcipher_get_flags(cipher) &
		     CRYPTO_TFM_REQ_WEAK_KEY) &&
	    !des_ekey(tmp, key)) {
		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
		return -EINVAL;
	}

	if (ctx->keylen)
		dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);

	memcpy(&ctx->key, key, keylen);
	ctx->keylen = keylen;

	ctx->dma_key = dma_map_single(dev, ctx->key, keylen, DMA_TO_DEVICE);

	return 0;
}
static void common_nonsnoop_unmap(struct device *dev,
				  struct talitos_edesc *edesc,
				  struct ablkcipher_request *areq)
{
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, areq->src, areq->dst, areq->nbytes, 0);
	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1], DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);
}
static void ablkcipher_done(struct device *dev,
			    struct talitos_desc *desc, void *context,
			    int err)
{
	struct ablkcipher_request *areq = context;
	struct talitos_edesc *edesc;

	edesc = container_of(desc, struct talitos_edesc, desc);

	common_nonsnoop_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
static int common_nonsnoop(struct talitos_edesc *edesc,
			   struct ablkcipher_request *areq,
			   void (*callback) (struct device *dev,
					     struct talitos_desc *desc,
					     void *context, int error))
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	unsigned int cryptlen = areq->nbytes;
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);
	int sg_count, ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);

	/* first DWORD empty */

	/* cipher iv */
	to_talitos_ptr(&desc->ptr[1], edesc->iv_dma, ivsize, is_sec1);

	/* cipher key */
	to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen, is_sec1);

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_copy_to_buffer(areq->src, sg_count, edesc->buf,
				  cryptlen);
	else
		sg_count = dma_map_sg(dev, areq->src, sg_count,
				      (areq->src == areq->dst) ?
				      DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	/*
	 * cipher in
	 */
	sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
				  &desc->ptr[3], sg_count, 0, 0);
	if (sg_count > 1)
		sync_needed = true;

	/* cipher out */
	if (areq->src != areq->dst) {
		sg_count = edesc->dst_nents ? : 1;
		if (!is_sec1 || sg_count == 1)
			dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
	}

	ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[4],
			     sg_count, 0, (edesc->src_nents + 1));
	if (ret > 1)
		sync_needed = true;

	/* iv out */
	map_single_talitos_ptr(dev, &desc->ptr[5], ivsize, ctx->iv,
			       DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request *
						    areq, bool encrypt)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	unsigned int ivsize = crypto_ablkcipher_ivsize(cipher);

	return talitos_edesc_alloc(ctx->dev, areq->src, areq->dst,
				   areq->info, 0, areq->nbytes, 0, ivsize, 0,
				   areq->base.flags, encrypt);
}
static int ablkcipher_encrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, true);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	/* set encrypt */
	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_MODE0_ENCRYPT;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}

static int ablkcipher_decrypt(struct ablkcipher_request *areq)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
	struct talitos_edesc *edesc;

	/* allocate extended descriptor */
	edesc = ablkcipher_edesc_alloc(areq, false);
	if (IS_ERR(edesc))
		return PTR_ERR(edesc);

	edesc->desc.hdr = ctx->desc_hdr_template | DESC_HDR_DIR_INBOUND;

	return common_nonsnoop(edesc, areq, ablkcipher_done);
}
static void common_nonsnoop_hash_unmap(struct device *dev,
				       struct talitos_edesc *edesc,
				       struct ahash_request *areq)
{
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	struct talitos_desc *desc = &edesc->desc;
	struct talitos_desc *desc2 = desc + 1;

	unmap_single_talitos_ptr(dev, &edesc->desc.ptr[5], DMA_FROM_DEVICE);
	if (desc->next_desc &&
	    desc->ptr[5].ptr != desc2->ptr[5].ptr)
		unmap_single_talitos_ptr(dev, &desc2->ptr[5], DMA_FROM_DEVICE);

	talitos_sg_unmap(dev, edesc, req_ctx->psrc, NULL, 0, 0);

	/* When using hashctx-in, must unmap it. */
	if (from_talitos_ptr_len(&edesc->desc.ptr[1], is_sec1))
		unmap_single_talitos_ptr(dev, &edesc->desc.ptr[1],
					 DMA_TO_DEVICE);
	else if (desc->next_desc)
		unmap_single_talitos_ptr(dev, &desc2->ptr[1],
					 DMA_TO_DEVICE);

	if (is_sec1 && req_ctx->nbuf)
		unmap_single_talitos_ptr(dev, &desc->ptr[3],
					 DMA_TO_DEVICE);

	if (edesc->dma_len)
		dma_unmap_single(dev, edesc->dma_link_tbl, edesc->dma_len,
				 DMA_BIDIRECTIONAL);

	if (edesc->desc.next_desc)
		dma_unmap_single(dev, be32_to_cpu(edesc->desc.next_desc),
				 TALITOS_DESC_SIZE, DMA_BIDIRECTIONAL);
}
static void ahash_done(struct device *dev,
		       struct talitos_desc *desc, void *context,
		       int err)
{
	struct ahash_request *areq = context;
	struct talitos_edesc *edesc =
		container_of(desc, struct talitos_edesc, desc);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);

	if (!req_ctx->last && req_ctx->to_hash_later) {
		/* Position any partial block for next update/final/finup */
		req_ctx->buf_idx = (req_ctx->buf_idx + 1) & 1;
		req_ctx->nbuf = req_ctx->to_hash_later;
	}
	common_nonsnoop_hash_unmap(dev, edesc, areq);

	kfree(edesc);

	areq->base.complete(&areq->base, err);
}
/*
 * SEC1 doesn't like hashing of 0 sized message, so we do the padding
 * ourself and submit a padded block
 */
static void talitos_handle_buggy_hash(struct talitos_ctx *ctx,
				      struct talitos_edesc *edesc,
				      struct talitos_ptr *ptr)
{
	static u8 padded_hash[64] = {
		0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	};

	pr_err_once("Bug in SEC1, padding ourself\n");
	edesc->desc.hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
	map_single_talitos_ptr(ctx->dev, ptr, sizeof(padded_hash),
			       (char *)padded_hash, DMA_TO_DEVICE);
}
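
/*
 * Note (editorial comment, added for clarity): padded_hash above is the
 * MD5/SHA-style padding of an empty message: a 0x80 byte followed by
 * zeros, with the trailing 64-bit length field left at zero. Hashing this
 * one block with hardware padding disabled (DESC_HDR_MODE0_MDEU_PAD
 * cleared) therefore yields the digest of the empty message.
 */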
static int common_nonsnoop_hash(struct talitos_edesc *edesc,
				struct ahash_request *areq, unsigned int length,
				unsigned int offset,
				void (*callback) (struct device *dev,
						  struct talitos_desc *desc,
						  void *context, int error))
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct device *dev = ctx->dev;
	struct talitos_desc *desc = &edesc->desc;
	int ret;
	bool sync_needed = false;
	struct talitos_private *priv = dev_get_drvdata(dev);
	bool is_sec1 = has_ftr_sec1(priv);
	int sg_count;

	/* first DWORD empty */

	/* hash context in */
	if (!req_ctx->first || req_ctx->swinit) {
		map_single_talitos_ptr(dev, &desc->ptr[1],
				       req_ctx->hw_context_size,
				       (char *)req_ctx->hw_context,
				       DMA_TO_DEVICE);
		req_ctx->swinit = 0;
	}
	/* Indicate next op is not the first. */
	req_ctx->first = 0;

	/* HMAC key */
	if (ctx->keylen)
		to_talitos_ptr(&desc->ptr[2], ctx->dma_key, ctx->keylen,
			       is_sec1);

	if (is_sec1 && req_ctx->nbuf)
		length -= req_ctx->nbuf;

	sg_count = edesc->src_nents ?: 1;
	if (is_sec1 && sg_count > 1)
		sg_pcopy_to_buffer(req_ctx->psrc, sg_count,
				   edesc->buf + sizeof(struct talitos_desc),
				   length, req_ctx->nbuf);
	else if (length)
		sg_count = dma_map_sg(dev, req_ctx->psrc, sg_count,
				      DMA_TO_DEVICE);
	/*
	 * data in
	 */
	if (is_sec1 && req_ctx->nbuf) {
		map_single_talitos_ptr(dev, &desc->ptr[3], req_ctx->nbuf,
				       req_ctx->buf[req_ctx->buf_idx],
				       DMA_TO_DEVICE);
	} else {
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc->ptr[3], sg_count, offset, 0);
		if (sg_count > 1)
			sync_needed = true;
	}

	/* fifth DWORD empty */

	/* hash/HMAC out -or- hash context out */
	if (req_ctx->last)
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       crypto_ahash_digestsize(tfm),
				       areq->result, DMA_FROM_DEVICE);
	else
		map_single_talitos_ptr(dev, &desc->ptr[5],
				       req_ctx->hw_context_size,
				       req_ctx->hw_context, DMA_FROM_DEVICE);

	/* last DWORD empty */

	if (is_sec1 && from_talitos_ptr_len(&desc->ptr[3], true) == 0)
		talitos_handle_buggy_hash(ctx, edesc, &desc->ptr[3]);

	if (is_sec1 && req_ctx->nbuf && length) {
		struct talitos_desc *desc2 = desc + 1;
		dma_addr_t next_desc;

		memset(desc2, 0, sizeof(*desc2));
		desc2->hdr = desc->hdr;
		desc2->hdr &= ~DESC_HDR_MODE0_MDEU_INIT;
		desc2->hdr1 = desc2->hdr;
		desc->hdr &= ~DESC_HDR_MODE0_MDEU_PAD;
		desc->hdr |= DESC_HDR_MODE0_MDEU_CONT;
		desc->hdr &= ~DESC_HDR_DONE_NOTIFY;

		if (desc->ptr[1].ptr)
			copy_talitos_ptr(&desc2->ptr[1], &desc->ptr[1],
					 is_sec1);
		else
			map_single_talitos_ptr(dev, &desc2->ptr[1],
					       req_ctx->hw_context_size,
					       req_ctx->hw_context,
					       DMA_TO_DEVICE);
		copy_talitos_ptr(&desc2->ptr[2], &desc->ptr[2], is_sec1);
		sg_count = talitos_sg_map(dev, req_ctx->psrc, length, edesc,
					  &desc2->ptr[3], sg_count, offset, 0);
		if (sg_count > 1)
			sync_needed = true;
		copy_talitos_ptr(&desc2->ptr[5], &desc->ptr[5], is_sec1);
		if (req_ctx->last)
			map_single_talitos_ptr(dev, &desc->ptr[5],
					       req_ctx->hw_context_size,
					       req_ctx->hw_context,
					       DMA_FROM_DEVICE);

		next_desc = dma_map_single(dev, &desc2->hdr1, TALITOS_DESC_SIZE,
					   DMA_BIDIRECTIONAL);
		desc->next_desc = cpu_to_be32(next_desc);
	}

	if (sync_needed)
		dma_sync_single_for_device(dev, edesc->dma_link_tbl,
					   edesc->dma_len, DMA_BIDIRECTIONAL);

	ret = talitos_submit(dev, ctx->ch, desc, callback, areq);
	if (ret != -EINPROGRESS) {
		common_nonsnoop_hash_unmap(dev, edesc, areq);
		kfree(edesc);
	}
	return ret;
}
static struct talitos_edesc *ahash_edesc_alloc(struct ahash_request *areq,
					       unsigned int nbytes)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
	struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
	struct talitos_private *priv = dev_get_drvdata(ctx->dev);
	bool is_sec1 = has_ftr_sec1(priv);

	if (is_sec1)
		nbytes -= req_ctx->nbuf;

	return talitos_edesc_alloc(ctx->dev, req_ctx->psrc, NULL, NULL, 0,
				   nbytes, 0, 0, 0, areq->base.flags, false);
}
1913 static int ahash_init(struct ahash_request *areq)
1915 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1916 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1917 unsigned int size;
1919 /* Initialize the context */
1920 req_ctx->buf_idx = 0;
1921 req_ctx->nbuf = 0;
1922 req_ctx->first = 1; /* first indicates h/w must init its context */
1923 req_ctx->swinit = 0; /* assume h/w init of context */
1924 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
1925 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
1926 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
1927 req_ctx->hw_context_size = size;
1929 return 0;
/*
 * on h/w without explicit sha224 support, we initialize h/w context
 * manually with sha224 constants, and tell it to run sha256.
 */
1936 static int ahash_init_sha224_swinit(struct ahash_request *areq)
1938 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1940 ahash_init(areq);
req_ctx->swinit = 1; /* prevent h/w initting context with sha256 values */
1943 req_ctx->hw_context[0] = SHA224_H0;
1944 req_ctx->hw_context[1] = SHA224_H1;
1945 req_ctx->hw_context[2] = SHA224_H2;
1946 req_ctx->hw_context[3] = SHA224_H3;
1947 req_ctx->hw_context[4] = SHA224_H4;
1948 req_ctx->hw_context[5] = SHA224_H5;
1949 req_ctx->hw_context[6] = SHA224_H6;
1950 req_ctx->hw_context[7] = SHA224_H7;
1952 /* init 64-bit count */
1953 req_ctx->hw_context[8] = 0;
1954 req_ctx->hw_context[9] = 0;
1956 return 0;
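/*
 * Editor's note -- an equivalent sketch (not driver code) of the IV
 * load above, using the SHA224_H0..SHA224_H7 constants from
 * <crypto/sha.h>; shown only to make the pattern explicit.  The
 * function name is hypothetical.
 */
static inline void talitos_sha224_swinit_sketch(u32 *hw_context)
{
	static const u32 sha224_iv[8] = {
		SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
		SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
	};
	int i;

	for (i = 0; i < 8; i++)
		hw_context[i] = sha224_iv[i];

	/* the 64-bit message length counter starts at zero */
	hw_context[8] = 0;
	hw_context[9] = 0;
}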
1959 static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
1961 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
1962 struct talitos_ctx *ctx = crypto_ahash_ctx(tfm);
1963 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
1964 struct talitos_edesc *edesc;
1965 unsigned int blocksize =
1966 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
1967 unsigned int nbytes_to_hash;
1968 unsigned int to_hash_later;
1969 unsigned int nsg;
1970 int nents;
1971 struct device *dev = ctx->dev;
1972 struct talitos_private *priv = dev_get_drvdata(dev);
1973 bool is_sec1 = has_ftr_sec1(priv);
1974 int offset = 0;
1975 u8 *ctx_buf = req_ctx->buf[req_ctx->buf_idx];
1977 if (!req_ctx->last && (nbytes + req_ctx->nbuf <= blocksize)) {
1978 /* Buffer up to one whole block */
1979 nents = sg_nents_for_len(areq->src, nbytes);
1980 if (nents < 0) {
1981 dev_err(ctx->dev, "Invalid number of src SG.\n");
1982 return nents;
1984 sg_copy_to_buffer(areq->src, nents,
1985 ctx_buf + req_ctx->nbuf, nbytes);
1986 req_ctx->nbuf += nbytes;
1987 return 0;
1990 /* At least (blocksize + 1) bytes are available to hash */
1991 nbytes_to_hash = nbytes + req_ctx->nbuf;
1992 to_hash_later = nbytes_to_hash & (blocksize - 1);
1994 if (req_ctx->last)
1995 to_hash_later = 0;
1996 else if (to_hash_later)
1997 /* There is a partial block. Hash the full block(s) now */
1998 nbytes_to_hash -= to_hash_later;
1999 else {
2000 /* Keep one block buffered */
2001 nbytes_to_hash -= blocksize;
2002 to_hash_later = blocksize;
2005 /* Chain in any previously buffered data */
2006 if (!is_sec1 && req_ctx->nbuf) {
2007 nsg = (req_ctx->nbuf < nbytes_to_hash) ? 2 : 1;
2008 sg_init_table(req_ctx->bufsl, nsg);
2009 sg_set_buf(req_ctx->bufsl, ctx_buf, req_ctx->nbuf);
2010 if (nsg > 1)
2011 sg_chain(req_ctx->bufsl, 2, areq->src);
2012 req_ctx->psrc = req_ctx->bufsl;
2013 } else if (is_sec1 && req_ctx->nbuf && req_ctx->nbuf < blocksize) {
2014 if (nbytes_to_hash > blocksize)
2015 offset = blocksize - req_ctx->nbuf;
2016 else
2017 offset = nbytes_to_hash - req_ctx->nbuf;
2018 nents = sg_nents_for_len(areq->src, offset);
2019 if (nents < 0) {
2020 dev_err(ctx->dev, "Invalid number of src SG.\n");
2021 return nents;
2023 sg_copy_to_buffer(areq->src, nents,
2024 ctx_buf + req_ctx->nbuf, offset);
2025 req_ctx->nbuf += offset;
2026 req_ctx->psrc = areq->src;
2027 } else
2028 req_ctx->psrc = areq->src;
2030 if (to_hash_later) {
2031 nents = sg_nents_for_len(areq->src, nbytes);
2032 if (nents < 0) {
2033 dev_err(ctx->dev, "Invalid number of src SG.\n");
2034 return nents;
2036 sg_pcopy_to_buffer(areq->src, nents,
2037 req_ctx->buf[(req_ctx->buf_idx + 1) & 1],
2038 to_hash_later,
2039 nbytes - to_hash_later);
2041 req_ctx->to_hash_later = to_hash_later;
2043 /* Allocate extended descriptor */
2044 edesc = ahash_edesc_alloc(areq, nbytes_to_hash);
2045 if (IS_ERR(edesc))
2046 return PTR_ERR(edesc);
2048 edesc->desc.hdr = ctx->desc_hdr_template;
2050 /* On last one, request SEC to pad; otherwise continue */
2051 if (req_ctx->last)
2052 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_PAD;
2053 else
2054 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_CONT;
2056 /* request SEC to INIT hash. */
2057 if (req_ctx->first && !req_ctx->swinit)
2058 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_INIT;
/* When the tfm context has a keylen, it's an HMAC.
 * A first or last (ie. not middle) descriptor must request HMAC.
 */
2063 if (ctx->keylen && (req_ctx->first || req_ctx->last))
2064 edesc->desc.hdr |= DESC_HDR_MODE0_MDEU_HMAC;
2066 return common_nonsnoop_hash(edesc, areq, nbytes_to_hash, offset,
2067 ahash_done);
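/*
 * Editor's note -- a standalone sketch (not driver code) of the split
 * ahash_process_req() computes above.  Unless this is the final
 * request, only whole blocks go to the SEC and at least one block is
 * held back so a later final() still has data to pad.  Example:
 * blocksize 64, 10 bytes buffered, 100 new bytes -> hash 64, carry 46;
 * with 118 new bytes (128 total) -> hash 64, carry 64.  The function
 * name is hypothetical.
 */
static inline void talitos_hash_split_sketch(unsigned int nbytes,
					     unsigned int nbuf,
					     unsigned int blocksize,
					     bool last,
					     unsigned int *to_hash,
					     unsigned int *to_hash_later)
{
	unsigned int total = nbytes + nbuf;

	if (last)
		*to_hash_later = 0;			/* flush everything */
	else if (total & (blocksize - 1))
		*to_hash_later = total & (blocksize - 1);
	else
		*to_hash_later = blocksize;		/* keep one block back */

	*to_hash = total - *to_hash_later;
}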
2070 static int ahash_update(struct ahash_request *areq)
2072 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2074 req_ctx->last = 0;
2076 return ahash_process_req(areq, areq->nbytes);
2079 static int ahash_final(struct ahash_request *areq)
2081 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2083 req_ctx->last = 1;
2085 return ahash_process_req(areq, 0);
2088 static int ahash_finup(struct ahash_request *areq)
2090 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2092 req_ctx->last = 1;
2094 return ahash_process_req(areq, areq->nbytes);
2097 static int ahash_digest(struct ahash_request *areq)
2099 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2100 struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
2102 ahash->init(areq);
2103 req_ctx->last = 1;
2105 return ahash_process_req(areq, areq->nbytes);
2108 static int ahash_export(struct ahash_request *areq, void *out)
2110 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2111 struct talitos_export_state *export = out;
2113 memcpy(export->hw_context, req_ctx->hw_context,
2114 req_ctx->hw_context_size);
2115 memcpy(export->buf, req_ctx->buf[req_ctx->buf_idx], req_ctx->nbuf);
2116 export->swinit = req_ctx->swinit;
2117 export->first = req_ctx->first;
2118 export->last = req_ctx->last;
2119 export->to_hash_later = req_ctx->to_hash_later;
2120 export->nbuf = req_ctx->nbuf;
2122 return 0;
2125 static int ahash_import(struct ahash_request *areq, const void *in)
2127 struct talitos_ahash_req_ctx *req_ctx = ahash_request_ctx(areq);
2128 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2129 const struct talitos_export_state *export = in;
2130 unsigned int size;
2132 memset(req_ctx, 0, sizeof(*req_ctx));
2133 size = (crypto_ahash_digestsize(tfm) <= SHA256_DIGEST_SIZE)
2134 ? TALITOS_MDEU_CONTEXT_SIZE_MD5_SHA1_SHA256
2135 : TALITOS_MDEU_CONTEXT_SIZE_SHA384_SHA512;
2136 req_ctx->hw_context_size = size;
2137 memcpy(req_ctx->hw_context, export->hw_context, size);
2138 memcpy(req_ctx->buf[0], export->buf, export->nbuf);
2139 req_ctx->swinit = export->swinit;
2140 req_ctx->first = export->first;
2141 req_ctx->last = export->last;
2142 req_ctx->to_hash_later = export->to_hash_later;
2143 req_ctx->nbuf = export->nbuf;
2145 return 0;
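/*
 * Editor's note -- a usage sketch (not driver code) for the
 * export/import pair above: callers checkpoint a partial hash into a
 * buffer of crypto_ahash_statesize() bytes, which this driver declares
 * as sizeof(struct talitos_export_state).  Error handling abbreviated;
 * the function name is hypothetical.
 */
static int talitos_export_roundtrip_sketch(struct ahash_request *req,
					   struct ahash_request *req2)
{
	struct talitos_export_state state;
	int ret;

	ret = crypto_ahash_export(req, &state);	/* snapshot after update()s */
	if (ret)
		return ret;

	return crypto_ahash_import(req2, &state);	/* resume elsewhere */
}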
2148 static int keyhash(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen,
2149 u8 *hash)
2151 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2153 struct scatterlist sg[1];
2154 struct ahash_request *req;
2155 struct crypto_wait wait;
2156 int ret;
2158 crypto_init_wait(&wait);
2160 req = ahash_request_alloc(tfm, GFP_KERNEL);
2161 if (!req)
2162 return -ENOMEM;
2164 /* Keep tfm keylen == 0 during hash of the long key */
2165 ctx->keylen = 0;
2166 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
2167 crypto_req_done, &wait);
2169 sg_init_one(&sg[0], key, keylen);
2171 ahash_request_set_crypt(req, sg, hash, keylen);
2172 ret = crypto_wait_req(crypto_ahash_digest(req), &wait);
2174 ahash_request_free(req);
2176 return ret;
2179 static int ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
2180 unsigned int keylen)
2182 struct talitos_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm));
2183 struct device *dev = ctx->dev;
2184 unsigned int blocksize =
2185 crypto_tfm_alg_blocksize(crypto_ahash_tfm(tfm));
2186 unsigned int digestsize = crypto_ahash_digestsize(tfm);
2187 unsigned int keysize = keylen;
2188 u8 hash[SHA512_DIGEST_SIZE];
2189 int ret;
2191 if (keylen <= blocksize)
2192 memcpy(ctx->key, key, keysize);
2193 else {
2194 /* Must get the hash of the long key */
2195 ret = keyhash(tfm, key, keylen, hash);
2197 if (ret) {
2198 crypto_ahash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
2199 return -EINVAL;
2202 keysize = digestsize;
2203 memcpy(ctx->key, hash, digestsize);
2206 if (ctx->keylen)
2207 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
2209 ctx->keylen = keysize;
2210 ctx->dma_key = dma_map_single(dev, ctx->key, keysize, DMA_TO_DEVICE);
2212 return 0;
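/*
 * Editor's note -- ahash_setkey() above applies the standard RFC 2104
 * rule: an HMAC key longer than one hash block is replaced by its own
 * digest before use.  For hmac(sha1) (blocksize 64, digestsize 20), a
 * 100-byte key is stored as its 20-byte SHA-1 digest.  A minimal
 * sketch with a hypothetical name:
 */
static inline unsigned int hmac_stored_keylen_sketch(unsigned int keylen,
						     unsigned int blocksize,
						     unsigned int digestsize)
{
	return keylen <= blocksize ? keylen : digestsize;
}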
2216 struct talitos_alg_template {
2217 u32 type;
2218 u32 priority;
2219 union {
2220 struct crypto_alg crypto;
2221 struct ahash_alg hash;
2222 struct aead_alg aead;
2223 } alg;
2224 __be32 desc_hdr_template;
2227 static struct talitos_alg_template driver_algs[] = {
2228 /* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
2229 { .type = CRYPTO_ALG_TYPE_AEAD,
2230 .alg.aead = {
2231 .base = {
2232 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2233 .cra_driver_name = "authenc-hmac-sha1-"
2234 "cbc-aes-talitos",
2235 .cra_blocksize = AES_BLOCK_SIZE,
2236 .cra_flags = CRYPTO_ALG_ASYNC,
2238 .ivsize = AES_BLOCK_SIZE,
2239 .maxauthsize = SHA1_DIGEST_SIZE,
2241 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2242 DESC_HDR_SEL0_AESU |
2243 DESC_HDR_MODE0_AESU_CBC |
2244 DESC_HDR_SEL1_MDEUA |
2245 DESC_HDR_MODE1_MDEU_INIT |
2246 DESC_HDR_MODE1_MDEU_PAD |
2247 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2249 { .type = CRYPTO_ALG_TYPE_AEAD,
2250 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2251 .alg.aead = {
2252 .base = {
2253 .cra_name = "authenc(hmac(sha1),cbc(aes))",
2254 .cra_driver_name = "authenc-hmac-sha1-"
2255 "cbc-aes-talitos",
2256 .cra_blocksize = AES_BLOCK_SIZE,
2257 .cra_flags = CRYPTO_ALG_ASYNC,
2259 .ivsize = AES_BLOCK_SIZE,
2260 .maxauthsize = SHA1_DIGEST_SIZE,
2262 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2263 DESC_HDR_SEL0_AESU |
2264 DESC_HDR_MODE0_AESU_CBC |
2265 DESC_HDR_SEL1_MDEUA |
2266 DESC_HDR_MODE1_MDEU_INIT |
2267 DESC_HDR_MODE1_MDEU_PAD |
2268 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2270 { .type = CRYPTO_ALG_TYPE_AEAD,
2271 .alg.aead = {
2272 .base = {
2273 .cra_name = "authenc(hmac(sha1),"
2274 "cbc(des3_ede))",
2275 .cra_driver_name = "authenc-hmac-sha1-"
2276 "cbc-3des-talitos",
2277 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2278 .cra_flags = CRYPTO_ALG_ASYNC,
2280 .ivsize = DES3_EDE_BLOCK_SIZE,
2281 .maxauthsize = SHA1_DIGEST_SIZE,
2283 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2284 DESC_HDR_SEL0_DEU |
2285 DESC_HDR_MODE0_DEU_CBC |
2286 DESC_HDR_MODE0_DEU_3DES |
2287 DESC_HDR_SEL1_MDEUA |
2288 DESC_HDR_MODE1_MDEU_INIT |
2289 DESC_HDR_MODE1_MDEU_PAD |
2290 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2292 { .type = CRYPTO_ALG_TYPE_AEAD,
2293 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2294 .alg.aead = {
2295 .base = {
2296 .cra_name = "authenc(hmac(sha1),"
2297 "cbc(des3_ede))",
2298 .cra_driver_name = "authenc-hmac-sha1-"
2299 "cbc-3des-talitos",
2300 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2301 .cra_flags = CRYPTO_ALG_ASYNC,
2303 .ivsize = DES3_EDE_BLOCK_SIZE,
2304 .maxauthsize = SHA1_DIGEST_SIZE,
2306 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2307 DESC_HDR_SEL0_DEU |
2308 DESC_HDR_MODE0_DEU_CBC |
2309 DESC_HDR_MODE0_DEU_3DES |
2310 DESC_HDR_SEL1_MDEUA |
2311 DESC_HDR_MODE1_MDEU_INIT |
2312 DESC_HDR_MODE1_MDEU_PAD |
2313 DESC_HDR_MODE1_MDEU_SHA1_HMAC,
2315 { .type = CRYPTO_ALG_TYPE_AEAD,
2316 .alg.aead = {
2317 .base = {
2318 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2319 .cra_driver_name = "authenc-hmac-sha224-"
2320 "cbc-aes-talitos",
2321 .cra_blocksize = AES_BLOCK_SIZE,
2322 .cra_flags = CRYPTO_ALG_ASYNC,
2324 .ivsize = AES_BLOCK_SIZE,
2325 .maxauthsize = SHA224_DIGEST_SIZE,
2327 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2328 DESC_HDR_SEL0_AESU |
2329 DESC_HDR_MODE0_AESU_CBC |
2330 DESC_HDR_SEL1_MDEUA |
2331 DESC_HDR_MODE1_MDEU_INIT |
2332 DESC_HDR_MODE1_MDEU_PAD |
2333 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2335 { .type = CRYPTO_ALG_TYPE_AEAD,
2336 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2337 .alg.aead = {
2338 .base = {
2339 .cra_name = "authenc(hmac(sha224),cbc(aes))",
2340 .cra_driver_name = "authenc-hmac-sha224-"
2341 "cbc-aes-talitos",
2342 .cra_blocksize = AES_BLOCK_SIZE,
2343 .cra_flags = CRYPTO_ALG_ASYNC,
2345 .ivsize = AES_BLOCK_SIZE,
2346 .maxauthsize = SHA224_DIGEST_SIZE,
2348 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2349 DESC_HDR_SEL0_AESU |
2350 DESC_HDR_MODE0_AESU_CBC |
2351 DESC_HDR_SEL1_MDEUA |
2352 DESC_HDR_MODE1_MDEU_INIT |
2353 DESC_HDR_MODE1_MDEU_PAD |
2354 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2356 { .type = CRYPTO_ALG_TYPE_AEAD,
2357 .alg.aead = {
2358 .base = {
2359 .cra_name = "authenc(hmac(sha224),"
2360 "cbc(des3_ede))",
2361 .cra_driver_name = "authenc-hmac-sha224-"
2362 "cbc-3des-talitos",
2363 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2364 .cra_flags = CRYPTO_ALG_ASYNC,
2366 .ivsize = DES3_EDE_BLOCK_SIZE,
2367 .maxauthsize = SHA224_DIGEST_SIZE,
2369 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2370 DESC_HDR_SEL0_DEU |
2371 DESC_HDR_MODE0_DEU_CBC |
2372 DESC_HDR_MODE0_DEU_3DES |
2373 DESC_HDR_SEL1_MDEUA |
2374 DESC_HDR_MODE1_MDEU_INIT |
2375 DESC_HDR_MODE1_MDEU_PAD |
2376 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2378 { .type = CRYPTO_ALG_TYPE_AEAD,
2379 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2380 .alg.aead = {
2381 .base = {
2382 .cra_name = "authenc(hmac(sha224),"
2383 "cbc(des3_ede))",
2384 .cra_driver_name = "authenc-hmac-sha224-"
2385 "cbc-3des-talitos",
2386 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2387 .cra_flags = CRYPTO_ALG_ASYNC,
2389 .ivsize = DES3_EDE_BLOCK_SIZE,
2390 .maxauthsize = SHA224_DIGEST_SIZE,
2392 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2393 DESC_HDR_SEL0_DEU |
2394 DESC_HDR_MODE0_DEU_CBC |
2395 DESC_HDR_MODE0_DEU_3DES |
2396 DESC_HDR_SEL1_MDEUA |
2397 DESC_HDR_MODE1_MDEU_INIT |
2398 DESC_HDR_MODE1_MDEU_PAD |
2399 DESC_HDR_MODE1_MDEU_SHA224_HMAC,
2401 { .type = CRYPTO_ALG_TYPE_AEAD,
2402 .alg.aead = {
2403 .base = {
2404 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2405 .cra_driver_name = "authenc-hmac-sha256-"
2406 "cbc-aes-talitos",
2407 .cra_blocksize = AES_BLOCK_SIZE,
2408 .cra_flags = CRYPTO_ALG_ASYNC,
2410 .ivsize = AES_BLOCK_SIZE,
2411 .maxauthsize = SHA256_DIGEST_SIZE,
2413 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2414 DESC_HDR_SEL0_AESU |
2415 DESC_HDR_MODE0_AESU_CBC |
2416 DESC_HDR_SEL1_MDEUA |
2417 DESC_HDR_MODE1_MDEU_INIT |
2418 DESC_HDR_MODE1_MDEU_PAD |
2419 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2421 { .type = CRYPTO_ALG_TYPE_AEAD,
2422 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2423 .alg.aead = {
2424 .base = {
2425 .cra_name = "authenc(hmac(sha256),cbc(aes))",
2426 .cra_driver_name = "authenc-hmac-sha256-"
2427 "cbc-aes-talitos",
2428 .cra_blocksize = AES_BLOCK_SIZE,
2429 .cra_flags = CRYPTO_ALG_ASYNC,
2431 .ivsize = AES_BLOCK_SIZE,
2432 .maxauthsize = SHA256_DIGEST_SIZE,
2434 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2435 DESC_HDR_SEL0_AESU |
2436 DESC_HDR_MODE0_AESU_CBC |
2437 DESC_HDR_SEL1_MDEUA |
2438 DESC_HDR_MODE1_MDEU_INIT |
2439 DESC_HDR_MODE1_MDEU_PAD |
2440 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2442 { .type = CRYPTO_ALG_TYPE_AEAD,
2443 .alg.aead = {
2444 .base = {
2445 .cra_name = "authenc(hmac(sha256),"
2446 "cbc(des3_ede))",
2447 .cra_driver_name = "authenc-hmac-sha256-"
2448 "cbc-3des-talitos",
2449 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2450 .cra_flags = CRYPTO_ALG_ASYNC,
2452 .ivsize = DES3_EDE_BLOCK_SIZE,
2453 .maxauthsize = SHA256_DIGEST_SIZE,
2455 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2456 DESC_HDR_SEL0_DEU |
2457 DESC_HDR_MODE0_DEU_CBC |
2458 DESC_HDR_MODE0_DEU_3DES |
2459 DESC_HDR_SEL1_MDEUA |
2460 DESC_HDR_MODE1_MDEU_INIT |
2461 DESC_HDR_MODE1_MDEU_PAD |
2462 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2464 { .type = CRYPTO_ALG_TYPE_AEAD,
2465 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2466 .alg.aead = {
2467 .base = {
2468 .cra_name = "authenc(hmac(sha256),"
2469 "cbc(des3_ede))",
2470 .cra_driver_name = "authenc-hmac-sha256-"
2471 "cbc-3des-talitos",
2472 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2473 .cra_flags = CRYPTO_ALG_ASYNC,
2475 .ivsize = DES3_EDE_BLOCK_SIZE,
2476 .maxauthsize = SHA256_DIGEST_SIZE,
2478 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2479 DESC_HDR_SEL0_DEU |
2480 DESC_HDR_MODE0_DEU_CBC |
2481 DESC_HDR_MODE0_DEU_3DES |
2482 DESC_HDR_SEL1_MDEUA |
2483 DESC_HDR_MODE1_MDEU_INIT |
2484 DESC_HDR_MODE1_MDEU_PAD |
2485 DESC_HDR_MODE1_MDEU_SHA256_HMAC,
2487 { .type = CRYPTO_ALG_TYPE_AEAD,
2488 .alg.aead = {
2489 .base = {
2490 .cra_name = "authenc(hmac(sha384),cbc(aes))",
2491 .cra_driver_name = "authenc-hmac-sha384-"
2492 "cbc-aes-talitos",
2493 .cra_blocksize = AES_BLOCK_SIZE,
2494 .cra_flags = CRYPTO_ALG_ASYNC,
2496 .ivsize = AES_BLOCK_SIZE,
2497 .maxauthsize = SHA384_DIGEST_SIZE,
2499 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2500 DESC_HDR_SEL0_AESU |
2501 DESC_HDR_MODE0_AESU_CBC |
2502 DESC_HDR_SEL1_MDEUB |
2503 DESC_HDR_MODE1_MDEU_INIT |
2504 DESC_HDR_MODE1_MDEU_PAD |
2505 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2507 { .type = CRYPTO_ALG_TYPE_AEAD,
2508 .alg.aead = {
2509 .base = {
2510 .cra_name = "authenc(hmac(sha384),"
2511 "cbc(des3_ede))",
2512 .cra_driver_name = "authenc-hmac-sha384-"
2513 "cbc-3des-talitos",
2514 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2515 .cra_flags = CRYPTO_ALG_ASYNC,
2517 .ivsize = DES3_EDE_BLOCK_SIZE,
2518 .maxauthsize = SHA384_DIGEST_SIZE,
2520 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2521 DESC_HDR_SEL0_DEU |
2522 DESC_HDR_MODE0_DEU_CBC |
2523 DESC_HDR_MODE0_DEU_3DES |
2524 DESC_HDR_SEL1_MDEUB |
2525 DESC_HDR_MODE1_MDEU_INIT |
2526 DESC_HDR_MODE1_MDEU_PAD |
2527 DESC_HDR_MODE1_MDEUB_SHA384_HMAC,
2529 { .type = CRYPTO_ALG_TYPE_AEAD,
2530 .alg.aead = {
2531 .base = {
2532 .cra_name = "authenc(hmac(sha512),cbc(aes))",
2533 .cra_driver_name = "authenc-hmac-sha512-"
2534 "cbc-aes-talitos",
2535 .cra_blocksize = AES_BLOCK_SIZE,
2536 .cra_flags = CRYPTO_ALG_ASYNC,
2538 .ivsize = AES_BLOCK_SIZE,
2539 .maxauthsize = SHA512_DIGEST_SIZE,
2541 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2542 DESC_HDR_SEL0_AESU |
2543 DESC_HDR_MODE0_AESU_CBC |
2544 DESC_HDR_SEL1_MDEUB |
2545 DESC_HDR_MODE1_MDEU_INIT |
2546 DESC_HDR_MODE1_MDEU_PAD |
2547 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2549 { .type = CRYPTO_ALG_TYPE_AEAD,
2550 .alg.aead = {
2551 .base = {
2552 .cra_name = "authenc(hmac(sha512),"
2553 "cbc(des3_ede))",
2554 .cra_driver_name = "authenc-hmac-sha512-"
2555 "cbc-3des-talitos",
2556 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2557 .cra_flags = CRYPTO_ALG_ASYNC,
2559 .ivsize = DES3_EDE_BLOCK_SIZE,
2560 .maxauthsize = SHA512_DIGEST_SIZE,
2562 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2563 DESC_HDR_SEL0_DEU |
2564 DESC_HDR_MODE0_DEU_CBC |
2565 DESC_HDR_MODE0_DEU_3DES |
2566 DESC_HDR_SEL1_MDEUB |
2567 DESC_HDR_MODE1_MDEU_INIT |
2568 DESC_HDR_MODE1_MDEU_PAD |
2569 DESC_HDR_MODE1_MDEUB_SHA512_HMAC,
2571 { .type = CRYPTO_ALG_TYPE_AEAD,
2572 .alg.aead = {
2573 .base = {
2574 .cra_name = "authenc(hmac(md5),cbc(aes))",
2575 .cra_driver_name = "authenc-hmac-md5-"
2576 "cbc-aes-talitos",
2577 .cra_blocksize = AES_BLOCK_SIZE,
2578 .cra_flags = CRYPTO_ALG_ASYNC,
2580 .ivsize = AES_BLOCK_SIZE,
2581 .maxauthsize = MD5_DIGEST_SIZE,
2583 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2584 DESC_HDR_SEL0_AESU |
2585 DESC_HDR_MODE0_AESU_CBC |
2586 DESC_HDR_SEL1_MDEUA |
2587 DESC_HDR_MODE1_MDEU_INIT |
2588 DESC_HDR_MODE1_MDEU_PAD |
2589 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2591 { .type = CRYPTO_ALG_TYPE_AEAD,
2592 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2593 .alg.aead = {
2594 .base = {
2595 .cra_name = "authenc(hmac(md5),cbc(aes))",
2596 .cra_driver_name = "authenc-hmac-md5-"
2597 "cbc-aes-talitos",
2598 .cra_blocksize = AES_BLOCK_SIZE,
2599 .cra_flags = CRYPTO_ALG_ASYNC,
2601 .ivsize = AES_BLOCK_SIZE,
2602 .maxauthsize = MD5_DIGEST_SIZE,
2604 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2605 DESC_HDR_SEL0_AESU |
2606 DESC_HDR_MODE0_AESU_CBC |
2607 DESC_HDR_SEL1_MDEUA |
2608 DESC_HDR_MODE1_MDEU_INIT |
2609 DESC_HDR_MODE1_MDEU_PAD |
2610 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2612 { .type = CRYPTO_ALG_TYPE_AEAD,
2613 .alg.aead = {
2614 .base = {
2615 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2616 .cra_driver_name = "authenc-hmac-md5-"
2617 "cbc-3des-talitos",
2618 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2619 .cra_flags = CRYPTO_ALG_ASYNC,
2621 .ivsize = DES3_EDE_BLOCK_SIZE,
2622 .maxauthsize = MD5_DIGEST_SIZE,
2624 .desc_hdr_template = DESC_HDR_TYPE_IPSEC_ESP |
2625 DESC_HDR_SEL0_DEU |
2626 DESC_HDR_MODE0_DEU_CBC |
2627 DESC_HDR_MODE0_DEU_3DES |
2628 DESC_HDR_SEL1_MDEUA |
2629 DESC_HDR_MODE1_MDEU_INIT |
2630 DESC_HDR_MODE1_MDEU_PAD |
2631 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2633 { .type = CRYPTO_ALG_TYPE_AEAD,
2634 .priority = TALITOS_CRA_PRIORITY_AEAD_HSNA,
2635 .alg.aead = {
2636 .base = {
2637 .cra_name = "authenc(hmac(md5),cbc(des3_ede))",
2638 .cra_driver_name = "authenc-hmac-md5-"
2639 "cbc-3des-talitos",
2640 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2641 .cra_flags = CRYPTO_ALG_ASYNC,
2643 .ivsize = DES3_EDE_BLOCK_SIZE,
2644 .maxauthsize = MD5_DIGEST_SIZE,
2646 .desc_hdr_template = DESC_HDR_TYPE_HMAC_SNOOP_NO_AFEU |
2647 DESC_HDR_SEL0_DEU |
2648 DESC_HDR_MODE0_DEU_CBC |
2649 DESC_HDR_MODE0_DEU_3DES |
2650 DESC_HDR_SEL1_MDEUA |
2651 DESC_HDR_MODE1_MDEU_INIT |
2652 DESC_HDR_MODE1_MDEU_PAD |
2653 DESC_HDR_MODE1_MDEU_MD5_HMAC,
2655 /* ABLKCIPHER algorithms. */
2656 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2657 .alg.crypto = {
2658 .cra_name = "ecb(aes)",
2659 .cra_driver_name = "ecb-aes-talitos",
2660 .cra_blocksize = AES_BLOCK_SIZE,
2661 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2662 CRYPTO_ALG_ASYNC,
2663 .cra_ablkcipher = {
2664 .min_keysize = AES_MIN_KEY_SIZE,
2665 .max_keysize = AES_MAX_KEY_SIZE,
2666 .ivsize = AES_BLOCK_SIZE,
2669 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2670 DESC_HDR_SEL0_AESU,
2672 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2673 .alg.crypto = {
2674 .cra_name = "cbc(aes)",
2675 .cra_driver_name = "cbc-aes-talitos",
2676 .cra_blocksize = AES_BLOCK_SIZE,
2677 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2678 CRYPTO_ALG_ASYNC,
2679 .cra_ablkcipher = {
2680 .min_keysize = AES_MIN_KEY_SIZE,
2681 .max_keysize = AES_MAX_KEY_SIZE,
2682 .ivsize = AES_BLOCK_SIZE,
2685 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2686 DESC_HDR_SEL0_AESU |
2687 DESC_HDR_MODE0_AESU_CBC,
2689 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2690 .alg.crypto = {
2691 .cra_name = "ctr(aes)",
2692 .cra_driver_name = "ctr-aes-talitos",
2693 .cra_blocksize = AES_BLOCK_SIZE,
2694 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2695 CRYPTO_ALG_ASYNC,
2696 .cra_ablkcipher = {
2697 .min_keysize = AES_MIN_KEY_SIZE,
2698 .max_keysize = AES_MAX_KEY_SIZE,
2699 .ivsize = AES_BLOCK_SIZE,
2702 .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
2703 DESC_HDR_SEL0_AESU |
2704 DESC_HDR_MODE0_AESU_CTR,
2706 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2707 .alg.crypto = {
2708 .cra_name = "ecb(des)",
2709 .cra_driver_name = "ecb-des-talitos",
2710 .cra_blocksize = DES_BLOCK_SIZE,
2711 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2712 CRYPTO_ALG_ASYNC,
2713 .cra_ablkcipher = {
2714 .min_keysize = DES_KEY_SIZE,
2715 .max_keysize = DES_KEY_SIZE,
2716 .ivsize = DES_BLOCK_SIZE,
2719 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2720 DESC_HDR_SEL0_DEU,
2722 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2723 .alg.crypto = {
2724 .cra_name = "cbc(des)",
2725 .cra_driver_name = "cbc-des-talitos",
2726 .cra_blocksize = DES_BLOCK_SIZE,
2727 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2728 CRYPTO_ALG_ASYNC,
2729 .cra_ablkcipher = {
2730 .min_keysize = DES_KEY_SIZE,
2731 .max_keysize = DES_KEY_SIZE,
2732 .ivsize = DES_BLOCK_SIZE,
2735 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2736 DESC_HDR_SEL0_DEU |
2737 DESC_HDR_MODE0_DEU_CBC,
2739 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2740 .alg.crypto = {
2741 .cra_name = "ecb(des3_ede)",
2742 .cra_driver_name = "ecb-3des-talitos",
2743 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2744 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2745 CRYPTO_ALG_ASYNC,
2746 .cra_ablkcipher = {
2747 .min_keysize = DES3_EDE_KEY_SIZE,
2748 .max_keysize = DES3_EDE_KEY_SIZE,
2749 .ivsize = DES3_EDE_BLOCK_SIZE,
2752 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2753 DESC_HDR_SEL0_DEU |
2754 DESC_HDR_MODE0_DEU_3DES,
2756 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER,
2757 .alg.crypto = {
2758 .cra_name = "cbc(des3_ede)",
2759 .cra_driver_name = "cbc-3des-talitos",
2760 .cra_blocksize = DES3_EDE_BLOCK_SIZE,
2761 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
2762 CRYPTO_ALG_ASYNC,
2763 .cra_ablkcipher = {
2764 .min_keysize = DES3_EDE_KEY_SIZE,
2765 .max_keysize = DES3_EDE_KEY_SIZE,
2766 .ivsize = DES3_EDE_BLOCK_SIZE,
2769 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2770 DESC_HDR_SEL0_DEU |
2771 DESC_HDR_MODE0_DEU_CBC |
2772 DESC_HDR_MODE0_DEU_3DES,
2774 /* AHASH algorithms. */
2775 { .type = CRYPTO_ALG_TYPE_AHASH,
2776 .alg.hash = {
2777 .halg.digestsize = MD5_DIGEST_SIZE,
2778 .halg.statesize = sizeof(struct talitos_export_state),
2779 .halg.base = {
2780 .cra_name = "md5",
2781 .cra_driver_name = "md5-talitos",
2782 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2783 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2784 CRYPTO_ALG_ASYNC,
2787 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2788 DESC_HDR_SEL0_MDEUA |
2789 DESC_HDR_MODE0_MDEU_MD5,
2791 { .type = CRYPTO_ALG_TYPE_AHASH,
2792 .alg.hash = {
2793 .halg.digestsize = SHA1_DIGEST_SIZE,
2794 .halg.statesize = sizeof(struct talitos_export_state),
2795 .halg.base = {
2796 .cra_name = "sha1",
2797 .cra_driver_name = "sha1-talitos",
2798 .cra_blocksize = SHA1_BLOCK_SIZE,
2799 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2800 CRYPTO_ALG_ASYNC,
2803 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2804 DESC_HDR_SEL0_MDEUA |
2805 DESC_HDR_MODE0_MDEU_SHA1,
2807 { .type = CRYPTO_ALG_TYPE_AHASH,
2808 .alg.hash = {
2809 .halg.digestsize = SHA224_DIGEST_SIZE,
2810 .halg.statesize = sizeof(struct talitos_export_state),
2811 .halg.base = {
2812 .cra_name = "sha224",
2813 .cra_driver_name = "sha224-talitos",
2814 .cra_blocksize = SHA224_BLOCK_SIZE,
2815 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2816 CRYPTO_ALG_ASYNC,
2819 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2820 DESC_HDR_SEL0_MDEUA |
2821 DESC_HDR_MODE0_MDEU_SHA224,
2823 { .type = CRYPTO_ALG_TYPE_AHASH,
2824 .alg.hash = {
2825 .halg.digestsize = SHA256_DIGEST_SIZE,
2826 .halg.statesize = sizeof(struct talitos_export_state),
2827 .halg.base = {
2828 .cra_name = "sha256",
2829 .cra_driver_name = "sha256-talitos",
2830 .cra_blocksize = SHA256_BLOCK_SIZE,
2831 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2832 CRYPTO_ALG_ASYNC,
2835 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2836 DESC_HDR_SEL0_MDEUA |
2837 DESC_HDR_MODE0_MDEU_SHA256,
2839 { .type = CRYPTO_ALG_TYPE_AHASH,
2840 .alg.hash = {
2841 .halg.digestsize = SHA384_DIGEST_SIZE,
2842 .halg.statesize = sizeof(struct talitos_export_state),
2843 .halg.base = {
2844 .cra_name = "sha384",
2845 .cra_driver_name = "sha384-talitos",
2846 .cra_blocksize = SHA384_BLOCK_SIZE,
2847 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2848 CRYPTO_ALG_ASYNC,
2851 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2852 DESC_HDR_SEL0_MDEUB |
2853 DESC_HDR_MODE0_MDEUB_SHA384,
2855 { .type = CRYPTO_ALG_TYPE_AHASH,
2856 .alg.hash = {
2857 .halg.digestsize = SHA512_DIGEST_SIZE,
2858 .halg.statesize = sizeof(struct talitos_export_state),
2859 .halg.base = {
2860 .cra_name = "sha512",
2861 .cra_driver_name = "sha512-talitos",
2862 .cra_blocksize = SHA512_BLOCK_SIZE,
2863 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2864 CRYPTO_ALG_ASYNC,
2867 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2868 DESC_HDR_SEL0_MDEUB |
2869 DESC_HDR_MODE0_MDEUB_SHA512,
2871 { .type = CRYPTO_ALG_TYPE_AHASH,
2872 .alg.hash = {
2873 .halg.digestsize = MD5_DIGEST_SIZE,
2874 .halg.statesize = sizeof(struct talitos_export_state),
2875 .halg.base = {
2876 .cra_name = "hmac(md5)",
2877 .cra_driver_name = "hmac-md5-talitos",
2878 .cra_blocksize = MD5_HMAC_BLOCK_SIZE,
2879 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2880 CRYPTO_ALG_ASYNC,
2883 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2884 DESC_HDR_SEL0_MDEUA |
2885 DESC_HDR_MODE0_MDEU_MD5,
2887 { .type = CRYPTO_ALG_TYPE_AHASH,
2888 .alg.hash = {
2889 .halg.digestsize = SHA1_DIGEST_SIZE,
2890 .halg.statesize = sizeof(struct talitos_export_state),
2891 .halg.base = {
2892 .cra_name = "hmac(sha1)",
2893 .cra_driver_name = "hmac-sha1-talitos",
2894 .cra_blocksize = SHA1_BLOCK_SIZE,
2895 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2896 CRYPTO_ALG_ASYNC,
2899 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2900 DESC_HDR_SEL0_MDEUA |
2901 DESC_HDR_MODE0_MDEU_SHA1,
2903 { .type = CRYPTO_ALG_TYPE_AHASH,
2904 .alg.hash = {
2905 .halg.digestsize = SHA224_DIGEST_SIZE,
2906 .halg.statesize = sizeof(struct talitos_export_state),
2907 .halg.base = {
2908 .cra_name = "hmac(sha224)",
2909 .cra_driver_name = "hmac-sha224-talitos",
2910 .cra_blocksize = SHA224_BLOCK_SIZE,
2911 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2912 CRYPTO_ALG_ASYNC,
2915 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2916 DESC_HDR_SEL0_MDEUA |
2917 DESC_HDR_MODE0_MDEU_SHA224,
2919 { .type = CRYPTO_ALG_TYPE_AHASH,
2920 .alg.hash = {
2921 .halg.digestsize = SHA256_DIGEST_SIZE,
2922 .halg.statesize = sizeof(struct talitos_export_state),
2923 .halg.base = {
2924 .cra_name = "hmac(sha256)",
2925 .cra_driver_name = "hmac-sha256-talitos",
2926 .cra_blocksize = SHA256_BLOCK_SIZE,
2927 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2928 CRYPTO_ALG_ASYNC,
2931 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2932 DESC_HDR_SEL0_MDEUA |
2933 DESC_HDR_MODE0_MDEU_SHA256,
2935 { .type = CRYPTO_ALG_TYPE_AHASH,
2936 .alg.hash = {
2937 .halg.digestsize = SHA384_DIGEST_SIZE,
2938 .halg.statesize = sizeof(struct talitos_export_state),
2939 .halg.base = {
2940 .cra_name = "hmac(sha384)",
2941 .cra_driver_name = "hmac-sha384-talitos",
2942 .cra_blocksize = SHA384_BLOCK_SIZE,
2943 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2944 CRYPTO_ALG_ASYNC,
2947 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2948 DESC_HDR_SEL0_MDEUB |
2949 DESC_HDR_MODE0_MDEUB_SHA384,
2951 { .type = CRYPTO_ALG_TYPE_AHASH,
2952 .alg.hash = {
2953 .halg.digestsize = SHA512_DIGEST_SIZE,
2954 .halg.statesize = sizeof(struct talitos_export_state),
2955 .halg.base = {
2956 .cra_name = "hmac(sha512)",
2957 .cra_driver_name = "hmac-sha512-talitos",
2958 .cra_blocksize = SHA512_BLOCK_SIZE,
2959 .cra_flags = CRYPTO_ALG_TYPE_AHASH |
2960 CRYPTO_ALG_ASYNC,
2963 .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
2964 DESC_HDR_SEL0_MDEUB |
2965 DESC_HDR_MODE0_MDEUB_SHA512,
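/*
 * Editor's note -- every desc_hdr_template in the table above composes
 * the same way: a descriptor type, a primary execution unit (SEL0) and
 * its mode bits (MODE0), plus a secondary unit (SEL1/MODE1) for the
 * single-pass AEAD types.  A purely illustrative (hypothetical)
 * example, equivalent to the sha1 entry above:
 */
static const __be32 talitos_sha1_hdr_sketch =
	DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |	/* descriptor type */
	DESC_HDR_SEL0_MDEUA |			/* message digest EU A */
	DESC_HDR_MODE0_MDEU_SHA1;		/* EU mode: SHA-1 */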
2969 struct talitos_crypto_alg {
2970 struct list_head entry;
2971 struct device *dev;
2972 struct talitos_alg_template algt;
2975 static int talitos_init_common(struct talitos_ctx *ctx,
2976 struct talitos_crypto_alg *talitos_alg)
2978 struct talitos_private *priv;
2980 /* update context with ptr to dev */
2981 ctx->dev = talitos_alg->dev;
2983 /* assign SEC channel to tfm in round-robin fashion */
2984 priv = dev_get_drvdata(ctx->dev);
2985 ctx->ch = atomic_inc_return(&priv->last_chan) &
2986 (priv->num_channels - 1);
2988 /* copy descriptor header template value */
2989 ctx->desc_hdr_template = talitos_alg->algt.desc_hdr_template;
2991 /* select done notification */
2992 ctx->desc_hdr_template |= DESC_HDR_DONE_NOTIFY;
2994 return 0;
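/*
 * Editor's note -- the round-robin channel pick above is only correct
 * because priv->num_channels is a power of two (talitos_probe() below
 * rejects anything else), letting the AND stand in for a modulo.
 * A minimal sketch with a hypothetical name:
 */
static inline u32 talitos_pick_chan_sketch(atomic_t *last_chan,
					   u32 num_channels)
{
	/* with 4 channels the counter 1,2,3,4,5... maps to 1,2,3,0,1... */
	return atomic_inc_return(last_chan) & (num_channels - 1);
}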
2997 static int talitos_cra_init(struct crypto_tfm *tfm)
2999 struct crypto_alg *alg = tfm->__crt_alg;
3000 struct talitos_crypto_alg *talitos_alg;
3001 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3003 if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
3004 talitos_alg = container_of(__crypto_ahash_alg(alg),
3005 struct talitos_crypto_alg,
3006 algt.alg.hash);
3007 else
3008 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3009 algt.alg.crypto);
3011 return talitos_init_common(ctx, talitos_alg);
3014 static int talitos_cra_init_aead(struct crypto_aead *tfm)
3016 struct aead_alg *alg = crypto_aead_alg(tfm);
3017 struct talitos_crypto_alg *talitos_alg;
3018 struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
3020 talitos_alg = container_of(alg, struct talitos_crypto_alg,
3021 algt.alg.aead);
3023 return talitos_init_common(ctx, talitos_alg);
3026 static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
3028 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3030 talitos_cra_init(tfm);
3032 ctx->keylen = 0;
3033 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
3034 sizeof(struct talitos_ahash_req_ctx));
3036 return 0;
3039 static void talitos_cra_exit(struct crypto_tfm *tfm)
3041 struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
3042 struct device *dev = ctx->dev;
3044 if (ctx->keylen)
3045 dma_unmap_single(dev, ctx->dma_key, ctx->keylen, DMA_TO_DEVICE);
/*
 * given the alg's descriptor header template, determine whether descriptor
 * type and primary/secondary execution units required match the hw
 * capabilities description provided in the device tree node.
 */
3053 static int hw_supports(struct device *dev, __be32 desc_hdr_template)
3055 struct talitos_private *priv = dev_get_drvdata(dev);
3056 int ret;
3058 ret = (1 << DESC_TYPE(desc_hdr_template) & priv->desc_types) &&
3059 (1 << PRIMARY_EU(desc_hdr_template) & priv->exec_units);
3061 if (SECONDARY_EU(desc_hdr_template))
3062 ret = ret && (1 << SECONDARY_EU(desc_hdr_template)
3063 & priv->exec_units);
3065 return ret;
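/*
 * Editor's note -- the masks hw_supports() tests come straight from the
 * device tree properties read in talitos_probe() below
 * ("fsl,exec-units-mask", "fsl,descriptor-types-mask").  The
 * primary-EU test in isolation, as a sketch (hypothetical name):
 */
static inline bool talitos_has_eu_sketch(u32 exec_units_mask, u32 eu_id)
{
	/* eu_id would be PRIMARY_EU(desc_hdr_template) */
	return (1 << eu_id) & exec_units_mask;
}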
3068 static int talitos_remove(struct platform_device *ofdev)
3070 struct device *dev = &ofdev->dev;
3071 struct talitos_private *priv = dev_get_drvdata(dev);
3072 struct talitos_crypto_alg *t_alg, *n;
3073 int i;
3075 list_for_each_entry_safe(t_alg, n, &priv->alg_list, entry) {
switch (t_alg->algt.type) {
case CRYPTO_ALG_TYPE_ABLKCIPHER:
	crypto_unregister_alg(&t_alg->algt.alg.crypto);
	break;
case CRYPTO_ALG_TYPE_AEAD:
	crypto_unregister_aead(&t_alg->algt.alg.aead);
	break;
case CRYPTO_ALG_TYPE_AHASH:
	crypto_unregister_ahash(&t_alg->algt.alg.hash);
	break;
}
list_del(&t_alg->entry);
}
3088 if (hw_supports(dev, DESC_HDR_SEL0_RNG))
3089 talitos_unregister_rng(dev);
3091 for (i = 0; i < 2; i++)
3092 if (priv->irq[i]) {
3093 free_irq(priv->irq[i], dev);
3094 irq_dispose_mapping(priv->irq[i]);
3097 tasklet_kill(&priv->done_task[0]);
3098 if (priv->irq[1])
3099 tasklet_kill(&priv->done_task[1]);
3101 return 0;
3104 static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
3105 struct talitos_alg_template
3106 *template)
3108 struct talitos_private *priv = dev_get_drvdata(dev);
3109 struct talitos_crypto_alg *t_alg;
3110 struct crypto_alg *alg;
3112 t_alg = devm_kzalloc(dev, sizeof(struct talitos_crypto_alg),
3113 GFP_KERNEL);
3114 if (!t_alg)
3115 return ERR_PTR(-ENOMEM);
3117 t_alg->algt = *template;
3119 switch (t_alg->algt.type) {
3120 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3121 alg = &t_alg->algt.alg.crypto;
3122 alg->cra_init = talitos_cra_init;
3123 alg->cra_exit = talitos_cra_exit;
3124 alg->cra_type = &crypto_ablkcipher_type;
3125 alg->cra_ablkcipher.setkey = ablkcipher_setkey;
3126 alg->cra_ablkcipher.encrypt = ablkcipher_encrypt;
3127 alg->cra_ablkcipher.decrypt = ablkcipher_decrypt;
3128 alg->cra_ablkcipher.geniv = "eseqiv";
3129 break;
3130 case CRYPTO_ALG_TYPE_AEAD:
3131 alg = &t_alg->algt.alg.aead.base;
3132 alg->cra_exit = talitos_cra_exit;
3133 t_alg->algt.alg.aead.init = talitos_cra_init_aead;
3134 t_alg->algt.alg.aead.setkey = aead_setkey;
3135 t_alg->algt.alg.aead.encrypt = aead_encrypt;
3136 t_alg->algt.alg.aead.decrypt = aead_decrypt;
3137 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3138 !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
3139 devm_kfree(dev, t_alg);
3140 return ERR_PTR(-ENOTSUPP);
3142 break;
3143 case CRYPTO_ALG_TYPE_AHASH:
3144 alg = &t_alg->algt.alg.hash.halg.base;
3145 alg->cra_init = talitos_cra_init_ahash;
3146 alg->cra_exit = talitos_cra_exit;
3147 alg->cra_type = &crypto_ahash_type;
3148 t_alg->algt.alg.hash.init = ahash_init;
3149 t_alg->algt.alg.hash.update = ahash_update;
3150 t_alg->algt.alg.hash.final = ahash_final;
3151 t_alg->algt.alg.hash.finup = ahash_finup;
3152 t_alg->algt.alg.hash.digest = ahash_digest;
3153 if (!strncmp(alg->cra_name, "hmac", 4))
3154 t_alg->algt.alg.hash.setkey = ahash_setkey;
3155 t_alg->algt.alg.hash.import = ahash_import;
3156 t_alg->algt.alg.hash.export = ahash_export;
3158 if (!(priv->features & TALITOS_FTR_HMAC_OK) &&
3159 !strncmp(alg->cra_name, "hmac", 4)) {
3160 devm_kfree(dev, t_alg);
3161 return ERR_PTR(-ENOTSUPP);
3163 if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
3164 (!strcmp(alg->cra_name, "sha224") ||
3165 !strcmp(alg->cra_name, "hmac(sha224)"))) {
3166 t_alg->algt.alg.hash.init = ahash_init_sha224_swinit;
3167 t_alg->algt.desc_hdr_template =
3168 DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
3169 DESC_HDR_SEL0_MDEUA |
3170 DESC_HDR_MODE0_MDEU_SHA256;
3172 break;
3173 default:
3174 dev_err(dev, "unknown algorithm type %d\n", t_alg->algt.type);
3175 devm_kfree(dev, t_alg);
3176 return ERR_PTR(-EINVAL);
3179 alg->cra_module = THIS_MODULE;
3180 if (t_alg->algt.priority)
3181 alg->cra_priority = t_alg->algt.priority;
3182 else
3183 alg->cra_priority = TALITOS_CRA_PRIORITY;
3184 alg->cra_alignmask = 0;
3185 alg->cra_ctxsize = sizeof(struct talitos_ctx);
3186 alg->cra_flags |= CRYPTO_ALG_KERN_DRIVER_ONLY;
3188 t_alg->dev = dev;
3190 return t_alg;
3193 static int talitos_probe_irq(struct platform_device *ofdev)
3195 struct device *dev = &ofdev->dev;
3196 struct device_node *np = ofdev->dev.of_node;
3197 struct talitos_private *priv = dev_get_drvdata(dev);
3198 int err;
3199 bool is_sec1 = has_ftr_sec1(priv);
3201 priv->irq[0] = irq_of_parse_and_map(np, 0);
3202 if (!priv->irq[0]) {
3203 dev_err(dev, "failed to map irq\n");
3204 return -EINVAL;
3206 if (is_sec1) {
3207 err = request_irq(priv->irq[0], talitos1_interrupt_4ch, 0,
3208 dev_driver_string(dev), dev);
3209 goto primary_out;
3212 priv->irq[1] = irq_of_parse_and_map(np, 1);
3214 /* get the primary irq line */
3215 if (!priv->irq[1]) {
3216 err = request_irq(priv->irq[0], talitos2_interrupt_4ch, 0,
3217 dev_driver_string(dev), dev);
3218 goto primary_out;
3221 err = request_irq(priv->irq[0], talitos2_interrupt_ch0_2, 0,
3222 dev_driver_string(dev), dev);
3223 if (err)
3224 goto primary_out;
3226 /* get the secondary irq line */
3227 err = request_irq(priv->irq[1], talitos2_interrupt_ch1_3, 0,
3228 dev_driver_string(dev), dev);
3229 if (err) {
3230 dev_err(dev, "failed to request secondary irq\n");
3231 irq_dispose_mapping(priv->irq[1]);
3232 priv->irq[1] = 0;
3235 return err;
3237 primary_out:
3238 if (err) {
3239 dev_err(dev, "failed to request primary irq\n");
3240 irq_dispose_mapping(priv->irq[0]);
3241 priv->irq[0] = 0;
3244 return err;
3247 static int talitos_probe(struct platform_device *ofdev)
3249 struct device *dev = &ofdev->dev;
3250 struct device_node *np = ofdev->dev.of_node;
3251 struct talitos_private *priv;
3252 int i, err;
3253 int stride;
3254 struct resource *res;
3256 priv = devm_kzalloc(dev, sizeof(struct talitos_private), GFP_KERNEL);
3257 if (!priv)
3258 return -ENOMEM;
3260 INIT_LIST_HEAD(&priv->alg_list);
3262 dev_set_drvdata(dev, priv);
3264 priv->ofdev = ofdev;
3266 spin_lock_init(&priv->reg_lock);
3268 res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
3269 if (!res)
3270 return -ENXIO;
3271 priv->reg = devm_ioremap(dev, res->start, resource_size(res));
3272 if (!priv->reg) {
dev_err(dev, "failed to ioremap registers\n");
3274 err = -ENOMEM;
3275 goto err_out;
3278 /* get SEC version capabilities from device tree */
3279 of_property_read_u32(np, "fsl,num-channels", &priv->num_channels);
3280 of_property_read_u32(np, "fsl,channel-fifo-len", &priv->chfifo_len);
3281 of_property_read_u32(np, "fsl,exec-units-mask", &priv->exec_units);
3282 of_property_read_u32(np, "fsl,descriptor-types-mask",
3283 &priv->desc_types);
3285 if (!is_power_of_2(priv->num_channels) || !priv->chfifo_len ||
3286 !priv->exec_units || !priv->desc_types) {
3287 dev_err(dev, "invalid property data in device tree node\n");
3288 err = -EINVAL;
3289 goto err_out;
3292 if (of_device_is_compatible(np, "fsl,sec3.0"))
3293 priv->features |= TALITOS_FTR_SRC_LINK_TBL_LEN_INCLUDES_EXTENT;
3295 if (of_device_is_compatible(np, "fsl,sec2.1"))
3296 priv->features |= TALITOS_FTR_HW_AUTH_CHECK |
3297 TALITOS_FTR_SHA224_HWINIT |
3298 TALITOS_FTR_HMAC_OK;
3300 if (of_device_is_compatible(np, "fsl,sec1.0"))
3301 priv->features |= TALITOS_FTR_SEC1;
3303 if (of_device_is_compatible(np, "fsl,sec1.2")) {
3304 priv->reg_deu = priv->reg + TALITOS12_DEU;
3305 priv->reg_aesu = priv->reg + TALITOS12_AESU;
3306 priv->reg_mdeu = priv->reg + TALITOS12_MDEU;
3307 stride = TALITOS1_CH_STRIDE;
3308 } else if (of_device_is_compatible(np, "fsl,sec1.0")) {
3309 priv->reg_deu = priv->reg + TALITOS10_DEU;
3310 priv->reg_aesu = priv->reg + TALITOS10_AESU;
3311 priv->reg_mdeu = priv->reg + TALITOS10_MDEU;
3312 priv->reg_afeu = priv->reg + TALITOS10_AFEU;
3313 priv->reg_rngu = priv->reg + TALITOS10_RNGU;
3314 priv->reg_pkeu = priv->reg + TALITOS10_PKEU;
3315 stride = TALITOS1_CH_STRIDE;
3316 } else {
3317 priv->reg_deu = priv->reg + TALITOS2_DEU;
3318 priv->reg_aesu = priv->reg + TALITOS2_AESU;
3319 priv->reg_mdeu = priv->reg + TALITOS2_MDEU;
3320 priv->reg_afeu = priv->reg + TALITOS2_AFEU;
3321 priv->reg_rngu = priv->reg + TALITOS2_RNGU;
3322 priv->reg_pkeu = priv->reg + TALITOS2_PKEU;
3323 priv->reg_keu = priv->reg + TALITOS2_KEU;
3324 priv->reg_crcu = priv->reg + TALITOS2_CRCU;
3325 stride = TALITOS2_CH_STRIDE;
3328 err = talitos_probe_irq(ofdev);
3329 if (err)
3330 goto err_out;
3332 if (of_device_is_compatible(np, "fsl,sec1.0")) {
3333 if (priv->num_channels == 1)
3334 tasklet_init(&priv->done_task[0], talitos1_done_ch0,
3335 (unsigned long)dev);
3336 else
3337 tasklet_init(&priv->done_task[0], talitos1_done_4ch,
3338 (unsigned long)dev);
3339 } else {
3340 if (priv->irq[1]) {
3341 tasklet_init(&priv->done_task[0], talitos2_done_ch0_2,
3342 (unsigned long)dev);
3343 tasklet_init(&priv->done_task[1], talitos2_done_ch1_3,
3344 (unsigned long)dev);
3345 } else if (priv->num_channels == 1) {
3346 tasklet_init(&priv->done_task[0], talitos2_done_ch0,
3347 (unsigned long)dev);
3348 } else {
3349 tasklet_init(&priv->done_task[0], talitos2_done_4ch,
3350 (unsigned long)dev);
3354 priv->chan = devm_kzalloc(dev, sizeof(struct talitos_channel) *
3355 priv->num_channels, GFP_KERNEL);
3356 if (!priv->chan) {
3357 dev_err(dev, "failed to allocate channel management space\n");
3358 err = -ENOMEM;
3359 goto err_out;
3362 priv->fifo_len = roundup_pow_of_two(priv->chfifo_len);
3364 for (i = 0; i < priv->num_channels; i++) {
3365 priv->chan[i].reg = priv->reg + stride * (i + 1);
3366 if (!priv->irq[1] || !(i & 1))
3367 priv->chan[i].reg += TALITOS_CH_BASE_OFFSET;
3369 spin_lock_init(&priv->chan[i].head_lock);
3370 spin_lock_init(&priv->chan[i].tail_lock);
3372 priv->chan[i].fifo = devm_kzalloc(dev,
3373 sizeof(struct talitos_request) *
3374 priv->fifo_len, GFP_KERNEL);
3375 if (!priv->chan[i].fifo) {
3376 dev_err(dev, "failed to allocate request fifo %d\n", i);
3377 err = -ENOMEM;
3378 goto err_out;
3381 atomic_set(&priv->chan[i].submit_count,
3382 -(priv->chfifo_len - 1));
3385 dma_set_mask(dev, DMA_BIT_MASK(36));
3387 /* reset and initialize the h/w */
3388 err = init_device(dev);
3389 if (err) {
3390 dev_err(dev, "failed to initialize device\n");
3391 goto err_out;
3394 /* register the RNG, if available */
3395 if (hw_supports(dev, DESC_HDR_SEL0_RNG)) {
3396 err = talitos_register_rng(dev);
3397 if (err) {
3398 dev_err(dev, "failed to register hwrng: %d\n", err);
3399 goto err_out;
3400 } else
3401 dev_info(dev, "hwrng\n");
3404 /* register crypto algorithms the device supports */
3405 for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
3406 if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
3407 struct talitos_crypto_alg *t_alg;
3408 struct crypto_alg *alg = NULL;
3410 t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
3411 if (IS_ERR(t_alg)) {
3412 err = PTR_ERR(t_alg);
3413 if (err == -ENOTSUPP)
3414 continue;
3415 goto err_out;
3418 switch (t_alg->algt.type) {
3419 case CRYPTO_ALG_TYPE_ABLKCIPHER:
3420 err = crypto_register_alg(
3421 &t_alg->algt.alg.crypto);
3422 alg = &t_alg->algt.alg.crypto;
3423 break;
3425 case CRYPTO_ALG_TYPE_AEAD:
3426 err = crypto_register_aead(
3427 &t_alg->algt.alg.aead);
3428 alg = &t_alg->algt.alg.aead.base;
3429 break;
3431 case CRYPTO_ALG_TYPE_AHASH:
3432 err = crypto_register_ahash(
3433 &t_alg->algt.alg.hash);
3434 alg = &t_alg->algt.alg.hash.halg.base;
3435 break;
3437 if (err) {
3438 dev_err(dev, "%s alg registration failed\n",
3439 alg->cra_driver_name);
3440 devm_kfree(dev, t_alg);
3441 } else
3442 list_add_tail(&t_alg->entry, &priv->alg_list);
3445 if (!list_empty(&priv->alg_list))
3446 dev_info(dev, "%s algorithms registered in /proc/crypto\n",
3447 (char *)of_get_property(np, "compatible", NULL));
3449 return 0;
3451 err_out:
3452 talitos_remove(ofdev);
3454 return err;
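/*
 * Editor's note -- a usage sketch (not driver code): once probe has
 * registered the algorithms, kernel users reach them through the
 * generic crypto API, with the same request/wait pattern keyhash()
 * uses above.  Assumes `data` is linearly mapped and `digest` holds
 * SHA256_DIGEST_SIZE bytes; error handling abbreviated; the function
 * name is hypothetical.
 */
static int talitos_digest_user_sketch(const u8 *data, unsigned int len,
				      u8 *digest)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0); /* may pick sha256-talitos */
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_init_wait(&wait);
	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	sg_init_one(&sg, data, len);
	ahash_request_set_crypt(req, &sg, digest, len);

	ret = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}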
3457 static const struct of_device_id talitos_match[] = {
3458 #ifdef CONFIG_CRYPTO_DEV_TALITOS1
3460 .compatible = "fsl,sec1.0",
3462 #endif
3463 #ifdef CONFIG_CRYPTO_DEV_TALITOS2
3465 .compatible = "fsl,sec2.0",
3467 #endif
3470 MODULE_DEVICE_TABLE(of, talitos_match);
3472 static struct platform_driver talitos_driver = {
3473 .driver = {
3474 .name = "talitos",
3475 .of_match_table = talitos_match,
3477 .probe = talitos_probe,
3478 .remove = talitos_remove,
3481 module_platform_driver(talitos_driver);
3483 MODULE_LICENSE("GPL");
3484 MODULE_AUTHOR("Kim Phillips <kim.phillips@freescale.com>");
3485 MODULE_DESCRIPTION("Freescale integrated security engine (SEC) driver");