/* $Id: $ */

/*
 * Copyright (c) 2008 Daniel Mueller (daniel@danm.de)
 * Copyright (c) 2007 David McCullough (david_mccullough@securecomputing.com)
 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * Effort sponsored in part by the Defense Advanced Research Projects
 * Agency (DARPA) and Air Force Research Laboratory, Air Force
 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
 */
#undef UBSEC_DEBUG
#undef UBSEC_VERBOSE_DEBUG

#ifdef UBSEC_VERBOSE_DEBUG
#define UBSEC_DEBUG
#endif
/*
 * uBsec BCM5365 hardware crypto accelerator
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/proc_fs.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/stat.h>
#include <asm/io.h>

#include <linux/ssb/ssb.h>
/*
 * BSD queue
 */
#include "bsdqueue.h"
/*
 * OCF
 */
#include "cryptodev.h"
#include "uio.h"
#define HMAC_HACK 1

#ifdef HMAC_HACK
#include "hmachack.h"
#include "md5.h"
#include "md5.c"
#include "sha1.h"
#include "sha1.c"
#endif
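/*
 * Note that md5.c and sha1.c are #included as sources above, so their
 * helpers become part of this translation unit; they must not also be
 * compiled and linked separately.
 */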
#include "ubsecreg.h"
#include "ubsecvar.h"

#define DRV_MODULE_NAME		"ubsec_ssb"
#define PFX			DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION	"0.02"
#define DRV_MODULE_RELDATE	"Feb 21, 2009"
#if 1
#define DPRINTF(a...) \
	do { \
		if (debug) \
			printk(DRV_MODULE_NAME ": " a); \
	} while (0)
#else
#define DPRINTF(a...)
#endif
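/*
 * DPRINTF() is always compiled in (see "#if 1" above) but only prints
 * when the module is loaded with debugging enabled, e.g.:
 *
 *   insmod ubsec_ssb.ko debug=1
 *
 * The do { ... } while (0) wrapper keeps the macro safe to use in
 * unbraced if/else bodies.
 */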
/*
 * Prototypes
 */
static irqreturn_t ubsec_ssb_isr(int, void *, struct pt_regs *);
static int __devinit ubsec_ssb_probe(struct ssb_device *sdev,
	const struct ssb_device_id *ent);
static void __devexit ubsec_ssb_remove(struct ssb_device *sdev);
int ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
	struct device *self);
static void ubsec_setup_mackey(struct ubsec_session *ses, int algo,
	caddr_t key, int klen);
static int dma_map_skb(struct ubsec_softc *sc,
	struct ubsec_dma_alloc *q_map, struct sk_buff *skb, int *mlen);
static int dma_map_uio(struct ubsec_softc *sc,
	struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen);
static void dma_unmap(struct ubsec_softc *sc,
	struct ubsec_dma_alloc *q_map, int mlen);
static int ubsec_dmamap_aligned(struct ubsec_softc *sc,
	const struct ubsec_dma_alloc *q_map, int mlen);

#ifdef UBSEC_DEBUG
static int proc_read(char *buf, char **start, off_t offset,
	int size, int *peof, void *data);
#endif
void ubsec_reset_board(struct ubsec_softc *);
void ubsec_init_board(struct ubsec_softc *);
void ubsec_cleanchip(struct ubsec_softc *);
void ubsec_totalreset(struct ubsec_softc *);
int  ubsec_free_q(struct ubsec_softc *, struct ubsec_q *);

static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
static int ubsec_freesession(device_t, u_int64_t);
static int ubsec_process(device_t, struct cryptop *, int);

void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
void ubsec_feed(struct ubsec_softc *);
void ubsec_mcopy(struct sk_buff *, struct sk_buff *, int, int);
void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
int  ubsec_dma_malloc(struct ubsec_softc *, struct ubsec_dma_alloc *,
	size_t, int);

/* DEBUG crap... */
void ubsec_dump_pb(struct ubsec_pktbuf *);
void ubsec_dump_mcr(struct ubsec_mcr *);
/* No trailing semicolons here: the macros must be usable in expressions. */
#define READ_REG(sc,r) \
	ssb_read32((sc)->sdev, (r))
#define WRITE_REG(sc,r,val) \
	ssb_write32((sc)->sdev, (r), (val))
#define READ_REG_SDEV(sdev,r) \
	ssb_read32((sdev), (r))
#define WRITE_REG_SDEV(sdev,r,val) \
	ssb_write32((sdev), (r), (val))
#define SWAP32(x) (x) = htole32(ntohl((x)))
#define HTOLE32(x) (x) = htole32(x)
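/*
 * Both macros convert a word in place: SWAP32() turns a big-endian
 * (network order) value into the little-endian layout the crypto core
 * expects for keys and IVs; HTOLE32() converts a host-order value and
 * compiles to a no-op on little-endian systems.
 */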
#ifdef __LITTLE_ENDIAN
#define letoh16(x) (x)
#define letoh32(x) (x)
#endif

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Enable debug output");

#define UBSEC_SSB_MAX_CHIPS 1
static struct ubsec_softc *ubsec_chip_idx[UBSEC_SSB_MAX_CHIPS];
static struct ubsec_stats ubsecstats;

#ifdef UBSEC_DEBUG
static struct proc_dir_entry *procdebug;
#endif
static struct ssb_device_id ubsec_ssb_tbl[] = {
	/* Broadcom BCM5365P IPSec Core */
	SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_IPSEC, SSB_ANY_REV),
	SSB_DEVTABLE_END
};
static struct ssb_driver ubsec_ssb_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= ubsec_ssb_tbl,
	.probe		= ubsec_ssb_probe,
	.remove		= __devexit_p(ubsec_ssb_remove),
	/*
	.suspend	= ubsec_ssb_suspend,
	.resume		= ubsec_ssb_resume
	*/
};
static device_method_t ubsec_ssb_methods = {
	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	ubsec_newsession),
	DEVMETHOD(cryptodev_freesession, ubsec_freesession),
	DEVMETHOD(cryptodev_process,	ubsec_process),
};
#ifdef UBSEC_DEBUG
static int
proc_read(char *buf, char **start, off_t offset,
    int size, int *peof, void *data)
{
	int i = 0, byteswritten = 0, ret;
	unsigned int stat, ctrl;
#ifdef UBSEC_VERBOSE_DEBUG
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
#endif

	while ((i < UBSEC_SSB_MAX_CHIPS) && (ubsec_chip_idx[i] != NULL))
	{
		struct ubsec_softc *sc = ubsec_chip_idx[i];

		stat = READ_REG(sc, BS_STAT);
		ctrl = READ_REG(sc, BS_CTRL);
		ret = snprintf((buf + byteswritten),
		    (size - byteswritten),
		    "DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);

		byteswritten += ret;

#ifdef UBSEC_VERBOSE_DEBUG
		printf("DEV %d, DMASTAT %08x, DMACTRL %08x\n", i, stat, ctrl);

		/* Dump all queues MCRs */
		if (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			BSD_SIMPLEQ_FOREACH(q, &sc->sc_qchip, q_next)
			{
				dmap = q->q_dma;
				ubsec_dump_mcr(&dmap->d_dma->d_mcr);
			}
		}
#endif

		i++;
	}

	*peof = 1;

	return byteswritten;
}
#endif
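/*
 * The two mapping helpers below translate the OCF buffer types (sk_buff
 * chains and uio/iovec lists) into the flat q_map[] array of physical
 * segments from which the chip's packet buffer descriptors are built.
 * For an sk_buff, the linear head and each page fragment occupy one
 * slot each, hence the nr_frags + 1 check against UBS_MAX_SCATTER.
 */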
/*
 * map in a given sk_buff
 */
static int
dma_map_skb(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct sk_buff *skb, int *mlen)
{
	int i = 0;
	dma_addr_t tmp;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	/*
	 * We support only a limited number of fragments.
	 */
	if (unlikely((skb_shinfo(skb)->nr_frags + 1) >= UBS_MAX_SCATTER))
	{
		printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
		return (-ENOMEM);
	}

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, 0, (unsigned int)skb->data, skb_headlen(skb));
#endif

	/* first data segment (skb linear head) */
	tmp = dma_map_single(sc->sc_dv,
	    skb->data,
	    skb_headlen(skb),
	    DMA_BIDIRECTIONAL);

	q_map[i].dma_paddr = tmp;
	q_map[i].dma_vaddr = skb->data;
	q_map[i].dma_size  = skb_headlen(skb);

	if (unlikely(tmp == 0))
	{
		printk(KERN_ERR "Could not map memory region for dma.\n");
		return (-EINVAL);
	}

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, 0, (unsigned int)tmp);
#endif

	/* all other data segments (page fragments) */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {

#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, i + 1,
		    (unsigned int)page_address(skb_shinfo(skb)->frags[i].page) +
		    skb_shinfo(skb)->frags[i].page_offset, skb_shinfo(skb)->frags[i].size);
#endif

		tmp = dma_map_single(sc->sc_dv,
		    page_address(skb_shinfo(skb)->frags[i].page) +
		    skb_shinfo(skb)->frags[i].page_offset,
		    skb_shinfo(skb)->frags[i].size,
		    DMA_BIDIRECTIONAL);

		q_map[i + 1].dma_paddr = tmp;
		q_map[i + 1].dma_vaddr = (void *)(page_address(skb_shinfo(skb)->frags[i].page) +
		    skb_shinfo(skb)->frags[i].page_offset);
		q_map[i + 1].dma_size  = skb_shinfo(skb)->frags[i].size;

		if (unlikely(tmp == 0))
		{
			printk(KERN_ERR "Could not map memory region for dma.\n");
			return (-EINVAL);
		}

#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, i + 1, (unsigned int)tmp);
#endif
	}

	*mlen = i + 1;

	return (0);
}
/*
 * map in a given uio buffer
 */
static int
dma_map_uio(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, struct uio *uio, int *mlen)
{
	struct iovec *iov = uio->uio_iov;
	int n;
	dma_addr_t tmp;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	/*
	 * We support only a limited number of fragments.
	 */
	if (unlikely(uio->uio_iovcnt >= UBS_MAX_SCATTER))
	{
		printk(KERN_ERR "Only %d scatter fragments are supported.\n", UBS_MAX_SCATTER);
		return (-ENOMEM);
	}

	for (n = 0; n < uio->uio_iovcnt; n++) {
#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("%s - map %d 0x%x %d\n", __FUNCTION__, n, (unsigned int)iov->iov_base, iov->iov_len);
#endif
		tmp = dma_map_single(sc->sc_dv,
		    iov->iov_base,
		    iov->iov_len,
		    DMA_BIDIRECTIONAL);

		q_map[n].dma_paddr = tmp;
		q_map[n].dma_vaddr = iov->iov_base;
		q_map[n].dma_size  = iov->iov_len;

		if (unlikely(tmp == 0))
		{
			printk(KERN_ERR "Could not map memory region for dma.\n");
			return (-EINVAL);
		}

#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("%s - map %d done physical addr 0x%x\n", __FUNCTION__, n, (unsigned int)tmp);
#endif

		iov++;
	}

	*mlen = n;

	return (0);
}
static void
dma_unmap(struct ubsec_softc *sc, struct ubsec_dma_alloc *q_map, int mlen)
{
	int i;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	for (i = 0; i < mlen; i++)
	{
#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("%s - unmap %d 0x%x %d\n", __FUNCTION__, i, (unsigned int)q_map[i].dma_paddr, q_map[i].dma_size);
#endif
		dma_unmap_single(sc->sc_dv,
		    q_map[i].dma_paddr,
		    q_map[i].dma_size,
		    DMA_BIDIRECTIONAL);
	}
	return;
}
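/*
 * Example: segments of 64 and 60 bytes on word-aligned addresses pass
 * the check below; a 13-byte first segment (not a multiple of 4) or an
 * odd start address fails it, and such requests are rejected, since the
 * copying fallback (under NOTYET in ubsec_process()) is not compiled in.
 */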
/*
 * Is the operand suitably aligned for direct DMA? Each
 * segment must be aligned on a 32-bit boundary and all
 * but the last segment must be a multiple of 4 bytes.
 */
static int
ubsec_dmamap_aligned(struct ubsec_softc *sc, const struct ubsec_dma_alloc *q_map, int mlen)
{
	int i;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	for (i = 0; i < mlen; i++) {
		if (q_map[i].dma_paddr & 3)
			return (0);
		if (i != (mlen - 1) && (q_map[i].dma_size & 3))
			return (0);
	}
	return (1);
}
#define N(a) (sizeof(a) / sizeof(a[0]))
static void
ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
{
#ifdef HMAC_HACK
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.md5_st8, ses->ses_hminner, sizeof(md5ctx.md5_st8));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_ipad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.md5_st8, ses->ses_hmouter, sizeof(md5ctx.md5_st8));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_opad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;
#else /* HMAC_HACK */
	DPRINTF("md5/sha not implemented\n");
#endif /* HMAC_HACK */
}
#undef N
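/*
 * The function above applies the usual HMAC precomputation: it hashes
 * key XOR ipad and key XOR opad once per session and stores the two
 * intermediate digest states in ses_hminner/ses_hmouter, so the
 * hardware only has to continue those states over the packet data.
 * The final XOR with HMAC_OPAD_VAL restores the caller's key buffer.
 */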
static int
__devinit ubsec_ssb_probe(struct ssb_device *sdev,
    const struct ssb_device_id *ent)
{
	int err;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	err = ssb_bus_powerup(sdev->bus, 0);
	if (err) {
		dev_err(sdev->dev, "Failed to powerup the bus\n");
		goto err_out;
	}

	err = request_irq(sdev->irq, (irq_handler_t)ubsec_ssb_isr,
	    IRQF_DISABLED | IRQF_SHARED, DRV_MODULE_NAME, sdev);
	if (err) {
		dev_err(sdev->dev, "Could not request irq\n");
		goto err_out_powerdown;
	}

	err = ssb_dma_set_mask(sdev, DMA_32BIT_MASK);
	if (err) {
		dev_err(sdev->dev,
		    "Required 32BIT DMA mask unsupported by the system.\n");
		goto err_out_free_irq;
	}

	printk(KERN_INFO "Sentry5(tm) ROBOGateway(tm) IPSec Core at IRQ %u\n",
	    sdev->irq);

	DPRINTF("Vendor: %x, core id: %x, revision: %x\n",
	    sdev->id.vendor, sdev->id.coreid, sdev->id.revision);

	ssb_device_enable(sdev, 0);

	if (ubsec_attach(sdev, ent, sdev->dev) != 0) {
		/* make sure a failed attach reports an error to the caller */
		err = -ENODEV;
		goto err_out_disable;
	}

#ifdef UBSEC_DEBUG
	procdebug = create_proc_entry(DRV_MODULE_NAME, S_IRUSR, NULL);
	if (procdebug)
	{
		procdebug->read_proc = proc_read;
		procdebug->data = NULL;
	} else
		DPRINTF("Unable to create proc file.\n");
#endif

	return 0;

err_out_disable:
	ssb_device_disable(sdev, 0);

err_out_free_irq:
	free_irq(sdev->irq, sdev);

err_out_powerdown:
	ssb_bus_may_powerdown(sdev->bus);

err_out:
	return err;
}
static void __devexit ubsec_ssb_remove(struct ssb_device *sdev)
{
	struct ubsec_softc *sc;
	unsigned int ctrlflgs;
	struct ubsec_dma *dmap;
	u_int32_t i;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	ctrlflgs = READ_REG_SDEV(sdev, BS_CTRL);
	/* disable all IPSec Core interrupts globally */
	ctrlflgs ^= (BS_CTRL_MCR1INT | BS_CTRL_MCR2INT |
	    BS_CTRL_DMAERR);
	WRITE_REG_SDEV(sdev, BS_CTRL, ctrlflgs);

	free_irq(sdev->irq, sdev);

	sc = (struct ubsec_softc *)ssb_get_drvdata(sdev);

	/* unregister all crypto algorithms */
	crypto_unregister_all(sc->sc_cid);

	/* Free queue / dma memory */
	for (i = 0; i < UBS_MAX_NQUEUE; i++) {
		struct ubsec_q *q;

		q = sc->sc_queuea[i];
		if (q != NULL)
		{
			dmap = q->q_dma;
			if (dmap != NULL)
			{
				ubsec_dma_free(sc, &dmap->d_alloc);
				q->q_dma = NULL;
			}
			kfree(q);
		}
		sc->sc_queuea[i] = NULL;
	}

	ssb_device_disable(sdev, 0);
	ssb_bus_may_powerdown(sdev->bus);
	ssb_set_drvdata(sdev, NULL);

#ifdef UBSEC_DEBUG
	if (procdebug)
		remove_proc_entry(DRV_MODULE_NAME, NULL);
#endif
}
int
ubsec_attach(struct ssb_device *sdev, const struct ssb_device_id *ent,
    struct device *self)
{
	struct ubsec_softc *sc = NULL;
	struct ubsec_dma *dmap;
	u_int32_t i;
	static int num_chips = 0;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	sc = (struct ubsec_softc *)kmalloc(sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return (-ENOMEM);
	memset(sc, 0, sizeof(*sc));

	sc->sc_dv = sdev->dev;
	sc->sdev = sdev;

	spin_lock_init(&sc->sc_ringmtx);

	softc_device_init(sc, "ubsec_ssb", num_chips, ubsec_ssb_methods);

	/* Maybe someday there are boards with more than one chip available */
	if (num_chips < UBSEC_SSB_MAX_CHIPS) {
		ubsec_chip_idx[device_get_unit(sc->sc_dev)] = sc;
		num_chips++;
	}

	ssb_set_drvdata(sdev, sc);

	BSD_SIMPLEQ_INIT(&sc->sc_queue);
	BSD_SIMPLEQ_INIT(&sc->sc_qchip);
	BSD_SIMPLEQ_INIT(&sc->sc_queue2);
	BSD_SIMPLEQ_INIT(&sc->sc_qchip2);
	BSD_SIMPLEQ_INIT(&sc->sc_q2free);

	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;

	sc->sc_cid = crypto_get_driverid(softc_get_device(sc), CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(sc->sc_dev, "could not get crypto driver id\n");
		return -1;
	}

	BSD_SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)kmalloc(sizeof(struct ubsec_q), GFP_KERNEL);
		if (q == NULL) {
			printf(": can't allocate queue buffers\n");
			break;
		}

		if (ubsec_dma_malloc(sc, &dmap->d_alloc, sizeof(struct ubsec_dmachunk), 0)) {
			printf(": can't allocate dma buffers\n");
			kfree(q);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}
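	/*
	 * Each request descriptor is backed by a coherent DMA chunk that is
	 * allocated once here and then recycled through sc_freequeue for
	 * the lifetime of the driver, so the hot path never allocates.
	 */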
	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

	/* supported crypto algorithms */
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);

	if (sc->sc_flags & UBS_FLAGS_AES) {
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
		printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES AES128 AES192 AES256 MD5_HMAC SHA1_HMAC\n");
	}
	else
		printf(KERN_INFO DRV_MODULE_NAME ": DES 3DES MD5_HMAC SHA1_HMAC\n");

	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);

	return 0;
}
/*
 * UBSEC Interrupt routine
 */
static irqreturn_t
ubsec_ssb_isr(int irq, void *arg, struct pt_regs *regs)
{
	struct ubsec_softc *sc = NULL;
	volatile u_int32_t stat;
	struct ubsec_q *q;
	struct ubsec_dma *dmap;
	int npkts = 0, i;

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	sc = (struct ubsec_softc *)ssb_get_drvdata(arg);

	stat = READ_REG(sc, BS_STAT);

	stat &= sc->sc_statmask;
	if (stat == 0)
		return IRQ_NONE;

	WRITE_REG(sc, BS_STAT, stat);	/* IACK */

	/*
	 * Check to see if we have any packets waiting for us
	 */
	if ((stat & BS_STAT_MCR1_DONE)) {
		while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
			q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
			dmap = q->q_dma;

			if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
			{
				DPRINTF("error while processing MCR. Flags = %x\n", dmap->d_dma->d_mcr.mcr_flags);
				break;
			}

			BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);

			npkts = q->q_nstacked_mcrs;
			/*
			 * search for further sc_qchip ubsec_q's that share
			 * the same MCR, and complete them too, they must be
			 * at the top.
			 */
			for (i = 0; i < npkts; i++) {
				if (q->q_stacked_mcr[i])
					ubsec_callback(sc, q->q_stacked_mcr[i]);
				else
					break;
			}
			ubsec_callback(sc, q);
		}

		/*
		 * Don't send any more packets to the chip if there has been
		 * a DMAERR.
		 */
		if (likely(!(stat & BS_STAT_DMAERR)))
			ubsec_feed(sc);
		else
			DPRINTF("DMA error occurred. Stop feeding crypto chip.\n");
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & BS_STAT_DMAERR) {
		volatile u_int32_t a = READ_REG(sc, BS_ERR);

		printf(KERN_ERR "%s: dmaerr %s@%08x\n", DRV_MODULE_NAME,
		    (a & BS_ERR_READ) ? "read" : "write", a & BS_ERR_ADDR);

		ubsecstats.hst_dmaerr++;
		ubsec_totalreset(sc);
		ubsec_feed(sc);
	}

	return IRQ_HANDLED;
}
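/*
 * The feed path below exploits the chip's ability to chain several
 * packets behind one master command record (MCR): when two or more
 * requests are queued, up to UBS_MAX_AGGR of them are stacked into a
 * single MCR, saving one register write and one interrupt per extra
 * packet.
 */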
/*
 * ubsec_feed() - aggregate and post requests to chip
 * It is assumed that the caller set splnet()
 */
void
ubsec_feed(struct ubsec_softc *sc)
{
#ifdef UBSEC_VERBOSE_DEBUG
	static int max;
#endif
	struct ubsec_q *q, *q2;
	int npkts, i;
	void *v;
	u_int32_t stat;

	npkts = sc->sc_nqueue;
	if (npkts > UBS_MAX_AGGR)
		npkts = UBS_MAX_AGGR;
	if (npkts < 2)
		goto feed1;

	stat = READ_REG(sc, BS_STAT);

	if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
		if (stat & BS_STAT_DMAERR) {
			ubsec_totalreset(sc);
			ubsecstats.hst_dmaerr++;
		}
		return;
	}

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("merging %d records\n", npkts);

	/* XXX temporary aggregation statistics reporting code */
	if (max < npkts) {
		max = npkts;
		DPRINTF("%s: new max aggregate %d\n", DRV_MODULE_NAME, max);
	}
#endif /* UBSEC_VERBOSE_DEBUG */

	q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);
	BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
	--sc->sc_nqueue;

#if 0
	/*
	 * XXX
	 * We use dma_map_single() - no sync required!
	 */
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
	if (q->q_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif

	q->q_nstacked_mcrs = npkts - 1;		/* Number of packets stacked */

	for (i = 0; i < q->q_nstacked_mcrs; i++) {
		q2 = BSD_SIMPLEQ_FIRST(&sc->sc_queue);

#if 0
		bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
		    0, q2->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q2->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
			    0, q2->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif
		BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
		--sc->sc_nqueue;

		v = ((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
		    sizeof(struct ubsec_mcr_add);
		bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
		q->q_stacked_mcr[i] = q2;
	}
	q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
	BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
#if 0
	bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
	    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif
	WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("feed (1): q->chip %p %08x %08x\n", q,
	    (u_int32_t)q->q_dma->d_alloc.dma_paddr,
	    (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_mcr)));
#endif /* UBSEC_VERBOSE_DEBUG */
	return;

feed1:
	while (!BSD_SIMPLEQ_EMPTY(&sc->sc_queue)) {
		stat = READ_REG(sc, BS_STAT);

		if (stat & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
			if (stat & BS_STAT_DMAERR) {
				ubsec_totalreset(sc);
				ubsecstats.hst_dmaerr++;
			}
			break;
		}

		q = BSD_SIMPLEQ_FIRST(&sc->sc_queue);

#if 0
		bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
		    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		if (q->q_dst_map != NULL)
			bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
			    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_PREREAD);
		bus_dmamap_sync(sc->sc_dmat, q->q_dma->d_alloc.dma_map,
		    0, q->q_dma->d_alloc.dma_map->dm_mapsize,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
#endif

		WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_mcr));
#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("feed (2): q->chip %p %08x %08x\n", q,
		    (u_int32_t)q->q_dma->d_alloc.dma_paddr,
		    (u_int32_t)(q->q_dma->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_mcr)));
#endif /* UBSEC_VERBOSE_DEBUG */
		BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
		--sc->sc_nqueue;
		BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
	}
}
/*
 * Allocate a new 'session' and return an encoded session id. 'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct ubsec_softc *sc = NULL;
	struct ubsec_session *ses = NULL;
	int sesn, i;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);

	if (sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}

	if (encini == NULL && macini == NULL)
		return (EINVAL);

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct ubsec_session *)kmalloc(
		    sizeof(struct ubsec_session), SLAB_ATOMIC);
		if (ses == NULL)
			return (ENOMEM);
		memset(ses, 0, sizeof(struct ubsec_session));
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct ubsec_session *)kmalloc((sesn + 1) *
			    sizeof(struct ubsec_session), SLAB_ATOMIC);
			if (ses == NULL)
				return (ENOMEM);
			memset(ses, 0, (sesn + 1) * sizeof(struct ubsec_session));
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct ubsec_session));
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct ubsec_session));
			kfree(sc->sc_sessions);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct ubsec_session));
	ses->ses_used = 1;

	if (encini) {
		/* get an IV */
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		/* Go ahead and compute key in ubsec's byte order */
		if (encini->cri_alg == CRYPTO_DES_CBC) {
			/* DES uses the same key three times:
			 * 1st encrypt -> 2nd decrypt -> 3rd encrypt,
			 * so the 192-bit key size below is fake; the real
			 * key is only 64 bits (56 effective). */
			bcopy(encini->cri_key, &ses->ses_key[0], 8);
			bcopy(encini->cri_key, &ses->ses_key[2], 8);
			bcopy(encini->cri_key, &ses->ses_key[4], 8);
			ses->ses_keysize = 192;
		} else if (encini->cri_alg == CRYPTO_3DES_CBC) {
			bcopy(encini->cri_key, ses->ses_key, 24);
			ses->ses_keysize = 192;
		} else if (encini->cri_alg == CRYPTO_AES_CBC) {
			ses->ses_keysize = encini->cri_klen;

			if (ses->ses_keysize != 128 &&
			    ses->ses_keysize != 192 &&
			    ses->ses_keysize != 256)
			{
				DPRINTF("unsupported AES key size: %d\n", ses->ses_keysize);
				return (EINVAL);
			}
			bcopy(encini->cri_key, ses->ses_key, (ses->ses_keysize / 8));
		}

		/* Hardware requires the keys in little endian byte order */
		for (i = 0; i < (ses->ses_keysize / 32); i++)
			SWAP32(ses->ses_key[i]);
	}

	if (macini) {
		ses->ses_mlen = macini->cri_mlen;

		if (ses->ses_mlen == 0 ||
		    ses->ses_mlen > SHA1_HASH_LEN) {

			if (macini->cri_alg == CRYPTO_MD5_HMAC ||
			    macini->cri_alg == CRYPTO_SHA1_HMAC)
			{
				ses->ses_mlen = DEFAULT_HMAC_LEN;
			} else
			{
				/*
				 * Reserved for future usage. MD5/SHA1 calculations have
				 * different hash sizes.
				 */
				printk(KERN_ERR DRV_MODULE_NAME ": unsupported hash operation with mac/hash len: %d\n", ses->ses_mlen);
				return (EINVAL);
			}
		}

		if (macini->cri_key != NULL) {
			ubsec_setup_mackey(ses, macini->cri_alg, macini->cri_key,
			    macini->cri_klen / 8);
		}
	}

	*sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}
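/*
 * The returned session id is a packed value: UBSEC_SID() combines the
 * device unit number with the index into sc_sessions, and
 * UBSEC_SESSION() recovers that index again in ubsec_freesession() and
 * ubsec_process().
 */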
/*
 * Deallocate a session.
 */
static int
ubsec_freesession(device_t dev, u_int64_t tid)
{
	struct ubsec_softc *sc = device_get_softc(dev);
	int session;
	u_int32_t sid = ((u_int32_t)tid) & 0xffffffff;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	if (sc == NULL)
		return (EINVAL);

	session = UBSEC_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
		return (0);
	} else
		return (EINVAL);
}
static int
ubsec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct ubsec_q *q = NULL;
	int err = 0, i, j, nicealign;
	struct ubsec_softc *sc = device_get_softc(dev);
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int encoffset = 0, macoffset = 0, cpskip, cpoffset;
	int sskip, dskip, stheend, dtheend, ivsize = 8;
	int16_t coffset;
	struct ubsec_session *ses;
	struct ubsec_generic_ctx ctx;
	struct ubsec_dma *dmap = NULL;
	unsigned long flags;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	if (unlikely(crp == NULL || crp->crp_callback == NULL)) {
		ubsecstats.hst_invalid++;
		return (EINVAL);
	}

	if (unlikely(sc == NULL))
		return (EINVAL);

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("spin_lock_irqsave\n");
#endif
	spin_lock_irqsave(&sc->sc_ringmtx, flags);
	//spin_lock_irq(&sc->sc_ringmtx);

	if (BSD_SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
		ubsecstats.hst_queuefull++;
#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("spin_unlock_irqrestore\n");
#endif
		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
		//spin_unlock_irq(&sc->sc_ringmtx);
		err = ENOMEM;
		goto errout2;
	}

	q = BSD_SIMPLEQ_FIRST(&sc->sc_freequeue);
	BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("spin_unlock_irqrestore\n");
#endif
	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	//spin_unlock_irq(&sc->sc_ringmtx);

	dmap = q->q_dma;		/* Save dma pointer */
	bzero(q, sizeof(struct ubsec_q));
	bzero(&ctx, sizeof(ctx));

	q->q_sesn = UBSEC_SESSION(crp->crp_sid);
	q->q_dma = dmap;
	ses = &sc->sc_sessions[q->q_sesn];

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
		q->q_src_m = (struct sk_buff *)crp->crp_buf;
		q->q_dst_m = (struct sk_buff *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		q->q_src_io = (struct uio *)crp->crp_buf;
		q->q_dst_io = (struct uio *)crp->crp_buf;
	} else {
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}
	bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));

	dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
	dmap->d_dma->d_mcr.mcr_flags = 0;
	q->q_crp = crp;

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) {
			maccrd = NULL;
			enccrd = crd1;
		} else {
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			/*
			 * The ubsec cannot handle the requested descriptor order
			 */
			printk(KERN_ERR DRV_MODULE_NAME ": got wrong algorithm/signature order.\n");
			err = EINVAL;
			goto errout;
		}
	}
	/* Encryption/Decryption requested */
	if (enccrd) {
		encoffset = enccrd->crd_skip;

		if (enccrd->crd_alg == CRYPTO_DES_CBC ||
		    enccrd->crd_alg == CRYPTO_3DES_CBC)
		{
			ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
			ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_DES);
			ivsize = 8;	/* [3]DES uses 64bit IVs */
		} else {
			ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_AES);
			ctx.pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC_AES);
			ivsize = 16;	/* AES uses 128bit IVs / [3]DES 64bit IVs */

			switch (ses->ses_keysize)
			{
			case 128:
				ctx.pc_flags |= htole16(UBS_PKTCTX_AES128);
				break;
			case 192:
				ctx.pc_flags |= htole16(UBS_PKTCTX_AES192);
				break;
			case 256:
				ctx.pc_flags |= htole16(UBS_PKTCTX_AES256);
				break;
			default:
				DPRINTF("invalid AES key size: %d\n", ses->ses_keysize);
				err = EINVAL;
				goto errout;
			}
		}

		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			/* Direction: Outbound */

			q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
			} else {
				for (i = 0; i < (ivsize / 4); i++)
					ctx.pc_iv[i] = ses->ses_iv[i];
			}

			/* If there is no IV in the buffer -> copy it here */
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				if (crp->crp_flags & CRYPTO_F_SKBUF)
					/*
					m_copyback(q->q_src_m,
					    enccrd->crd_inject,
					    8, ctx.pc_iv);
					*/
					crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_m,
					    enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
				else if (crp->crp_flags & CRYPTO_F_IOV)
					/*
					cuio_copyback(q->q_src_io,
					    enccrd->crd_inject,
					    8, ctx.pc_iv);
					*/
					crypto_copyback(crp->crp_flags, (caddr_t)q->q_src_io,
					    enccrd->crd_inject, ivsize, (caddr_t)ctx.pc_iv);
			}
		} else {
			/* Direction: Inbound */

			ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				bcopy(enccrd->crd_iv, ctx.pc_iv, ivsize);
			else if (crp->crp_flags & CRYPTO_F_SKBUF)
				/*
				m_copydata(q->q_src_m, enccrd->crd_inject,
				    8, (caddr_t)ctx.pc_iv);
				*/
				crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_m,
				    enccrd->crd_inject, ivsize,
				    (caddr_t)ctx.pc_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV)
				/*
				cuio_copydata(q->q_src_io,
				    enccrd->crd_inject, 8,
				    (caddr_t)ctx.pc_iv);
				*/
				crypto_copydata(crp->crp_flags, (caddr_t)q->q_src_io,
				    enccrd->crd_inject, ivsize,
				    (caddr_t)ctx.pc_iv);
		}

		/* Even though key & IV sizes differ from cipher to cipher
		 * copy / swap the full array lengths. Let the compiler unroll
		 * the loop to increase the cpu pipeline performance... */
		for (i = 0; i < 8; i++)
			ctx.pc_key[i] = ses->ses_key[i];
		for (i = 0; i < 4; i++)
			SWAP32(ctx.pc_iv[i]);
	}
	/* Authentication requested */
	if (maccrd) {
		macoffset = maccrd->crd_skip;

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
		else
			ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);

		for (i = 0; i < 5; i++) {
			ctx.pc_hminner[i] = ses->ses_hminner[i];
			ctx.pc_hmouter[i] = ses->ses_hmouter[i];

			HTOLE32(ctx.pc_hminner[i]);
			HTOLE32(ctx.pc_hmouter[i]);
		}
	}
	if (enccrd && maccrd) {
		/*
		 * ubsec cannot handle packets where the end of encryption
		 * and authentication are not the same, or where the
		 * encrypted part begins before the authenticated part.
		 */
		if (((encoffset + enccrd->crd_len) !=
		    (macoffset + maccrd->crd_len)) ||
		    (enccrd->crd_skip < maccrd->crd_skip)) {
			err = EINVAL;
			goto errout;
		}
		sskip = maccrd->crd_skip;
		cpskip = dskip = enccrd->crd_skip;
		stheend = maccrd->crd_len;
		dtheend = enccrd->crd_len;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
		DPRINTF("mac: skip %d, len %d, inject %d\n",
		    maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
		DPRINTF("enc: skip %d, len %d, inject %d\n",
		    enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
		DPRINTF("src: skip %d, len %d\n", sskip, stheend);
		DPRINTF("dst: skip %d, len %d\n", dskip, dtheend);
		DPRINTF("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
		    coffset, stheend, cpskip, cpoffset);
#endif
	} else {
		cpskip = dskip = sskip = macoffset + encoffset;
		dtheend = stheend = (enccrd) ? enccrd->crd_len : maccrd->crd_len;
		cpoffset = cpskip + dtheend;
		coffset = 0;
	}
	ctx.pc_offset = htole16(coffset >> 2);
#if 0
	if (bus_dmamap_create(sc->sc_dmat, 0xfff0, UBS_MAX_SCATTER,
	    0xfff0, 0, BUS_DMA_NOWAIT, &q->q_src_map) != 0) {
		err = ENOMEM;
		goto errout;
	}
#endif

	if (crp->crp_flags & CRYPTO_F_SKBUF) {
#if 0
		if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
		    q->q_src_m, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			err = ENOMEM;
			goto errout;
		}
#endif
		err = dma_map_skb(sc, q->q_src_map, q->q_src_m, &q->q_src_len);
		if (unlikely(err != 0))
			goto errout;

	} else if (crp->crp_flags & CRYPTO_F_IOV) {
#if 0
		if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
		    q->q_src_io, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
			q->q_src_map = NULL;
			err = ENOMEM;
			goto errout;
		}
#endif
		err = dma_map_uio(sc, q->q_src_map, q->q_src_io, &q->q_src_len);
		if (unlikely(err != 0))
			goto errout;
	}
	/*
	 * Check alignment
	 */
	nicealign = ubsec_dmamap_aligned(sc, q->q_src_map, q->q_src_len);

	dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);

#ifdef UBSEC_DEBUG
	DPRINTF("src skip: %d\n", sskip);
#endif
	for (i = j = 0; i < q->q_src_len; i++) {
		struct ubsec_pktbuf *pb;
		size_t packl = q->q_src_map[i].dma_size;
		dma_addr_t packp = q->q_src_map[i].dma_paddr;

		if (sskip >= packl) {
			sskip -= packl;
			continue;
		}

		packl -= sskip;
		packp += sskip;
		sskip = 0;

		/* maximum fragment size is 0xfffc */
		if (packl > 0xfffc) {
			DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
			err = EIO;
			goto errout;
		}

		if (j == 0)
			pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
		else
			pb = &dmap->d_dma->d_sbuf[j - 1];

		pb->pb_addr = htole32(packp);

		if (stheend) {
			if (packl > stheend) {
				pb->pb_len = htole32(stheend);
				stheend = 0;
			} else {
				pb->pb_len = htole32(packl);
				stheend -= packl;
			}
		} else
			pb->pb_len = htole32(packl);

		if ((i + 1) == q->q_src_len)
			pb->pb_next = 0;
		else
			pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
			    offsetof(struct ubsec_dmachunk, d_sbuf[j]));
		j++;
	}
	if (enccrd == NULL && maccrd != NULL) {
		/* Authentication only */
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
		dmap->d_dma->d_mcr.mcr_opktbuf.pb_next =
		    htole32(dmap->d_alloc.dma_paddr +
		    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
#ifdef UBSEC_DEBUG
		DPRINTF("opkt: %x %x %x\n",
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
		    dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				err = EINVAL;
				goto errout;
			}
#if 0
			if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
			    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
			    &q->q_dst_map) != 0) {
				err = ENOMEM;
				goto errout;
			}
			if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
			    q->q_dst_io, BUS_DMA_NOWAIT) != 0) {
				bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
				q->q_dst_map = NULL;
				goto errout;
			}
#endif

			/* HW shall copy the result into the source memory */
			for (i = 0; i < q->q_src_len; i++)
				q->q_dst_map[i] = q->q_src_map[i];

			q->q_dst_len = q->q_src_len;
			q->q_has_dst = 0;

		} else if (crp->crp_flags & CRYPTO_F_SKBUF) {
			if (nicealign) {

				/* HW shall copy the result into the source memory */
				q->q_dst_m = q->q_src_m;
				for (i = 0; i < q->q_src_len; i++)
					q->q_dst_map[i] = q->q_src_map[i];

				q->q_dst_len = q->q_src_len;
				q->q_has_dst = 0;
			} else {
#ifdef NOTYET
				int totlen, len;
				struct sk_buff *m, *top, **mp;

				totlen = q->q_src_map->dm_mapsize;
				if (q->q_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_DONTWAIT, MT_DATA);
				} else {
					len = MLEN;
					MGET(m, M_DONTWAIT, MT_DATA);
				}
				if (m == NULL) {
					err = ENOMEM;
					goto errout;
				}
				if (len == MHLEN)
					M_DUP_PKTHDR(m, q->q_src_m);
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if (m->m_flags & M_EXT)
						len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_DONTWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							err = ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_DONTWAIT);
						if (m->m_flags & M_EXT)
							len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				q->q_dst_m = top;
				ubsec_mcopy(q->q_src_m, q->q_dst_m,
				    cpskip, cpoffset);
				if (bus_dmamap_create(sc->sc_dmat, 0xfff0,
				    UBS_MAX_SCATTER, 0xfff0, 0, BUS_DMA_NOWAIT,
				    &q->q_dst_map) != 0) {
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dmat,
				    q->q_dst_map, q->q_dst_m,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dmat,
					    q->q_dst_map);
					q->q_dst_map = NULL;
					err = ENOMEM;
					goto errout;
				}
#else
				device_printf(sc->sc_dev,
				    "%s,%d: CRYPTO_F_SKBUF unaligned not implemented\n",
				    __FILE__, __LINE__);
				err = EINVAL;
				goto errout;
#endif
			}
		} else {
			err = EINVAL;
			goto errout;
		}
#ifdef UBSEC_DEBUG
		DPRINTF("dst skip: %d\n", dskip);
#endif
		for (i = j = 0; i < q->q_dst_len; i++) {
			struct ubsec_pktbuf *pb;
			size_t packl = q->q_dst_map[i].dma_size;
			dma_addr_t packp = q->q_dst_map[i].dma_paddr;

			if (dskip >= packl) {
				dskip -= packl;
				continue;
			}

			packl -= dskip;
			packp += dskip;
			dskip = 0;

			if (packl > 0xfffc) {
				DPRINTF("Error: fragment size is bigger than 0xfffc.\n");
				err = EIO;
				goto errout;
			}

			if (j == 0)
				pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
			else
				pb = &dmap->d_dma->d_dbuf[j - 1];

			pb->pb_addr = htole32(packp);

			if (dtheend) {
				if (packl > dtheend) {
					pb->pb_len = htole32(dtheend);
					dtheend = 0;
				} else {
					pb->pb_len = htole32(packl);
					dtheend -= packl;
				}
			} else
				pb->pb_len = htole32(packl);

			if ((i + 1) == q->q_dst_len) {
				if (maccrd)
					/* Authentication:
					 * The last fragment of the output buffer
					 * contains the HMAC. */
					pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
					    offsetof(struct ubsec_dmachunk, d_macbuf[0]));
				else
					pb->pb_next = 0;
			} else
				pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
				    offsetof(struct ubsec_dmachunk, d_dbuf[j]));
			j++;
		}
	}
	dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
	    offsetof(struct ubsec_dmachunk, d_ctx));

	if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
		/* new Broadcom cards with dynamic long command context structure */

		if (enccrd != NULL &&
		    enccrd->crd_alg == CRYPTO_AES_CBC)
		{
			struct ubsec_pktctx_aes128 *ctxaes128;
			struct ubsec_pktctx_aes192 *ctxaes192;
			struct ubsec_pktctx_aes256 *ctxaes256;

			switch (ses->ses_keysize)
			{
			/* AES 128bit */
			case 128:
				ctxaes128 = (struct ubsec_pktctx_aes128 *)
				    (dmap->d_alloc.dma_vaddr +
				    offsetof(struct ubsec_dmachunk, d_ctx));

				ctxaes128->pc_len = htole16(sizeof(struct ubsec_pktctx_aes128));
				ctxaes128->pc_type = ctx.pc_type;
				ctxaes128->pc_flags = ctx.pc_flags;
				ctxaes128->pc_offset = ctx.pc_offset;
				for (i = 0; i < 4; i++)
					ctxaes128->pc_aeskey[i] = ctx.pc_key[i];
				for (i = 0; i < 5; i++)
					ctxaes128->pc_hminner[i] = ctx.pc_hminner[i];
				for (i = 0; i < 5; i++)
					ctxaes128->pc_hmouter[i] = ctx.pc_hmouter[i];
				for (i = 0; i < 4; i++)
					ctxaes128->pc_iv[i] = ctx.pc_iv[i];
				break;

			/* AES 192bit */
			case 192:
				ctxaes192 = (struct ubsec_pktctx_aes192 *)
				    (dmap->d_alloc.dma_vaddr +
				    offsetof(struct ubsec_dmachunk, d_ctx));

				ctxaes192->pc_len = htole16(sizeof(struct ubsec_pktctx_aes192));
				ctxaes192->pc_type = ctx.pc_type;
				ctxaes192->pc_flags = ctx.pc_flags;
				ctxaes192->pc_offset = ctx.pc_offset;
				for (i = 0; i < 6; i++)
					ctxaes192->pc_aeskey[i] = ctx.pc_key[i];
				for (i = 0; i < 5; i++)
					ctxaes192->pc_hminner[i] = ctx.pc_hminner[i];
				for (i = 0; i < 5; i++)
					ctxaes192->pc_hmouter[i] = ctx.pc_hmouter[i];
				for (i = 0; i < 4; i++)
					ctxaes192->pc_iv[i] = ctx.pc_iv[i];
				break;

			/* AES 256bit */
			case 256:
				ctxaes256 = (struct ubsec_pktctx_aes256 *)
				    (dmap->d_alloc.dma_vaddr +
				    offsetof(struct ubsec_dmachunk, d_ctx));

				ctxaes256->pc_len = htole16(sizeof(struct ubsec_pktctx_aes256));
				ctxaes256->pc_type = ctx.pc_type;
				ctxaes256->pc_flags = ctx.pc_flags;
				ctxaes256->pc_offset = ctx.pc_offset;
				for (i = 0; i < 8; i++)
					ctxaes256->pc_aeskey[i] = ctx.pc_key[i];
				for (i = 0; i < 5; i++)
					ctxaes256->pc_hminner[i] = ctx.pc_hminner[i];
				for (i = 0; i < 5; i++)
					ctxaes256->pc_hmouter[i] = ctx.pc_hmouter[i];
				for (i = 0; i < 4; i++)
					ctxaes256->pc_iv[i] = ctx.pc_iv[i];
				break;
			}
		} else {
			/*
			 * [3]DES / MD5_HMAC / SHA1_HMAC
			 *
			 * MD5_HMAC / SHA1_HMAC can use the IPSEC 3DES operation without
			 * encryption.
			 */
			struct ubsec_pktctx_des *ctxdes;

			ctxdes = (struct ubsec_pktctx_des *)(dmap->d_alloc.dma_vaddr +
			    offsetof(struct ubsec_dmachunk, d_ctx));

			ctxdes->pc_len = htole16(sizeof(struct ubsec_pktctx_des));
			ctxdes->pc_type = ctx.pc_type;
			ctxdes->pc_flags = ctx.pc_flags;
			ctxdes->pc_offset = ctx.pc_offset;
			for (i = 0; i < 6; i++)
				ctxdes->pc_deskey[i] = ctx.pc_key[i];
			for (i = 0; i < 5; i++)
				ctxdes->pc_hminner[i] = ctx.pc_hminner[i];
			for (i = 0; i < 5; i++)
				ctxdes->pc_hmouter[i] = ctx.pc_hmouter[i];
			ctxdes->pc_iv[0] = ctx.pc_iv[0];
			ctxdes->pc_iv[1] = ctx.pc_iv[1];
		}
	} else
	{
		/* old Broadcom card with fixed small command context structure */

		/*
		 * [3]DES / MD5_HMAC / SHA1_HMAC
		 */
		struct ubsec_pktctx *ctxs;

		ctxs = (struct ubsec_pktctx *)(dmap->d_alloc.dma_vaddr +
		    offsetof(struct ubsec_dmachunk, d_ctx));

		/* transform generic context into small context */
		for (i = 0; i < 6; i++)
			ctxs->pc_deskey[i] = ctx.pc_key[i];
		for (i = 0; i < 5; i++)
			ctxs->pc_hminner[i] = ctx.pc_hminner[i];
		for (i = 0; i < 5; i++)
			ctxs->pc_hmouter[i] = ctx.pc_hmouter[i];
		ctxs->pc_iv[0] = ctx.pc_iv[0];
		ctxs->pc_iv[1] = ctx.pc_iv[1];
		ctxs->pc_flags = ctx.pc_flags;
		ctxs->pc_offset = ctx.pc_offset;
	}
#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("spin_lock_irqsave\n");
#endif
	spin_lock_irqsave(&sc->sc_ringmtx, flags);
	//spin_lock_irq(&sc->sc_ringmtx);

	BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
	sc->sc_nqueue++;
	ubsecstats.hst_ipackets++;
	ubsecstats.hst_ibytes += stheend;
	ubsec_feed(sc);

#ifdef UBSEC_VERBOSE_DEBUG
	DPRINTF("spin_unlock_irqrestore\n");
#endif
	spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
	//spin_unlock_irq(&sc->sc_ringmtx);

	return (0);
errout:
	if (q != NULL) {
#ifdef NOTYET
		if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
			m_freem(q->q_dst_m);
#endif

		if ((q->q_has_dst == 1) && q->q_dst_len > 0) {
#if 0
			bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
#endif
			dma_unmap(sc, q->q_dst_map, q->q_dst_len);
		}
		if (q->q_src_len > 0) {
#if 0
			bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
			bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
#endif
			dma_unmap(sc, q->q_src_map, q->q_src_len);
		}

#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("spin_lock_irqsave\n");
#endif
		spin_lock_irqsave(&sc->sc_ringmtx, flags);
		//spin_lock_irq(&sc->sc_ringmtx);

		BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

#ifdef UBSEC_VERBOSE_DEBUG
		DPRINTF("spin_unlock_irqrestore\n");
#endif
		spin_unlock_irqrestore(&sc->sc_ringmtx, flags);
		//spin_unlock_irq(&sc->sc_ringmtx);
	}

	if (err == EINVAL)
		ubsecstats.hst_invalid++;
	else
		ubsecstats.hst_nomem++;
errout2:
	crp->crp_etype = err;
	crypto_done(crp);

#ifdef UBSEC_DEBUG
	DPRINTF("%s() err = %x\n", __FUNCTION__, err);
#endif

	return (0);
}
void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct cryptop *crp = (struct cryptop *)q->q_crp;
	struct cryptodesc *crd;
	struct ubsec_dma *dmap = q->q_dma;
	int ivsize = 8;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	ubsecstats.hst_opackets++;
	ubsecstats.hst_obytes += dmap->d_alloc.dma_size;

#if 0
	bus_dmamap_sync(sc->sc_dmat, dmap->d_alloc.dma_map, 0,
	    dmap->d_alloc.dma_map->dm_mapsize,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
		bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
		    0, q->q_dst_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
		bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
	}
	bus_dmamap_sync(sc->sc_dmat, q->q_src_map,
	    0, q->q_src_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
	bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
#endif

	if ((q->q_has_dst == 1) && q->q_dst_len > 0)
		dma_unmap(sc, q->q_dst_map, q->q_dst_len);

	dma_unmap(sc, q->q_src_map, q->q_src_len);

#ifdef NOTYET
	if ((crp->crp_flags & CRYPTO_F_SKBUF) && (q->q_src_m != q->q_dst_m)) {
		m_freem(q->q_src_m);
		crp->crp_buf = (caddr_t)q->q_dst_m;
	}
#endif

	/* copy out IV for future use */
	if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;

			if (crd->crd_alg == CRYPTO_AES_CBC)
				ivsize = 16;
			else
				ivsize = 8;

			if (crp->crp_flags & CRYPTO_F_SKBUF)
#if 0
				m_copydata((struct sk_buff *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
#endif
				crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivsize, ivsize,
				    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);

			else if (crp->crp_flags & CRYPTO_F_IOV) {
#if 0
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - 8, 8,
				    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
#endif
				crypto_copydata(crp->crp_flags, (caddr_t)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivsize, ivsize,
				    (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
			}
			break;
		}
	}

	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		if (crd->crd_alg != CRYPTO_MD5_HMAC &&
		    crd->crd_alg != CRYPTO_SHA1_HMAC)
			continue;
#if 0
		if (crp->crp_flags & CRYPTO_F_SKBUF)
			m_copyback((struct sk_buff *)crp->crp_buf,
			    crd->crd_inject, 12,
			    dmap->d_dma->d_macbuf);
#endif
#if 0
		/* BUG? it does not honor the mac len.. */
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    crd->crd_inject, 12,
		    (caddr_t)dmap->d_dma->d_macbuf);
#endif
		crypto_copyback(crp->crp_flags, crp->crp_buf,
		    crd->crd_inject,
		    sc->sc_sessions[q->q_sesn].ses_mlen,
		    (caddr_t)dmap->d_dma->d_macbuf);
#if 0
		else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac)
			bcopy((caddr_t)dmap->d_dma->d_macbuf,
			    crp->crp_mac, 12);
#endif
		break;
	}
	BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	crypto_done(crp);
}
void
ubsec_mcopy(struct sk_buff *srcm, struct sk_buff *dstm, int hoffset, int toffset)
{
	int i, j, dlen, slen;
	caddr_t dptr, sptr;

	j = 0;
	sptr = srcm->data;
	slen = srcm->len;
	dptr = dstm->data;
	dlen = dstm->len;

	while (1) {
		for (i = 0; i < min(slen, dlen); i++) {
			if (j < hoffset || j >= toffset)
				*dptr++ = *sptr++;
			slen--;
			dlen--;
			j++;
		}
		if (slen == 0) {
			srcm = srcm->next;
			if (srcm == NULL)
				return;
			sptr = srcm->data;
			slen = srcm->len;
		}
		if (dlen == 0) {
			dstm = dstm->next;
			if (dstm == NULL)
				return;
			dptr = dstm->data;
			dlen = dstm->len;
		}
	}
}
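/*
 * DMA chunk helpers: one coherent allocation per request descriptor,
 * so no explicit cache synchronisation is needed around device
 * accesses (which is why the bus_dmamap_sync() calls above are
 * compiled out).
 */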
int
ubsec_dma_malloc(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma,
    size_t size, int mapflags)
{
	dma->dma_vaddr = dma_alloc_coherent(sc->sc_dv,
	    size, &dma->dma_paddr, GFP_KERNEL);

	if (likely(dma->dma_vaddr))
	{
		dma->dma_size = size;
		return (0);
	}

	DPRINTF("could not allocate %zu bytes of coherent memory.\n", size);

	return (1);
}

void
ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
{
	dma_free_coherent(sc->sc_dv, dma->dma_size, dma->dma_vaddr,
	    dma->dma_paddr);
}
/*
 * Reset the board. Register values are left as they are
 * after the reset (i.e. initial values are assigned elsewhere).
 */
void
ubsec_reset_board(struct ubsec_softc *sc)
{
	volatile u_int32_t ctrl;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif
	DPRINTF("Send reset signal to chip.\n");

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl |= BS_CTRL_RESET;
	WRITE_REG(sc, BS_CTRL, ctrl);

	/*
	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
	 */
	DELAY(10);
}
/*
 * Init Broadcom registers
 */
void
ubsec_init_board(struct ubsec_softc *sc)
{
	u_int32_t ctrl;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif
	DPRINTF("Initialize chip.\n");

	ctrl = READ_REG(sc, BS_CTRL);
	ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
	ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT | BS_CTRL_DMAERR;

	WRITE_REG(sc, BS_CTRL, ctrl);

	/* Set chip capabilities (BCM5365P) */
	sc->sc_flags |= UBS_FLAGS_LONGCTX | UBS_FLAGS_AES;
}
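/*
 * Note that the capability flags above are hard-coded rather than
 * probed: this driver only targets the BCM5365P core, which supports
 * AES and the long (variable-size) command context layout.
 */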
/*
 * Clean up after a chip crash.
 * It is assumed that the caller holds spin_lock_irq(sc_ringmtx).
 */
void
ubsec_cleanchip(struct ubsec_softc *sc)
{
	struct ubsec_q *q;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif
	DPRINTF("Clean up queues after chip crash.\n");

	while (!BSD_SIMPLEQ_EMPTY(&sc->sc_qchip)) {
		q = BSD_SIMPLEQ_FIRST(&sc->sc_qchip);
		BSD_SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
		ubsec_free_q(sc, q);
	}
}
/*
 * free a ubsec_q
 * It is assumed that the caller holds spin_lock_irq(sc_ringmtx).
 */
int
ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
{
	struct ubsec_q *q2;
	struct cryptop *crp;
	int npkts;
	int i;

#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif

	npkts = q->q_nstacked_mcrs;

	for (i = 0; i < npkts; i++) {
		if (q->q_stacked_mcr[i]) {
			q2 = q->q_stacked_mcr[i];

			if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
#ifdef NOTYET
				m_freem(q2->q_dst_m);
#else
				printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
#endif

			crp = (struct cryptop *)q2->q_crp;

			BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);

			crp->crp_etype = EFAULT;
			crypto_done(crp);
		} else {
			break;
		}
	}

	/*
	 * Free header MCR
	 */
	if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
#ifdef NOTYET
		m_freem(q->q_dst_m);
#else
		printk(KERN_ERR "%s,%d: SKB not supported\n", __FILE__, __LINE__);
#endif

	crp = (struct cryptop *)q->q_crp;

	BSD_SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);

	crp->crp_etype = EFAULT;
	crypto_done(crp);
	return (0);
}
/*
 * Routine to reset the chip and clean up.
 * It is assumed that the caller holds spin_lock_irq(sc_ringmtx).
 */
void
ubsec_totalreset(struct ubsec_softc *sc)
{
#ifdef UBSEC_DEBUG
	DPRINTF("%s()\n", __FUNCTION__);
#endif
	DPRINTF("initiate total chip reset.. \n");
	ubsec_reset_board(sc);
	ubsec_init_board(sc);
	ubsec_cleanchip(sc);
}
void
ubsec_dump_pb(struct ubsec_pktbuf *pb)
{
	printf("addr 0x%x (0x%x) next 0x%x\n",
	    pb->pb_addr, pb->pb_len, pb->pb_next);
}

void
ubsec_dump_mcr(struct ubsec_mcr *mcr)
{
	struct ubsec_mcr_add *ma;
	int i;

	printf("MCR:\n");
	printf(" pkts: %u, flags 0x%x\n",
	    letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
	ma = (struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
	for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
		printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
		    letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
		    letoh16(ma->mcr_reserved));
		printf(" %d: ipkt ", i);
		ubsec_dump_pb(&ma->mcr_ipktbuf);
		printf(" %d: opkt ", i);
		ubsec_dump_pb(&ma->mcr_opktbuf);
		ma++;
	}
	printf("END MCR\n");
}
static int __init mod_init(void)
{
	return ssb_driver_register(&ubsec_ssb_driver);
}

static void __exit mod_exit(void)
{
	ssb_driver_unregister(&ubsec_ssb_driver);
}

module_init(mod_init);
module_exit(mod_exit);

/* Meta information */
MODULE_AUTHOR("Daniel Mueller <daniel@danm.de>");
MODULE_LICENSE("BSD");
MODULE_DESCRIPTION("OCF driver for BCM5365P IPSec Core");
MODULE_VERSION(DRV_MODULE_VERSION);