/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY	0x01
#define NPE_OP_CCM_ENABLE	0x04
#define NPE_OP_CRYPT_ENABLE	0x08
#define NPE_OP_HASH_ENABLE	0x10
#define NPE_OP_NOT_IN_PLACE	0x20
#define NPE_OP_HMAC_DISABLE	0x40
#define NPE_OP_CRYPT_ENCRYPT	0x80

#define NPE_OP_CCM_GEN_MIC	0xcc
#define NPE_OP_HASH_GEN_ICV	0x50
#define NPE_OP_ENC_GEN_KEY	0xc9

#define MOD_ECB		0x0000
#define MOD_CTR		0x1000
#define MOD_CBC_ENC	0x2000
#define MOD_CBC_DEC	0x3000
#define MOD_CCM_ENC	0x4000
#define MOD_CCM_DEC	0x5000

#define KEYLEN_128	4
#define KEYLEN_192	6
#define KEYLEN_256	8

#define CIPH_DECR	0x0000
#define CIPH_ENCR	0x0400

#define MOD_DES		0x0000
#define MOD_TDEA2	0x0100
#define MOD_3DES	0x0200
#define MOD_AES		0x0800
#define MOD_AES128	(0x0800 | KEYLEN_128)
#define MOD_AES192	(0x0900 | KEYLEN_192)
#define MOD_AES256	(0x0a00 | KEYLEN_256)

#define MAX_IVLEN	16
#define NPE_ID		2  /* NPE C */
#define NPE_QLEN	16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL	64

#define SEND_QID	29
#define RECV_QID	30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE		0x36
#define HMAC_OPAD_VALUE		0x5C
#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_* operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

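/*
 * All crypt_ctl descriptors live in a single coherent DMA array
 * (crypt_virt/crypt_phys), so translating between virtual and bus
 * addresses is plain index arithmetic over the 64-byte descriptors.
 */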
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* The emergency descriptors occupy indices NPE_QLEN..NPE_QLEN_TOTAL-1,
	 * so the array must cover NPE_QLEN_TOTAL entries; this also matches
	 * the size passed to dma_free_coherent() on release. */
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

static spinlock_t desc_lock;
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

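/*
 * Descriptors NPE_QLEN..NPE_QLEN_TOTAL-1 are kept in reserve for
 * configuration requests (HMAC ICV and reverse-AES key generation),
 * so key setup can still make progress while all regular descriptors
 * are in flight.
 */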
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* Unmap the data buffer that chainup_buffers() mapped; its
		 * DMA handle is stored in phys_addr, not phys_next. */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

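/*
 * Completion path: the NPE posts the descriptor's physical address on
 * RECV_QID. Bit 0 of that address reports failure (e.g. an ICV mismatch
 * on hash verify), so the low bits are masked off before converting the
 * address back to a descriptor pointer.
 */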
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32*)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
			IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
			npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
			npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

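/*
 * HMAC precomputation: register_chain_var() lets the NPE hash one block
 * of (key XOR ipad/opad) and store the resulting chaining variable at
 * 'target' inside the NPE context, the standard HMAC inner/outer digest
 * trick, so per-request hashing only covers the message itself.
 */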
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32*)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

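/*
 * AES decryption requires the reverse (decryption) key schedule. The
 * NPE derives it by running one dummy block in NPE_OP_ENC_GEN_KEY mode
 * and writing the result back into the context at icv_rev_aes.
 */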
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32*)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		/* reject degenerate 3DES keys where K1 == K2 or K2 == K3 */
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5])))) {
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32*)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

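/*
 * chainup_buffers() mirrors a scatterlist as a chain of buffer_desc
 * entries from buffer_pool, dma_map_single()ing each segment. The
 * caller passes a stack "hook" descriptor; the real chain starts at
 * hook.next/hook.phys_next. On allocation failure it returns NULL and
 * the caller frees any partially built chain.
 */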
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* terminate the chain built so far so the caller
			 * can free it, then report the failure */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE)) {
			/* pick up any partially built chain for freeing */
			req_ctx->dst = dst_hook.next;
			crypt->dst_buf = dst_hook.phys_next;
			goto free_buf_dest;
		}
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction)) {
		/* pick up any partially built chain for freeing */
		req_ctx->src = src_hook.next;
		crypt->src_buf = src_hook.phys_next;
		goto free_buf_src;
	}
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

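/*
 * RFC 3686 CTR mode: the 16-byte counter block is the 4-byte nonce
 * taken from the key, the 8-byte per-request IV, and a 4-byte
 * big-endian block counter initialized to 1.
 */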
static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The ICV is scattered over several buffers,
		 * we need to copy it into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	/* Free the destination chain first and fall through to the source
	 * chain; a failure before the dst chain exists jumps straight to
	 * free_buf_src so the uninitialized dst state is never touched. */
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	/* authsize must be a non-zero multiple of 4, up to the digest size */
	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
			continue;

		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES))
			continue;

		/* block ciphers */
		cra->cra_type = &crypto_ablkcipher_type;
		cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
				 CRYPTO_ALG_KERN_DRIVER_ONLY |
				 CRYPTO_ALG_ASYNC;
		if (!cra->cra_ablkcipher.setkey)
			cra->cra_ablkcipher.setkey = ablk_setkey;
		if (!cra->cra_ablkcipher.encrypt)
			cra->cra_ablkcipher.encrypt = ablk_encrypt;
		if (!cra->cra_ablkcipher.decrypt)
			cra->cra_ablkcipher.decrypt = ablk_decrypt;
		cra->cra_init = init_tfm_ablk;

		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		/* check this entry's own config word, not ixp4xx_algos[i] */
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		cra->setkey = aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");