/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/des.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <mach/npe.h>
#include <mach/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY   0x01
#define NPE_OP_CCM_ENABLE    0x04
#define NPE_OP_CRYPT_ENABLE  0x08
#define NPE_OP_HASH_ENABLE   0x10
#define NPE_OP_NOT_IN_PLACE  0x20
#define NPE_OP_HMAC_DISABLE  0x40
#define NPE_OP_CRYPT_ENCRYPT 0x80

#define NPE_OP_CCM_GEN_MIC   0xcc
#define NPE_OP_HASH_GEN_ICV  0x50
#define NPE_OP_ENC_GEN_KEY   0xc9

#define MOD_ECB     0x0000
#define MOD_CTR     0x1000
#define MOD_CBC_ENC 0x2000
#define MOD_CBC_DEC 0x3000
#define MOD_CCM_ENC 0x4000
#define MOD_CCM_DEC 0x5000

#define KEYLEN_128  4
#define KEYLEN_192  6
#define KEYLEN_256  8

#define CIPH_DECR   0x0000
#define CIPH_ENCR   0x0400

#define MOD_DES     0x0000
#define MOD_TDEA2   0x0100
#define MOD_3DES    0x0200
#define MOD_AES     0x0800
#define MOD_AES128  (0x0800 | KEYLEN_128)
#define MOD_AES192  (0x0900 | KEYLEN_192)
#define MOD_AES256  (0x0a00 | KEYLEN_256)

#define MAX_IVLEN   16
#define NPE_ID      2  /* NPE C */
#define NPE_QLEN    16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL 64

#define SEND_QID    29
#define RECV_QID    30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_IPAD_VALUE   0x36
#define HMAC_OPAD_VALUE   0x5C
#define HMAC_PAD_BLOCKLEN SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE   16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	u32 phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_* operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_* operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	u32 icv_rev_aes;	/* icv or rev aes */
	u32 src_buf;
	u32 dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct ablkcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *buffer;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct crypto_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

static void dev_release(struct device *dev)
{
	return;
}

#define DRIVER_NAME "ixp4xx_crypto"
static struct platform_device pseudo_dev = {
	.name = DRIVER_NAME,
	.id   = 0,
	.num_resources = 0,
	.dev  = {
		.coherent_dma_mask = DMA_BIT_MASK(32),
		.release = dev_release,
	},
};

static struct device *dev = &pseudo_dev.dev;

static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto)->hash;
}

static int setup_crypt_desc(void)
{
	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* Allocate the whole descriptor ring, including the emergency
	 * descriptors at indices NPE_QLEN..NPE_QLEN_TOTAL-1 handed out by
	 * get_crypt_desc_emerg(); release_ixp_crypto() frees the same size. */
	crypt_virt = dma_alloc_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	memset(crypt_virt, 0, NPE_QLEN_TOTAL * sizeof(struct crypt_ctl));
	return 0;
}

static spinlock_t desc_lock;
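/* Hand out the next free descriptor from the normal part of the ring
 * (indices 0..NPE_QLEN-1), allocating the ring on first use. Returns
 * NULL when the slot at the current index is still in flight. */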
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

static spinlock_t emerg_lock;
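/* Like get_crypt_desc(), but falls back to the reserved descriptors at
 * indices NPE_QLEN..NPE_QLEN_TOTAL-1. Used for control operations (HMAC
 * pad hashing, reverse AES key generation) so they cannot be starved by
 * ordinary crypto requests occupying the normal part of the ring. */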
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
		u32 phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		/* unmap the data buffer itself (buf->phys_addr); phys_next is
		 * the pool address of the next descriptor, not a mapping */
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

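/* Completion helper for the case where the ICV was collected in a
 * contiguous bounce buffer: on encryption, copy the computed ICV back
 * into the scattered request buffer, then free the bounce buffer. */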
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
			req->src, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

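/* Handle one descriptor returned by the NPE on RECV_QID. The NPE flags
 * a failed operation (e.g. ICV mismatch) in the low bits of the
 * descriptor's physical address; the remaining bits identify the
 * crypt_ctl entry to complete. */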
static void one_packet(dma_addr_t phys)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct ablkcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
				crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

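/* One-time hardware setup: check the expansion-bus configuration register
 * for crypto capabilities, bring up NPE-C and query its firmware level,
 * create the DMA pools and request the send/receive queues from the
 * queue manager. */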
static int init_ixp_crypto(void)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret) {
			return ret;
		}
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
				npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
				npe_name(npe_c));
		return -ENODEV;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
			sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
			NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	if (ctx_pool)
		dma_pool_destroy(ctx_pool);
	if (buffer_pool)
		dma_pool_destroy(buffer_pool);
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(void)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
			NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
			crypt_virt, crypt_phys);
	}
	return;
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_tfm *tfm)
{
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablk_ctx);
	return init_tfm(tfm);
}

static int init_tfm_aead(struct crypto_tfm *tfm)
{
	tfm->crt_aead.reqsize = sizeof(struct aead_ctx);
	return init_tfm(tfm);
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

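/* Queue a one-off hash operation that runs the HMAC ipad or opad block
 * (key XORed with 0x36/0x5C) through the NPE and stores the resulting
 * intermediate digest at 'target' inside the crypto context. Completion
 * is signalled asynchronously via CTL_FLAG_GEN_ICV in one_packet(). */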
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
		int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	u32 pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = NULL;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

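/* Fill the per-direction NPE context with the hash config word and the
 * algorithm's initial chaining values, then kick off precomputation of
 * the HMAC inner and outer pad digests. */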
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
				+ sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
			init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
			init_len, npe_ctx_addr, key, key_len);
}

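/* AES decryption needs the reverse (decryption) key schedule. Ask the
 * NPE to derive it by running one block through NPE_OP_ENC_GEN_KEY;
 * one_packet() clears CIPH_ENCR in the decrypt context again when the
 * result comes back. */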
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
		const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	u32 *flags = &tfm->crt_flags;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		/* MOD_AESxxx already carries the matching KEYLEN_xxx bits */
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			*flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else if (cipher_cfg & MOD_3DES) {
		const u32 *K = (const u32 *)key;
		if (unlikely(!((K[0] ^ K[2]) | (K[1] ^ K[3])) ||
			     !((K[2] ^ K[4]) | (K[3] ^ K[5])))) {
			*flags |= CRYPTO_TFM_RES_BAD_KEY_SCHED;
			return -EINVAL;
		}
	} else {
		u32 tmp[DES_EXPKEY_WORDS];
		if (des_ekey(tmp, key) == 0) {
			*flags |= CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

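/* Walk a scatterlist and build the NPE's chained buffer descriptors,
 * DMA-mapping each segment. 'buf' is the head hook supplied by the
 * caller; the function returns the last descriptor in the chain, or
 * NULL if a pool allocation failed part-way through. */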
static struct buffer_desc *chainup_buffers(struct device *dev,
		struct scatterlist *sg, unsigned nbytes,
		struct buffer_desc *buf, gfp_t flags,
		enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = scatterwalk_sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		u32 next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = page_address(sg_page(sg)) + sg->offset;
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			buf = NULL;
			break;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_rfc3686_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
		unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
			CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

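/* Common path for ablkcipher encrypt/decrypt: grab a descriptor, fill in
 * the IV and context pointers, chain up src (and dst, when operating
 * out-of-place) and hand the request to the NPE. Returns -EINPROGRESS
 * on success; completion happens in one_packet(). */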
static int ablk_perform(struct ablkcipher_request *req, int encrypt)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->nbytes;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
	struct buffer_desc src_hook;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->info);
	memcpy(crypt->iv, req->info, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		BUG_ON(req->dst->length < nbytes);
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
					flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
				flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct ablkcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->info = iv;
	ret = ablk_perform(req, 1);
	req->info = info;
	return ret;
}

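/* Check whether the 'nbytes' bytes starting at offset 'start' cross a
 * scatterlist entry boundary, i.e. whether the ICV is not contiguous in
 * memory and therefore needs a bounce buffer. */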
static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
		unsigned int nbytes)
{
	int offset = 0;

	if (!nbytes)
		return 0;

	for (;;) {
		if (start < offset + sg->length)
			break;

		offset += sg->length;
		sg = scatterwalk_sg_next(sg);
	}
	return (start + nbytes > offset + sg->length);
}

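/* Common path for AEAD encrypt/decrypt: the NPE authenticates assoc data
 * + IV + payload and en/decrypts the payload region in place (only
 * in-place requests are supported). A scattered ICV is bounced through a
 * pool buffer, see hmac_inconsistent() and finish_scattered_hmac(). */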
static int aead_perform(struct aead_request *req, int encrypt,
		int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + ivsize + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	if (req->src != req->dst) {
		BUG(); /* -ENOTSUP because of my laziness */
	}

	/* ASSOC data */
	buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
		flags, DMA_TO_DEVICE);
	req_ctx->buffer = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto out;
	/* IV */
	sg_init_table(&req_ctx->ivlist, 1);
	sg_set_buf(&req_ctx->ivlist, iv, ivsize);
	buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_chain;
	if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
				&crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_chain;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
				req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}
	/* Crypt */
	buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
			DMA_BIDIRECTIONAL);
	if (!buf)
		goto free_hmac_virt;
	if (!req_ctx->hmac_virt) {
		crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_hmac_virt:
	if (req_ctx->hmac_virt) {
		dma_pool_free(buffer_pool, req_ctx->hmac_virt,
				crypt->icv_rev_aes);
	}
free_chain:
	free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
out:
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	u32 *flags = &tfm->base.crt_flags;
	unsigned digest_len = crypto_aead_alg(tfm)->maxauthsize;
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			ctx->authkey_len, digest_len);
	if (ret)
		goto out;

	if (*flags & CRYPTO_TFM_RES_WEAK_KEY) {
		if (*flags & CRYPTO_TFM_REQ_WEAK_KEY) {
			ret = -EINVAL;
			goto out;
		} else {
			*flags &= ~CRYPTO_TFM_RES_WEAK_KEY;
		}
	}
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_alg(tfm)->maxauthsize >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
			unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct crypto_authenc_key_param *param;

	if (!RTA_OK(rta, keylen))
		goto badkey;
	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
		goto badkey;
	if (RTA_PAYLOAD(rta) < sizeof(*param))
		goto badkey;

	param = RTA_DATA(rta);
	ctx->enckey_len = be32_to_cpu(param->enckeylen);

	key += RTA_ALIGN(rta->rta_len);
	keylen -= RTA_ALIGN(rta->rta_len);

	if (keylen < ctx->enckey_len)
		goto badkey;

	ctx->authkey_len = keylen - ctx->enckey_len;
	memcpy(ctx->enckey, key + ctx->authkey_len, ctx->enckey_len);
	memcpy(ctx->authkey, key, ctx->authkey_len);

	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	ctx->enckey_len = 0;
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}

static int aead_encrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 1, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req));
	return aead_perform(req, 0, req->assoclen + ivsize,
			req->cryptlen, req->iv);
}

static int aead_givencrypt(struct aead_givcrypt_request *req)
{
	struct crypto_aead *tfm = aead_givcrypt_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned len, ivsize = crypto_aead_ivsize(tfm);
	__be64 seq;

	/* copied from eseqiv.c */
	if (!ctx->salted) {
		get_random_bytes(ctx->salt, ivsize);
		ctx->salted = 1;
	}
	memcpy(req->areq.iv, ctx->salt, ivsize);
	len = ivsize;
	if (ivsize > sizeof(u64)) {
		memset(req->giv, 0, ivsize - sizeof(u64));
		len = sizeof(u64);
	}
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + ivsize - len, &seq, len);
	return aead_perform(&req->areq, 1, req->areq.assoclen,
			req->areq.cryptlen + ivsize, req->giv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.cra_name	= "cbc(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			.ivsize		= DES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.cra_name	= "ecb(des)",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES_KEY_SIZE,
			.max_keysize	= DES_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "ecb(des3_ede)",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= DES3_EDE_KEY_SIZE,
			.max_keysize	= DES3_EDE_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "cbc(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "ecb(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.cra_name	= "ctr(aes)",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			}
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "rfc3686(ctr(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .ablkcipher = {
			.min_keysize	= AES_MIN_KEY_SIZE,
			.max_keysize	= AES_MAX_KEY_SIZE,
			.ivsize		= AES_BLOCK_SIZE,
			.geniv		= "eseqiv",
			.setkey		= ablk_rfc3686_setkey,
			.encrypt	= ablk_rfc3686_crypt,
			.decrypt	= ablk_rfc3686_crypt }
		}
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des))",
		.cra_blocksize	= DES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= DES3_EDE_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(md5),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= MD5_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.cra_name	= "authenc(hmac(sha1),cbc(aes))",
		.cra_blocksize	= AES_BLOCK_SIZE,
		.cra_u		= { .aead = {
			.ivsize		= AES_BLOCK_SIZE,
			.maxauthsize	= SHA1_DIGEST_SIZE,
			}
		}
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	if (platform_device_register(&pseudo_dev))
		return -ENODEV;

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto();
	if (err) {
		platform_device_unregister(&pseudo_dev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct crypto_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->cra_driver_name, CRYPTO_MAX_ALG_NAME,
			"%s"IXP_POSTFIX, cra->cra_name) >=
			CRYPTO_MAX_ALG_NAME)
		{
			continue;
		}
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}
		if (!ixp4xx_algos[i].hash) {
			/* block ciphers */
			cra->cra_type = &crypto_ablkcipher_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			if (!cra->cra_ablkcipher.setkey)
				cra->cra_ablkcipher.setkey = ablk_setkey;
			if (!cra->cra_ablkcipher.encrypt)
				cra->cra_ablkcipher.encrypt = ablk_encrypt;
			if (!cra->cra_ablkcipher.decrypt)
				cra->cra_ablkcipher.decrypt = ablk_decrypt;
			cra->cra_init = init_tfm_ablk;
		} else {
			/* authenc */
			cra->cra_type = &crypto_aead_type;
			cra->cra_flags = CRYPTO_ALG_TYPE_AEAD |
					 CRYPTO_ALG_KERN_DRIVER_ONLY |
					 CRYPTO_ALG_ASYNC;
			cra->cra_aead.setkey = aead_setkey;
			cra->cra_aead.setauthsize = aead_setauthsize;
			cra->cra_aead.encrypt = aead_encrypt;
			cra->cra_aead.decrypt = aead_decrypt;
			cra->cra_aead.givencrypt = aead_givencrypt;
			cra->cra_init = init_tfm_aead;
		}
		cra->cra_ctxsize = sizeof(struct ixp_ctx);
		cra->cra_module = THIS_MODULE;
		cra->cra_alignmask = 3;
		cra->cra_priority = 300;
		cra->cra_exit = exit_tfm;
		if (crypto_register_alg(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
				cra->cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_alg(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto();
	platform_device_unregister(&pseudo_dev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");