treewide: remove redundant IS_ERR() before error code check
drivers/crypto/ixp4xx_crypto.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IXP4xx NPE-C crypto driver
 *
 * Copyright (C) 2008 Christian Hohnstaedt <chohnstaedt@innominate.com>
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/module.h>

#include <crypto/ctr.h>
#include <crypto/internal/des.h>
#include <crypto/aes.h>
#include <crypto/hmac.h>
#include <crypto/sha.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>

#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>

#define MAX_KEYLEN 32

/* hash: cfgword + 2 * digestlen; crypt: keylen + cfgword */
#define NPE_CTX_LEN 80
#define AES_BLOCK128 16

#define NPE_OP_HASH_VERIFY	0x01
#define NPE_OP_CCM_ENABLE	0x04
#define NPE_OP_CRYPT_ENABLE	0x08
#define NPE_OP_HASH_ENABLE	0x10
#define NPE_OP_NOT_IN_PLACE	0x20
#define NPE_OP_HMAC_DISABLE	0x40
#define NPE_OP_CRYPT_ENCRYPT	0x80

#define NPE_OP_CCM_GEN_MIC	0xcc
#define NPE_OP_HASH_GEN_ICV	0x50
#define NPE_OP_ENC_GEN_KEY	0xc9

#define MOD_ECB		0x0000
#define MOD_CTR		0x1000
#define MOD_CBC_ENC	0x2000
#define MOD_CBC_DEC	0x3000
#define MOD_CCM_ENC	0x4000
#define MOD_CCM_DEC	0x5000

#define KEYLEN_128	4
#define KEYLEN_192	6
#define KEYLEN_256	8

#define CIPH_DECR	0x0000
#define CIPH_ENCR	0x0400

#define MOD_DES		0x0000
#define MOD_TDEA2	0x0100
#define MOD_3DES	0x0200
#define MOD_AES		0x0800
#define MOD_AES128	(0x0800 | KEYLEN_128)
#define MOD_AES192	(0x0900 | KEYLEN_192)
#define MOD_AES256	(0x0a00 | KEYLEN_256)

#define MAX_IVLEN	16
#define NPE_ID		2  /* NPE C */
#define NPE_QLEN	16
/* Space for registering when the first
 * NPE_QLEN crypt_ctl are busy */
#define NPE_QLEN_TOTAL	64

#define SEND_QID	29
#define RECV_QID	30

#define CTL_FLAG_UNUSED		0x0000
#define CTL_FLAG_USED		0x1000
#define CTL_FLAG_PERFORM_ABLK	0x0001
#define CTL_FLAG_GEN_ICV	0x0002
#define CTL_FLAG_GEN_REVAES	0x0004
#define CTL_FLAG_PERFORM_AEAD	0x0008
#define CTL_FLAG_MASK		0x000f

#define HMAC_PAD_BLOCKLEN	SHA1_BLOCK_SIZE

#define MD5_DIGEST_SIZE		16

struct buffer_desc {
	u32 phys_next;
#ifdef __ARMEB__
	u16 buf_len;
	u16 pkt_len;
#else
	u16 pkt_len;
	u16 buf_len;
#endif
	dma_addr_t phys_addr;
	u32 __reserved[4];
	struct buffer_desc *next;
	enum dma_data_direction dir;
};

struct crypt_ctl {
#ifdef __ARMEB__
	u8 mode;		/* NPE_OP_*  operation mode */
	u8 init_len;
	u16 reserved;
#else
	u16 reserved;
	u8 init_len;
	u8 mode;		/* NPE_OP_*  operation mode */
#endif
	u8 iv[MAX_IVLEN];	/* IV for CBC mode or CTR IV for CTR mode */
	dma_addr_t icv_rev_aes;	/* icv or rev aes */
	dma_addr_t src_buf;
	dma_addr_t dst_buf;
#ifdef __ARMEB__
	u16 auth_offs;		/* Authentication start offset */
	u16 auth_len;		/* Authentication data length */
	u16 crypt_offs;		/* Cryption start offset */
	u16 crypt_len;		/* Cryption data length */
#else
	u16 auth_len;		/* Authentication data length */
	u16 auth_offs;		/* Authentication start offset */
	u16 crypt_len;		/* Cryption data length */
	u16 crypt_offs;		/* Cryption start offset */
#endif
	u32 aadAddr;		/* Additional Auth Data Addr for CCM mode */
	u32 crypto_ctx;		/* NPE Crypto Param structure address */

	/* Used by Host: 4*4 bytes */
	unsigned ctl_flags;
	union {
		struct skcipher_request *ablk_req;
		struct aead_request *aead_req;
		struct crypto_tfm *tfm;
	} data;
	struct buffer_desc *regist_buf;
	u8 *regist_ptr;
};

struct ablk_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
};

struct aead_ctx {
	struct buffer_desc *src;
	struct buffer_desc *dst;
	struct scatterlist ivlist;
	/* used when the hmac is not on one sg entry */
	u8 *hmac_virt;
	int encrypt;
};

struct ix_hash_algo {
	u32 cfgword;
	unsigned char *icv;
};

struct ix_sa_dir {
	unsigned char *npe_ctx;
	dma_addr_t npe_ctx_phys;
	int npe_ctx_idx;
	u8 npe_mode;
};

struct ixp_ctx {
	struct ix_sa_dir encrypt;
	struct ix_sa_dir decrypt;
	int authkey_len;
	u8 authkey[MAX_KEYLEN];
	int enckey_len;
	u8 enckey[MAX_KEYLEN];
	u8 salt[MAX_IVLEN];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	unsigned salted;
	atomic_t configuring;
	struct completion completion;
};

struct ixp_alg {
	struct skcipher_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

struct ixp_aead_alg {
	struct aead_alg crypto;
	const struct ix_hash_algo *hash;
	u32 cfg_enc;
	u32 cfg_dec;

	int registered;
};

static const struct ix_hash_algo hash_alg_md5 = {
	.cfgword	= 0xAA010004,
	.icv		= "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
			  "\xFE\xDC\xBA\x98\x76\x54\x32\x10",
};
static const struct ix_hash_algo hash_alg_sha1 = {
	.cfgword	= 0x00000005,
	.icv		= "\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA"
			  "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0",
};

static struct npe *npe_c;
static struct dma_pool *buffer_pool = NULL;
static struct dma_pool *ctx_pool = NULL;

static struct crypt_ctl *crypt_virt = NULL;
static dma_addr_t crypt_phys;

static int support_aes = 1;

#define DRIVER_NAME "ixp4xx_crypto"

static struct platform_device *pdev;

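/*
 * All crypt_ctl descriptors live in one coherent DMA block, so a
 * descriptor's bus address can be derived from its array index (and
 * vice versa) with plain pointer arithmetic.
 */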
static inline dma_addr_t crypt_virt2phys(struct crypt_ctl *virt)
{
	return crypt_phys + (virt - crypt_virt) * sizeof(struct crypt_ctl);
}

static inline struct crypt_ctl *crypt_phys2virt(dma_addr_t phys)
{
	return crypt_virt + (phys - crypt_phys) / sizeof(struct crypt_ctl);
}

static inline u32 cipher_cfg_enc(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_enc;
}

static inline u32 cipher_cfg_dec(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->cfg_dec;
}

static inline const struct ix_hash_algo *ix_hash(struct crypto_tfm *tfm)
{
	return container_of(tfm->__crt_alg, struct ixp_alg, crypto.base)->hash;
}

static int setup_crypt_desc(void)
{
	struct device *dev = &pdev->dev;

	BUILD_BUG_ON(sizeof(struct crypt_ctl) != 64);
	/* The same block backs the regular ring (0..NPE_QLEN-1) and the
	 * emergency descriptors (NPE_QLEN..NPE_QLEN_TOTAL-1), and it is
	 * freed with this size in release_ixp_crypto(). */
	crypt_virt = dma_alloc_coherent(dev,
					NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
					&crypt_phys, GFP_ATOMIC);
	if (!crypt_virt)
		return -ENOMEM;
	return 0;
}

static spinlock_t desc_lock;

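/*
 * Hand out the next free descriptor from the first NPE_QLEN slots.  A
 * slot is free once the completion path has reset its ctl_flags to
 * CTL_FLAG_UNUSED; if the current slot is still in flight we fail
 * rather than scan, keeping allocation O(1) under desc_lock.
 */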
static struct crypt_ctl *get_crypt_desc(void)
{
	int i;
	static int idx = 0;
	unsigned long flags;

	spin_lock_irqsave(&desc_lock, flags);

	if (unlikely(!crypt_virt))
		setup_crypt_desc();
	if (unlikely(!crypt_virt)) {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN)
			idx = 0;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&desc_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&desc_lock, flags);
		return NULL;
	}
}

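/*
 * Slots NPE_QLEN..NPE_QLEN_TOTAL-1 are reserved for control operations
 * (HMAC pad hashing, reverse-AES key generation) issued from setkey,
 * so that key changes cannot be starved when all regular descriptors
 * are busy with traffic.
 */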
static spinlock_t emerg_lock;
static struct crypt_ctl *get_crypt_desc_emerg(void)
{
	int i;
	static int idx = NPE_QLEN;
	struct crypt_ctl *desc;
	unsigned long flags;

	desc = get_crypt_desc();
	if (desc)
		return desc;
	if (unlikely(!crypt_virt))
		return NULL;

	spin_lock_irqsave(&emerg_lock, flags);
	i = idx;
	if (crypt_virt[i].ctl_flags == CTL_FLAG_UNUSED) {
		if (++idx >= NPE_QLEN_TOTAL)
			idx = NPE_QLEN;
		crypt_virt[i].ctl_flags = CTL_FLAG_USED;
		spin_unlock_irqrestore(&emerg_lock, flags);
		return crypt_virt + i;
	} else {
		spin_unlock_irqrestore(&emerg_lock, flags);
		return NULL;
	}
}

static void free_buf_chain(struct device *dev, struct buffer_desc *buf,
			   dma_addr_t phys)
{
	while (buf) {
		struct buffer_desc *buf1;
		u32 phys1;

		buf1 = buf->next;
		phys1 = buf->phys_next;
		dma_unmap_single(dev, buf->phys_addr, buf->buf_len, buf->dir);
		dma_pool_free(buffer_pool, buf, phys);
		buf = buf1;
		phys = phys1;
	}
}

static struct tasklet_struct crypto_done_tasklet;

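/*
 * When the ICV did not fit into the last buffer_desc of the chain, it
 * was produced into (or compared against) a bounce buffer taken from
 * buffer_pool; on encryption, copy it back to its scattered position
 * in the destination here, then release the bounce buffer.
 */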
static void finish_scattered_hmac(struct crypt_ctl *crypt)
{
	struct aead_request *req = crypt->data.aead_req;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int authsize = crypto_aead_authsize(tfm);
	int decryptlen = req->assoclen + req->cryptlen - authsize;

	if (req_ctx->encrypt) {
		scatterwalk_map_and_copy(req_ctx->hmac_virt,
					 req->dst, decryptlen, authsize, 1);
	}
	dma_pool_free(buffer_pool, req_ctx->hmac_virt, crypt->icv_rev_aes);
}

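/*
 * Completion handler for one descriptor popped from RECV_QID.  The NPE
 * hands back the descriptor's bus address with status in the two low
 * bits; bit 0 set means the ICV check failed, mapped to -EBADMSG.
 */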
static void one_packet(dma_addr_t phys)
{
	struct device *dev = &pdev->dev;
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx;
	int failed;

	failed = phys & 0x1 ? -EBADMSG : 0;
	phys &= ~0x3;
	crypt = crypt_phys2virt(phys);

	switch (crypt->ctl_flags & CTL_FLAG_MASK) {
	case CTL_FLAG_PERFORM_AEAD: {
		struct aead_request *req = crypt->data.aead_req;
		struct aead_ctx *req_ctx = aead_request_ctx(req);

		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		if (req_ctx->hmac_virt) {
			finish_scattered_hmac(crypt);
		}
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_PERFORM_ABLK: {
		struct skcipher_request *req = crypt->data.ablk_req;
		struct ablk_ctx *req_ctx = skcipher_request_ctx(req);

		if (req_ctx->dst) {
			free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
		}
		free_buf_chain(dev, req_ctx->src, crypt->src_buf);
		req->base.complete(&req->base, failed);
		break;
	}
	case CTL_FLAG_GEN_ICV:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		dma_pool_free(ctx_pool, crypt->regist_ptr,
			      crypt->regist_buf->phys_addr);
		dma_pool_free(buffer_pool, crypt->regist_buf, crypt->src_buf);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	case CTL_FLAG_GEN_REVAES:
		ctx = crypto_tfm_ctx(crypt->data.tfm);
		*(u32 *)ctx->decrypt.npe_ctx &= cpu_to_be32(~CIPH_ENCR);
		if (atomic_dec_and_test(&ctx->configuring))
			complete(&ctx->completion);
		break;
	default:
		BUG();
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
}

static void irqhandler(void *_unused)
{
	tasklet_schedule(&crypto_done_tasklet);
}

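/*
 * Drain up to four completions per tasklet run, then reschedule; this
 * bounds the time spent in softirq context while the queue is kept
 * non-empty by the NPE.
 */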
static void crypto_done_action(unsigned long arg)
{
	int i;

	for (i = 0; i < 4; i++) {
		dma_addr_t phys = qmgr_get_entry(RECV_QID);
		if (!phys)
			return;
		one_packet(phys);
	}
	tasklet_schedule(&crypto_done_tasklet);
}

static int init_ixp_crypto(struct device *dev)
{
	int ret = -ENODEV;
	u32 msg[2] = { 0, 0 };

	if (!(~(*IXP4XX_EXP_CFG2) & (IXP4XX_FEATURE_HASH |
				     IXP4XX_FEATURE_AES | IXP4XX_FEATURE_DES))) {
		printk(KERN_ERR "ixp_crypto: No HW crypto available\n");
		return ret;
	}
	npe_c = npe_request(NPE_ID);
	if (!npe_c)
		return ret;

	if (!npe_running(npe_c)) {
		ret = npe_load_firmware(npe_c, npe_name(npe_c), dev);
		if (ret)
			goto npe_release;
		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	} else {
		if (npe_send_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;

		if (npe_recv_message(npe_c, msg, "STATUS_MSG"))
			goto npe_error;
	}

	switch ((msg[1] >> 16) & 0xff) {
	case 3:
		printk(KERN_WARNING "Firmware of %s lacks AES support\n",
		       npe_name(npe_c));
		support_aes = 0;
		break;
	case 4:
	case 5:
		support_aes = 1;
		break;
	default:
		printk(KERN_ERR "Firmware of %s lacks crypto support\n",
		       npe_name(npe_c));
		ret = -ENODEV;
		goto npe_release;
	}
	/* buffer_pool will also be used to sometimes store the hmac,
	 * so assure it is large enough
	 */
	BUILD_BUG_ON(SHA1_DIGEST_SIZE > sizeof(struct buffer_desc));
	buffer_pool = dma_pool_create("buffer", dev,
				      sizeof(struct buffer_desc), 32, 0);
	ret = -ENOMEM;
	if (!buffer_pool) {
		goto err;
	}
	ctx_pool = dma_pool_create("context", dev,
				   NPE_CTX_LEN, 16, 0);
	if (!ctx_pool) {
		goto err;
	}
	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
	if (ret)
		goto err;
	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
	if (ret) {
		qmgr_release_queue(SEND_QID);
		goto err;
	}
	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);

	qmgr_enable_irq(RECV_QID);
	return 0;

npe_error:
	printk(KERN_ERR "%s not responding\n", npe_name(npe_c));
	ret = -EIO;
err:
	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);
npe_release:
	npe_release(npe_c);
	return ret;
}

static void release_ixp_crypto(struct device *dev)
{
	qmgr_disable_irq(RECV_QID);
	tasklet_kill(&crypto_done_tasklet);

	qmgr_release_queue(SEND_QID);
	qmgr_release_queue(RECV_QID);

	dma_pool_destroy(ctx_pool);
	dma_pool_destroy(buffer_pool);

	npe_release(npe_c);

	if (crypt_virt) {
		dma_free_coherent(dev,
				  NPE_QLEN_TOTAL * sizeof(struct crypt_ctl),
				  crypt_virt, crypt_phys);
	}
}

static void reset_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dir->npe_ctx_idx = 0;
	dir->npe_mode = 0;
}

static int init_sa_dir(struct ix_sa_dir *dir)
{
	dir->npe_ctx = dma_pool_alloc(ctx_pool, GFP_KERNEL, &dir->npe_ctx_phys);
	if (!dir->npe_ctx) {
		return -ENOMEM;
	}
	reset_sa_dir(dir);
	return 0;
}

static void free_sa_dir(struct ix_sa_dir *dir)
{
	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
	dma_pool_free(ctx_pool, dir->npe_ctx, dir->npe_ctx_phys);
}

static int init_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int ret;

	atomic_set(&ctx->configuring, 0);
	ret = init_sa_dir(&ctx->encrypt);
	if (ret)
		return ret;
	ret = init_sa_dir(&ctx->decrypt);
	if (ret) {
		free_sa_dir(&ctx->encrypt);
	}
	return ret;
}

static int init_tfm_ablk(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct ablk_ctx));
	return init_tfm(crypto_skcipher_tfm(tfm));
}

static int init_tfm_aead(struct crypto_aead *tfm)
{
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_ctx));
	return init_tfm(crypto_aead_tfm(tfm));
}

static void exit_tfm(struct crypto_tfm *tfm)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);

	free_sa_dir(&ctx->encrypt);
	free_sa_dir(&ctx->decrypt);
}

static void exit_tfm_ablk(struct crypto_skcipher *tfm)
{
	exit_tfm(crypto_skcipher_tfm(tfm));
}

static void exit_tfm_aead(struct crypto_aead *tfm)
{
	exit_tfm(crypto_aead_tfm(tfm));
}

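/*
 * Precompute one half of the HMAC state: XOR the key with the ipad or
 * opad byte (xpad), let the NPE hash the padded block, and have it
 * write the resulting chaining variable to `target` inside the
 * per-direction context.  Completion is signalled via CTL_FLAG_GEN_ICV.
 */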
static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
			      int init_len, u32 ctx_addr, const u8 *key, int key_len)
{
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypt_ctl *crypt;
	struct buffer_desc *buf;
	int i;
	u8 *pad;
	dma_addr_t pad_phys, buf_phys;

	BUILD_BUG_ON(NPE_CTX_LEN < HMAC_PAD_BLOCKLEN);
	pad = dma_pool_alloc(ctx_pool, GFP_KERNEL, &pad_phys);
	if (!pad)
		return -ENOMEM;
	buf = dma_pool_alloc(buffer_pool, GFP_KERNEL, &buf_phys);
	if (!buf) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		return -ENOMEM;
	}
	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		dma_pool_free(ctx_pool, pad, pad_phys);
		dma_pool_free(buffer_pool, buf, buf_phys);
		return -EAGAIN;
	}

	memcpy(pad, key, key_len);
	memset(pad + key_len, 0, HMAC_PAD_BLOCKLEN - key_len);
	for (i = 0; i < HMAC_PAD_BLOCKLEN; i++) {
		pad[i] ^= xpad;
	}

	crypt->data.tfm = tfm;
	crypt->regist_ptr = pad;
	crypt->regist_buf = buf;

	crypt->auth_offs = 0;
	crypt->auth_len = HMAC_PAD_BLOCKLEN;
	crypt->crypto_ctx = ctx_addr;
	crypt->src_buf = buf_phys;
	crypt->icv_rev_aes = target;
	crypt->mode = NPE_OP_HASH_GEN_ICV;
	crypt->init_len = init_len;
	crypt->ctl_flags |= CTL_FLAG_GEN_ICV;

	buf->next = 0;
	buf->buf_len = HMAC_PAD_BLOCKLEN;
	buf->pkt_len = 0;
	buf->phys_addr = pad_phys;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

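/*
 * Lay out the authentication part of the NPE context: the (possibly
 * byte-swapped) config word followed by the algorithm's initial
 * chaining variable, then queue two register_chain_var() operations to
 * replace that ICV with the HMAC inner (itarget) and outer (otarget)
 * digests derived from the key.
 */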
static int setup_auth(struct crypto_tfm *tfm, int encrypt, unsigned authsize,
		      const u8 *key, int key_len, unsigned digest_len)
{
	u32 itarget, otarget, npe_ctx_addr;
	unsigned char *cinfo;
	int init_len, ret = 0;
	u32 cfgword;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	const struct ix_hash_algo *algo;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx + dir->npe_ctx_idx;
	algo = ix_hash(tfm);

	/* write cfg word to cryptinfo */
	cfgword = algo->cfgword | (authsize << 6); /* (authsize/4) << 8 */
#ifndef __ARMEB__
	cfgword ^= 0xAA000000; /* change the "byte swap" flags */
#endif
	*(u32 *)cinfo = cpu_to_be32(cfgword);
	cinfo += sizeof(cfgword);

	/* write ICV to cryptinfo */
	memcpy(cinfo, algo->icv, digest_len);
	cinfo += digest_len;

	itarget = dir->npe_ctx_phys + dir->npe_ctx_idx
		  + sizeof(algo->cfgword);
	otarget = itarget + digest_len;
	init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx);
	npe_ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx;

	dir->npe_ctx_idx += init_len;
	dir->npe_mode |= NPE_OP_HASH_ENABLE;

	if (!encrypt)
		dir->npe_mode |= NPE_OP_HASH_VERIFY;

	ret = register_chain_var(tfm, HMAC_OPAD_VALUE, otarget,
				 init_len, npe_ctx_addr, key, key_len);
	if (ret)
		return ret;
	return register_chain_var(tfm, HMAC_IPAD_VALUE, itarget,
				  init_len, npe_ctx_addr, key, key_len);
}

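/*
 * AES decryption needs the reverse (decryption) key schedule.  Run a
 * dummy encryption with NPE_OP_ENC_GEN_KEY so the NPE derives it and
 * deposits it right after the config word of the decrypt context; the
 * temporarily-set CIPH_ENCR bit is cleared again in one_packet().
 */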
static int gen_rev_aes_key(struct crypto_tfm *tfm)
{
	struct crypt_ctl *crypt;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	struct ix_sa_dir *dir = &ctx->decrypt;

	crypt = get_crypt_desc_emerg();
	if (!crypt) {
		return -EAGAIN;
	}
	*(u32 *)dir->npe_ctx |= cpu_to_be32(CIPH_ENCR);

	crypt->data.tfm = tfm;
	crypt->crypt_offs = 0;
	crypt->crypt_len = AES_BLOCK128;
	crypt->src_buf = 0;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->icv_rev_aes = dir->npe_ctx_phys + sizeof(u32);
	crypt->mode = NPE_OP_ENC_GEN_KEY;
	crypt->init_len = dir->npe_ctx_idx;
	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;

	atomic_inc(&ctx->configuring);
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return 0;
}

static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
			const u8 *key, int key_len)
{
	u8 *cinfo;
	u32 cipher_cfg;
	u32 keylen_cfg = 0;
	struct ix_sa_dir *dir;
	struct ixp_ctx *ctx = crypto_tfm_ctx(tfm);
	int err;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;
	cinfo = dir->npe_ctx;

	if (encrypt) {
		cipher_cfg = cipher_cfg_enc(tfm);
		dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT;
	} else {
		cipher_cfg = cipher_cfg_dec(tfm);
	}
	if (cipher_cfg & MOD_AES) {
		switch (key_len) {
		case 16: keylen_cfg = MOD_AES128; break;
		case 24: keylen_cfg = MOD_AES192; break;
		case 32: keylen_cfg = MOD_AES256; break;
		default:
			return -EINVAL;
		}
		cipher_cfg |= keylen_cfg;
	} else {
		err = crypto_des_verify_key(tfm, key);
		if (err)
			return err;
	}
	/* write cfg word to cryptinfo */
	*(u32 *)cinfo = cpu_to_be32(cipher_cfg);
	cinfo += sizeof(cipher_cfg);

	/* write cipher key to cryptinfo */
	memcpy(cinfo, key, key_len);
	/* NPE wants keylen set to DES3_EDE_KEY_SIZE even for single DES */
	if (key_len < DES3_EDE_KEY_SIZE && !(cipher_cfg & MOD_AES)) {
		memset(cinfo + key_len, 0, DES3_EDE_KEY_SIZE - key_len);
		key_len = DES3_EDE_KEY_SIZE;
	}
	dir->npe_ctx_idx = sizeof(cipher_cfg) + key_len;
	dir->npe_mode |= NPE_OP_CRYPT_ENABLE;
	if ((cipher_cfg & MOD_AES) && !encrypt) {
		return gen_rev_aes_key(tfm);
	}
	return 0;
}

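/*
 * Map a scatterlist for the NPE: allocate one buffer_desc per sg entry
 * from buffer_pool and link them into a chain hanging off the caller's
 * on-stack hook.  Returns the last descriptor, or NULL on allocation
 * failure (the partial chain is NULL-terminated so the caller can free
 * it with free_buf_chain()).
 */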
static struct buffer_desc *chainup_buffers(struct device *dev,
					   struct scatterlist *sg, unsigned nbytes,
					   struct buffer_desc *buf, gfp_t flags,
					   enum dma_data_direction dir)
{
	for (; nbytes > 0; sg = sg_next(sg)) {
		unsigned len = min(nbytes, sg->length);
		struct buffer_desc *next_buf;
		dma_addr_t next_buf_phys;
		void *ptr;

		nbytes -= len;
		ptr = sg_virt(sg);
		next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
		if (!next_buf) {
			/* terminate the chain so the caller can free it */
			buf->next = NULL;
			buf->phys_next = 0;
			return NULL;
		}
		sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
		buf->next = next_buf;
		buf->phys_next = next_buf_phys;
		buf = next_buf;

		buf->phys_addr = sg_dma_address(sg);
		buf->buf_len = len;
		buf->dir = dir;
	}
	buf->next = NULL;
	buf->phys_next = 0;
	return buf;
}

static int ablk_setkey(struct crypto_skcipher *tfm, const u8 *key,
		       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ctx->encrypt.npe_mode = NPE_OP_HMAC_DISABLE;
	ctx->decrypt.npe_mode = NPE_OP_HMAC_DISABLE;

	ret = setup_cipher(&tfm->base, 0, key, key_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, key, key_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int ablk_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			    unsigned int key_len)
{
	return verify_skcipher_des3_key(tfm, key) ?:
	       ablk_setkey(tfm, key, key_len);
}

static int ablk_rfc3686_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int key_len)
{
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);

	/* the nonce is stored in bytes at end of key */
	if (key_len < CTR_RFC3686_NONCE_SIZE)
		return -EINVAL;

	memcpy(ctx->nonce, key + (key_len - CTR_RFC3686_NONCE_SIZE),
	       CTR_RFC3686_NONCE_SIZE);

	key_len -= CTR_RFC3686_NONCE_SIZE;
	return ablk_setkey(tfm, key, key_len);
}

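/*
 * Build and queue one crypt_ctl descriptor for a skcipher request.
 * Returns -EINPROGRESS on success; completion (and the request's final
 * callback) happens in one_packet() once the NPE posts the descriptor
 * back on RECV_QID.
 */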
static int ablk_perform(struct skcipher_request *req, int encrypt)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	unsigned ivsize = crypto_skcipher_ivsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int nbytes = req->cryptlen;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	struct ablk_ctx *req_ctx = skcipher_request_ctx(req);
	struct buffer_desc src_hook;
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	dir = encrypt ? &ctx->encrypt : &ctx->decrypt;

	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.ablk_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = 0;
	crypt->crypt_len = nbytes;

	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);
	if (req->src != req->dst) {
		struct buffer_desc dst_hook;
		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		/* This was never tested by Intel
		 * for more than one dst buffer, I think. */
		req_ctx->dst = NULL;
		if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
				     flags, DMA_FROM_DEVICE))
			goto free_buf_dest;
		src_direction = DMA_TO_DEVICE;
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;
	} else {
		req_ctx->dst = NULL;
	}
	req_ctx->src = NULL;
	if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
			     flags, src_direction))
		goto free_buf_src;

	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
free_buf_dest:
	if (req->src != req->dst) {
		free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
	}
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int ablk_encrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 1);
}

static int ablk_decrypt(struct skcipher_request *req)
{
	return ablk_perform(req, 0);
}

static int ablk_rfc3686_crypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct ixp_ctx *ctx = crypto_skcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->iv;
	int ret;

	/* set up counter block */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);

	/* initialize counter portion of counter block */
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) =
		cpu_to_be32(1);

	req->iv = iv;
	ret = ablk_perform(req, 1);
	req->iv = info;
	return ret;
}

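/*
 * As ablk_perform(), but hashing covers assoclen + cryptlen and the
 * ICV is read from / written to the last authsize bytes of the chain.
 * If those bytes span scatterlist entries, a contiguous bounce buffer
 * (hmac_virt) is used instead and fixed up in finish_scattered_hmac().
 */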
static int aead_perform(struct aead_request *req, int encrypt,
			int cryptoffset, int eff_cryptlen, u8 *iv)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned ivsize = crypto_aead_ivsize(tfm);
	unsigned authsize = crypto_aead_authsize(tfm);
	struct ix_sa_dir *dir;
	struct crypt_ctl *crypt;
	unsigned int cryptlen;
	struct buffer_desc *buf, src_hook;
	struct aead_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = &pdev->dev;
	gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
				GFP_KERNEL : GFP_ATOMIC;
	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
	unsigned int lastlen;

	if (qmgr_stat_full(SEND_QID))
		return -EAGAIN;
	if (atomic_read(&ctx->configuring))
		return -EAGAIN;

	if (encrypt) {
		dir = &ctx->encrypt;
		cryptlen = req->cryptlen;
	} else {
		dir = &ctx->decrypt;
		/* req->cryptlen includes the authsize when decrypting */
		cryptlen = req->cryptlen - authsize;
		eff_cryptlen -= authsize;
	}
	crypt = get_crypt_desc();
	if (!crypt)
		return -ENOMEM;

	crypt->data.aead_req = req;
	crypt->crypto_ctx = dir->npe_ctx_phys;
	crypt->mode = dir->npe_mode;
	crypt->init_len = dir->npe_ctx_idx;

	crypt->crypt_offs = cryptoffset;
	crypt->crypt_len = eff_cryptlen;

	crypt->auth_offs = 0;
	crypt->auth_len = req->assoclen + cryptlen;
	BUG_ON(ivsize && !req->iv);
	memcpy(crypt->iv, req->iv, ivsize);

	buf = chainup_buffers(dev, req->src, crypt->auth_len,
			      &src_hook, flags, src_direction);
	req_ctx->src = src_hook.next;
	crypt->src_buf = src_hook.phys_next;
	if (!buf)
		goto free_buf_src;

	lastlen = buf->buf_len;
	if (lastlen >= authsize)
		crypt->icv_rev_aes = buf->phys_addr +
				     buf->buf_len - authsize;

	req_ctx->dst = NULL;

	if (req->src != req->dst) {
		struct buffer_desc dst_hook;

		crypt->mode |= NPE_OP_NOT_IN_PLACE;
		src_direction = DMA_TO_DEVICE;

		buf = chainup_buffers(dev, req->dst, crypt->auth_len,
				      &dst_hook, flags, DMA_FROM_DEVICE);
		req_ctx->dst = dst_hook.next;
		crypt->dst_buf = dst_hook.phys_next;

		if (!buf)
			goto free_buf_dst;

		if (encrypt) {
			lastlen = buf->buf_len;
			if (lastlen >= authsize)
				crypt->icv_rev_aes = buf->phys_addr +
						     buf->buf_len - authsize;
		}
	}

	if (unlikely(lastlen < authsize)) {
		/* The 12 hmac bytes are scattered,
		 * we need to copy them into a safe buffer */
		req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
						    &crypt->icv_rev_aes);
		if (unlikely(!req_ctx->hmac_virt))
			goto free_buf_dst;
		if (!encrypt) {
			scatterwalk_map_and_copy(req_ctx->hmac_virt,
						 req->src, cryptlen, authsize, 0);
		}
		req_ctx->encrypt = encrypt;
	} else {
		req_ctx->hmac_virt = NULL;
	}

	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
	BUG_ON(qmgr_stat_overflow(SEND_QID));
	return -EINPROGRESS;

free_buf_dst:
	free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
free_buf_src:
	free_buf_chain(dev, req_ctx->src, crypt->src_buf);
	crypt->ctl_flags = CTL_FLAG_UNUSED;
	return -ENOMEM;
}

static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned digest_len = crypto_aead_maxauthsize(tfm);
	int ret;

	if (!ctx->enckey_len && !ctx->authkey_len)
		return 0;
	init_completion(&ctx->completion);
	atomic_inc(&ctx->configuring);

	reset_sa_dir(&ctx->encrypt);
	reset_sa_dir(&ctx->decrypt);

	ret = setup_cipher(&tfm->base, 0, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_cipher(&tfm->base, 1, ctx->enckey, ctx->enckey_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 0, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
	if (ret)
		goto out;
	ret = setup_auth(&tfm->base, 1, authsize, ctx->authkey,
			 ctx->authkey_len, digest_len);
out:
	if (!atomic_dec_and_test(&ctx->configuring))
		wait_for_completion(&ctx->completion);
	return ret;
}

static int aead_setauthsize(struct crypto_aead *tfm, unsigned int authsize)
{
	int max = crypto_aead_maxauthsize(tfm) >> 2;

	if ((authsize >> 2) < 1 || (authsize >> 2) > max || (authsize & 3))
		return -EINVAL;
	return aead_setup(tfm, authsize);
}

static int aead_setkey(struct crypto_aead *tfm, const u8 *key,
		       unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;

	if (crypto_authenc_extractkeys(&keys, key, keylen) != 0)
		goto badkey;

	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	if (keys.enckeylen > sizeof(ctx->enckey))
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
}

static int des3_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto_authenc_keys keys;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.authkeylen > sizeof(ctx->authkey))
		goto badkey;

	err = verify_aead_des3_key(tfm, keys.enckey, keys.enckeylen);
	if (err)
		goto badkey;

	memcpy(ctx->authkey, keys.authkey, keys.authkeylen);
	memcpy(ctx->enckey, keys.enckey, keys.enckeylen);
	ctx->authkey_len = keys.authkeylen;
	ctx->enckey_len = keys.enckeylen;

	memzero_explicit(&keys, sizeof(keys));
	return aead_setup(tfm, crypto_aead_authsize(tfm));
badkey:
	memzero_explicit(&keys, sizeof(keys));
	return err;
}

static int aead_encrypt(struct aead_request *req)
{
	return aead_perform(req, 1, req->assoclen, req->cryptlen, req->iv);
}

static int aead_decrypt(struct aead_request *req)
{
	return aead_perform(req, 0, req->assoclen, req->cryptlen, req->iv);
}

static struct ixp_alg ixp4xx_algos[] = {
{
	.crypto	= {
		.base.cra_name		= "cbc(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,

		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
		.ivsize			= DES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,

}, {
	.crypto	= {
		.base.cra_name		= "ecb(des)",
		.base.cra_blocksize	= DES_BLOCK_SIZE,
		.min_keysize		= DES_KEY_SIZE,
		.max_keysize		= DES_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,

		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.ivsize			= DES3_EDE_BLOCK_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(des3_ede)",
		.base.cra_blocksize	= DES3_EDE_BLOCK_SIZE,

		.min_keysize		= DES3_EDE_KEY_SIZE,
		.max_keysize		= DES3_EDE_KEY_SIZE,
		.setkey			= ablk_des3_setkey,
	},
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_ECB | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_ECB | KEYLEN_192,
}, {
	.crypto	= {
		.base.cra_name		= "cbc(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base.cra_name		= "ecb(aes)",
		.base.cra_blocksize	= AES_BLOCK_SIZE,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_ECB,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_ECB,
}, {
	.crypto	= {
		.base.cra_name		= "ctr(aes)",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
}, {
	.crypto	= {
		.base.cra_name		= "rfc3686(ctr(aes))",
		.base.cra_blocksize	= 1,

		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
		.setkey			= ablk_rfc3686_setkey,
		.encrypt		= ablk_rfc3686_crypt,
		.decrypt		= ablk_rfc3686_crypt,
	},
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CTR,
	.cfg_dec = CIPH_ENCR | MOD_AES | MOD_CTR,
} };

static struct ixp_aead_alg ixp4xx_aeads[] = {
{
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des))",
			.cra_blocksize	= DES_BLOCK_SIZE,
		},
		.ivsize		= DES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
		},
		.ivsize		= DES3_EDE_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
		.setkey		= des3_aead_setkey,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_3DES | MOD_CBC_ENC | KEYLEN_192,
	.cfg_dec = CIPH_DECR | MOD_3DES | MOD_CBC_DEC | KEYLEN_192,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(md5),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= MD5_DIGEST_SIZE,
	},
	.hash = &hash_alg_md5,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
}, {
	.crypto	= {
		.base = {
			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
			.cra_blocksize	= AES_BLOCK_SIZE,
		},
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= SHA1_DIGEST_SIZE,
	},
	.hash = &hash_alg_sha1,
	.cfg_enc = CIPH_ENCR | MOD_AES | MOD_CBC_ENC,
	.cfg_dec = CIPH_DECR | MOD_AES | MOD_CBC_DEC,
} };

#define IXP_POSTFIX "-ixp4xx"

static const struct platform_device_info ixp_dev_info __initdata = {
	.name		= DRIVER_NAME,
	.id		= 0,
	.dma_mask	= DMA_BIT_MASK(32),
};

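/*
 * Module init: create the backing platform device (needed for DMA
 * mappings), bring up the NPE and queues, then register every template
 * the firmware supports; AES-based entries are skipped when the
 * firmware reported no AES capability.  Users reach these through the
 * generic crypto API, e.g. requesting "cbc(aes)" resolves to
 * "cbc(aes)-ixp4xx" by priority.
 */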
static int __init ixp_module_init(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i, err;

	pdev = platform_device_register_full(&ixp_dev_info);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	spin_lock_init(&desc_lock);
	spin_lock_init(&emerg_lock);

	err = init_ixp_crypto(&pdev->dev);
	if (err) {
		platform_device_unregister(pdev);
		return err;
	}
	for (i = 0; i < num; i++) {
		struct skcipher_alg *cra = &ixp4xx_algos[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_algos[i].cfg_enc & MOD_AES)) {
			continue;
		}

		/* block ciphers */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		if (!cra->setkey)
			cra->setkey = ablk_setkey;
		if (!cra->encrypt)
			cra->encrypt = ablk_encrypt;
		if (!cra->decrypt)
			cra->decrypt = ablk_decrypt;
		cra->init = init_tfm_ablk;
		cra->exit = exit_tfm_ablk;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;
		if (crypto_register_skcipher(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_name);
		else
			ixp4xx_algos[i].registered = 1;
	}

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		struct aead_alg *cra = &ixp4xx_aeads[i].crypto;

		if (snprintf(cra->base.cra_driver_name, CRYPTO_MAX_ALG_NAME,
			     "%s"IXP_POSTFIX, cra->base.cra_name) >=
		    CRYPTO_MAX_ALG_NAME)
			continue;
		if (!support_aes && (ixp4xx_aeads[i].cfg_enc & MOD_AES))
			continue;

		/* authenc */
		cra->base.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
				      CRYPTO_ALG_ASYNC;
		cra->setkey = cra->setkey ?: aead_setkey;
		cra->setauthsize = aead_setauthsize;
		cra->encrypt = aead_encrypt;
		cra->decrypt = aead_decrypt;
		cra->init = init_tfm_aead;
		cra->exit = exit_tfm_aead;

		cra->base.cra_ctxsize = sizeof(struct ixp_ctx);
		cra->base.cra_module = THIS_MODULE;
		cra->base.cra_alignmask = 3;
		cra->base.cra_priority = 300;

		if (crypto_register_aead(cra))
			printk(KERN_ERR "Failed to register '%s'\n",
			       cra->base.cra_driver_name);
		else
			ixp4xx_aeads[i].registered = 1;
	}
	return 0;
}

static void __exit ixp_module_exit(void)
{
	int num = ARRAY_SIZE(ixp4xx_algos);
	int i;

	for (i = 0; i < ARRAY_SIZE(ixp4xx_aeads); i++) {
		if (ixp4xx_aeads[i].registered)
			crypto_unregister_aead(&ixp4xx_aeads[i].crypto);
	}

	for (i = 0; i < num; i++) {
		if (ixp4xx_algos[i].registered)
			crypto_unregister_skcipher(&ixp4xx_algos[i].crypto);
	}
	release_ixp_crypto(&pdev->dev);
	platform_device_unregister(pdev);
}

module_init(ixp_module_init);
module_exit(ixp_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>");
MODULE_DESCRIPTION("IXP4xx hardware crypto");