drivers/crypto/n2_core.c (Linux 3.3-rc6)
1 /* n2_core.c: Niagara2 Stream Processing Unit (SPU) crypto support.
3 * Copyright (C) 2010, 2011 David S. Miller <davem@davemloft.net>
4 */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/kernel.h>
9 #include <linux/module.h>
10 #include <linux/of.h>
11 #include <linux/of_device.h>
12 #include <linux/cpumask.h>
13 #include <linux/slab.h>
14 #include <linux/interrupt.h>
15 #include <linux/crypto.h>
16 #include <crypto/md5.h>
17 #include <crypto/sha.h>
18 #include <crypto/aes.h>
19 #include <crypto/des.h>
20 #include <linux/mutex.h>
21 #include <linux/delay.h>
22 #include <linux/sched.h>
24 #include <crypto/internal/hash.h>
25 #include <crypto/scatterwalk.h>
26 #include <crypto/algapi.h>
28 #include <asm/hypervisor.h>
29 #include <asm/mdesc.h>
31 #include "n2_core.h"
33 #define DRV_MODULE_NAME "n2_crypto"
34 #define DRV_MODULE_VERSION "0.2"
35 #define DRV_MODULE_RELDATE "July 28, 2011"
37 static char version[] __devinitdata =
38 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
40 MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
41 MODULE_DESCRIPTION("Niagara2 Crypto driver");
42 MODULE_LICENSE("GPL");
43 MODULE_VERSION(DRV_MODULE_VERSION);
45 #define N2_CRA_PRIORITY 300
47 static DEFINE_MUTEX(spu_lock);
49 struct spu_queue {
50 cpumask_t sharing;
51 unsigned long qhandle;
53 spinlock_t lock;
54 u8 q_type;
55 void *q;
56 unsigned long head;
57 unsigned long tail;
58 struct list_head jobs;
60 unsigned long devino;
62 char irq_name[32];
63 unsigned int irq;
65 struct list_head list;
68 static struct spu_queue **cpu_to_cwq;
69 static struct spu_queue **cpu_to_mau;
71 static unsigned long spu_next_offset(struct spu_queue *q, unsigned long off)
73 if (q->q_type == HV_NCS_QTYPE_MAU) {
74 off += MAU_ENTRY_SIZE;
75 if (off == (MAU_ENTRY_SIZE * MAU_NUM_ENTRIES))
76 off = 0;
77 } else {
78 off += CWQ_ENTRY_SIZE;
79 if (off == (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES))
80 off = 0;
82 return off;
85 struct n2_request_common {
86 struct list_head entry;
87 unsigned int offset;
89 #define OFFSET_NOT_RUNNING (~(unsigned int)0)
91 /* An async job request records the final tail value it used in
92 * n2_request_common->offset; test to see if that offset lies in
93 * the range (old_head, new_head], i.e. past old_head and up to and including new_head.
95 static inline bool job_finished(struct spu_queue *q, unsigned int offset,
96 unsigned long old_head, unsigned long new_head)
98 if (old_head <= new_head) {
99 if (offset > old_head && offset <= new_head)
100 return true;
101 } else {
102 if (offset > old_head || offset <= new_head)
103 return true;
105 return false;
108 /* When the HEAD marker is unequal to the actual HEAD, we get
109 * a virtual device INO interrupt. We should process the
110 * completed CWQ entries and adjust the HEAD marker to clear
111 * the IRQ.
113 static irqreturn_t cwq_intr(int irq, void *dev_id)
115 unsigned long off, new_head, hv_ret;
116 struct spu_queue *q = dev_id;
118 pr_err("CPU[%d]: Got CWQ interrupt for qhdl[%lx]\n",
119 smp_processor_id(), q->qhandle);
121 spin_lock(&q->lock);
123 hv_ret = sun4v_ncs_gethead(q->qhandle, &new_head);
125 pr_err("CPU[%d]: CWQ gethead[%lx] hv_ret[%lu]\n",
126 smp_processor_id(), new_head, hv_ret);
128 for (off = q->head; off != new_head; off = spu_next_offset(q, off)) {
129 /* XXX ... XXX */
132 hv_ret = sun4v_ncs_sethead_marker(q->qhandle, new_head);
133 if (hv_ret == HV_EOK)
134 q->head = new_head;
136 spin_unlock(&q->lock);
138 return IRQ_HANDLED;
141 static irqreturn_t mau_intr(int irq, void *dev_id)
143 struct spu_queue *q = dev_id;
144 unsigned long head, hv_ret;
146 spin_lock(&q->lock);
148 pr_err("CPU[%d]: Got MAU interrupt for qhdl[%lx]\n",
149 smp_processor_id(), q->qhandle);
151 hv_ret = sun4v_ncs_gethead(q->qhandle, &head);
153 pr_err("CPU[%d]: MAU gethead[%lx] hv_ret[%lu]\n",
154 smp_processor_id(), head, hv_ret);
156 sun4v_ncs_sethead_marker(q->qhandle, head);
158 spin_unlock(&q->lock);
160 return IRQ_HANDLED;
163 static void *spu_queue_next(struct spu_queue *q, void *cur)
165 return q->q + spu_next_offset(q, cur - q->q);
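/* Return the number of free entries in the queue.  One slot is always
 * left unused so that a full queue never wraps the tail back onto the
 * head, which would be indistinguishable from an empty queue
 * (head == tail).
 */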
168 static int spu_queue_num_free(struct spu_queue *q)
170 unsigned long head = q->head;
171 unsigned long tail = q->tail;
172 unsigned long end = (CWQ_ENTRY_SIZE * CWQ_NUM_ENTRIES);
173 unsigned long diff;
175 if (head > tail)
176 diff = head - tail;
177 else
178 diff = (end - tail) + head;
180 return (diff / CWQ_ENTRY_SIZE) - 1;
183 static void *spu_queue_alloc(struct spu_queue *q, int num_entries)
185 int avail = spu_queue_num_free(q);
187 if (avail >= num_entries)
188 return q->q + q->tail;
190 return NULL;
193 static unsigned long spu_queue_submit(struct spu_queue *q, void *last)
195 unsigned long hv_ret, new_tail;
197 new_tail = spu_next_offset(q, last - q->q);
199 hv_ret = sun4v_ncs_settail(q->qhandle, new_tail);
200 if (hv_ret == HV_EOK)
201 q->tail = new_tail;
202 return hv_ret;
205 static u64 control_word_base(unsigned int len, unsigned int hmac_key_len,
206 int enc_type, int auth_type,
207 unsigned int hash_len,
208 bool sfas, bool sob, bool eob, bool encrypt,
209 int opcode)
211 u64 word = (len - 1) & CONTROL_LEN;
213 word |= ((u64) opcode << CONTROL_OPCODE_SHIFT);
214 word |= ((u64) enc_type << CONTROL_ENC_TYPE_SHIFT);
215 word |= ((u64) auth_type << CONTROL_AUTH_TYPE_SHIFT);
216 if (sfas)
217 word |= CONTROL_STORE_FINAL_AUTH_STATE;
218 if (sob)
219 word |= CONTROL_START_OF_BLOCK;
220 if (eob)
221 word |= CONTROL_END_OF_BLOCK;
222 if (encrypt)
223 word |= CONTROL_ENCRYPT;
224 if (hmac_key_len)
225 word |= ((u64) (hmac_key_len - 1)) << CONTROL_HMAC_KEY_LEN_SHIFT;
226 if (hash_len)
227 word |= ((u64) (hash_len - 1)) << CONTROL_HASH_LEN_SHIFT;
229 return word;
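/* For illustration, the first descriptor of a plain SHA256 digest over
 * 'nbytes' of data ends up being built (by n2_do_async_digest() below)
 * roughly as:
 *
 *   control_word_base(nbytes, 0, 0, AUTH_TYPE_SHA256, SHA256_DIGEST_SIZE,
 *                     false, true, false, false,
 *                     OPCODE_INPLACE_BIT | OPCODE_AUTH_MAC);
 */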
232 #if 0
233 static inline bool n2_should_run_async(struct spu_queue *qp, int this_len)
235 if (this_len >= 64 ||
236 qp->head != qp->tail)
237 return true;
238 return false;
240 #endif
242 struct n2_ahash_alg {
243 struct list_head entry;
244 const char *hash_zero;
245 const u32 *hash_init;
246 u8 hw_op_hashsz;
247 u8 digest_size;
248 u8 auth_type;
249 u8 hmac_type;
250 struct ahash_alg alg;
253 static inline struct n2_ahash_alg *n2_ahash_alg(struct crypto_tfm *tfm)
255 struct crypto_alg *alg = tfm->__crt_alg;
256 struct ahash_alg *ahash_alg;
258 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
260 return container_of(ahash_alg, struct n2_ahash_alg, alg);
263 struct n2_hmac_alg {
264 const char *child_alg;
265 struct n2_ahash_alg derived;
268 static inline struct n2_hmac_alg *n2_hmac_alg(struct crypto_tfm *tfm)
270 struct crypto_alg *alg = tfm->__crt_alg;
271 struct ahash_alg *ahash_alg;
273 ahash_alg = container_of(alg, struct ahash_alg, halg.base);
275 return container_of(ahash_alg, struct n2_hmac_alg, derived.alg);
278 struct n2_hash_ctx {
279 struct crypto_ahash *fallback_tfm;
282 #define N2_HASH_KEY_MAX 32 /* HW limit for all HMAC requests */
284 struct n2_hmac_ctx {
285 struct n2_hash_ctx base;
287 struct crypto_shash *child_shash;
289 int hash_key_len;
290 unsigned char hash_key[N2_HASH_KEY_MAX];
293 struct n2_hash_req_ctx {
294 union {
295 struct md5_state md5;
296 struct sha1_state sha1;
297 struct sha256_state sha256;
298 } u;
300 struct ahash_request fallback_req;
303 static int n2_hash_async_init(struct ahash_request *req)
305 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
306 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
307 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
309 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
310 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
312 return crypto_ahash_init(&rctx->fallback_req);
315 static int n2_hash_async_update(struct ahash_request *req)
317 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
318 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
319 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
321 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
322 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
323 rctx->fallback_req.nbytes = req->nbytes;
324 rctx->fallback_req.src = req->src;
326 return crypto_ahash_update(&rctx->fallback_req);
329 static int n2_hash_async_final(struct ahash_request *req)
331 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
332 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
333 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
335 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
336 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
337 rctx->fallback_req.result = req->result;
339 return crypto_ahash_final(&rctx->fallback_req);
342 static int n2_hash_async_finup(struct ahash_request *req)
344 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
345 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
346 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
348 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
349 rctx->fallback_req.base.flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
350 rctx->fallback_req.nbytes = req->nbytes;
351 rctx->fallback_req.src = req->src;
352 rctx->fallback_req.result = req->result;
354 return crypto_ahash_finup(&rctx->fallback_req);
357 static int n2_hash_cra_init(struct crypto_tfm *tfm)
359 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
360 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
361 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
362 struct crypto_ahash *fallback_tfm;
363 int err;
365 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
366 CRYPTO_ALG_NEED_FALLBACK);
367 if (IS_ERR(fallback_tfm)) {
368 pr_warning("Fallback driver '%s' could not be loaded!\n",
369 fallback_driver_name);
370 err = PTR_ERR(fallback_tfm);
371 goto out;
374 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
375 crypto_ahash_reqsize(fallback_tfm)));
377 ctx->fallback_tfm = fallback_tfm;
378 return 0;
380 out:
381 return err;
384 static void n2_hash_cra_exit(struct crypto_tfm *tfm)
386 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
387 struct n2_hash_ctx *ctx = crypto_ahash_ctx(ahash);
389 crypto_free_ahash(ctx->fallback_tfm);
392 static int n2_hmac_cra_init(struct crypto_tfm *tfm)
394 const char *fallback_driver_name = tfm->__crt_alg->cra_name;
395 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
396 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
397 struct n2_hmac_alg *n2alg = n2_hmac_alg(tfm);
398 struct crypto_ahash *fallback_tfm;
399 struct crypto_shash *child_shash;
400 int err;
402 fallback_tfm = crypto_alloc_ahash(fallback_driver_name, 0,
403 CRYPTO_ALG_NEED_FALLBACK);
404 if (IS_ERR(fallback_tfm)) {
405 pr_warning("Fallback driver '%s' could not be loaded!\n",
406 fallback_driver_name);
407 err = PTR_ERR(fallback_tfm);
408 goto out;
411 child_shash = crypto_alloc_shash(n2alg->child_alg, 0, 0);
412 if (IS_ERR(child_shash)) {
413 pr_warning("Child shash '%s' could not be loaded!\n",
414 n2alg->child_alg);
415 err = PTR_ERR(child_shash);
416 goto out_free_fallback;
419 crypto_ahash_set_reqsize(ahash, (sizeof(struct n2_hash_req_ctx) +
420 crypto_ahash_reqsize(fallback_tfm)));
422 ctx->child_shash = child_shash;
423 ctx->base.fallback_tfm = fallback_tfm;
424 return 0;
426 out_free_fallback:
427 crypto_free_ahash(fallback_tfm);
429 out:
430 return err;
433 static void n2_hmac_cra_exit(struct crypto_tfm *tfm)
435 struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
436 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(ahash);
438 crypto_free_ahash(ctx->base.fallback_tfm);
439 crypto_free_shash(ctx->child_shash);
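/* HMAC keys longer than the hash block size are first digested down to
 * the digest size with the child shash, as RFC 2104 specifies.  A key
 * that is shorter than the block size but still larger than
 * N2_HASH_KEY_MAX cannot be handled by the hardware and forces the
 * request over to the software fallback at digest time.
 */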
442 static int n2_hmac_async_setkey(struct crypto_ahash *tfm, const u8 *key,
443 unsigned int keylen)
445 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
446 struct crypto_shash *child_shash = ctx->child_shash;
447 struct crypto_ahash *fallback_tfm;
448 struct {
449 struct shash_desc shash;
450 char ctx[crypto_shash_descsize(child_shash)];
451 } desc;
452 int err, bs, ds;
454 fallback_tfm = ctx->base.fallback_tfm;
455 err = crypto_ahash_setkey(fallback_tfm, key, keylen);
456 if (err)
457 return err;
459 desc.shash.tfm = child_shash;
460 desc.shash.flags = crypto_ahash_get_flags(tfm) &
461 CRYPTO_TFM_REQ_MAY_SLEEP;
463 bs = crypto_shash_blocksize(child_shash);
464 ds = crypto_shash_digestsize(child_shash);
465 BUG_ON(ds > N2_HASH_KEY_MAX);
466 if (keylen > bs) {
467 err = crypto_shash_digest(&desc.shash, key, keylen,
468 ctx->hash_key);
469 if (err)
470 return err;
471 keylen = ds;
472 } else if (keylen <= N2_HASH_KEY_MAX)
473 memcpy(ctx->hash_key, key, keylen);
475 ctx->hash_key_len = keylen;
477 return err;
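/* Note that although the driver exposes asynchronous (ahash and
 * ablkcipher) interfaces, every request is currently completed
 * synchronously: the descriptors are submitted and then wait_for_tail()
 * spins until the hardware head catches up with the tail.
 */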
480 static unsigned long wait_for_tail(struct spu_queue *qp)
482 unsigned long head, hv_ret;
484 do {
485 hv_ret = sun4v_ncs_gethead(qp->qhandle, &head);
486 if (hv_ret != HV_EOK) {
487 pr_err("Hypervisor error on gethead\n");
488 break;
490 if (head == qp->tail) {
491 qp->head = head;
492 break;
494 } while (1);
495 return hv_ret;
498 static unsigned long submit_and_wait_for_tail(struct spu_queue *qp,
499 struct cwq_initial_entry *ent)
501 unsigned long hv_ret = spu_queue_submit(qp, ent);
503 if (hv_ret == HV_EOK)
504 hv_ret = wait_for_tail(qp);
506 return hv_ret;
509 static int n2_do_async_digest(struct ahash_request *req,
510 unsigned int auth_type, unsigned int digest_size,
511 unsigned int result_size, void *hash_loc,
512 unsigned long auth_key, unsigned int auth_key_len)
514 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
515 struct cwq_initial_entry *ent;
516 struct crypto_hash_walk walk;
517 struct spu_queue *qp;
518 unsigned long flags;
519 int err = -ENODEV;
520 int nbytes, cpu;
522 /* The total effective length of the operation may not
523 * exceed 2^16.
525 if (unlikely(req->nbytes > (1 << 16))) {
526 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
527 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
529 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
530 rctx->fallback_req.base.flags =
531 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
532 rctx->fallback_req.nbytes = req->nbytes;
533 rctx->fallback_req.src = req->src;
534 rctx->fallback_req.result = req->result;
536 return crypto_ahash_digest(&rctx->fallback_req);
539 nbytes = crypto_hash_walk_first(req, &walk);
541 cpu = get_cpu();
542 qp = cpu_to_cwq[cpu];
543 if (!qp)
544 goto out;
546 spin_lock_irqsave(&qp->lock, flags);
548 /* XXX can do better, improve this later by doing a by-hand scatterlist
549 * XXX walk, etc.
551 ent = qp->q + qp->tail;
553 ent->control = control_word_base(nbytes, auth_key_len, 0,
554 auth_type, digest_size,
555 false, true, false, false,
556 OPCODE_INPLACE_BIT |
557 OPCODE_AUTH_MAC);
558 ent->src_addr = __pa(walk.data);
559 ent->auth_key_addr = auth_key;
560 ent->auth_iv_addr = __pa(hash_loc);
561 ent->final_auth_state_addr = 0UL;
562 ent->enc_key_addr = 0UL;
563 ent->enc_iv_addr = 0UL;
564 ent->dest_addr = __pa(hash_loc);
566 nbytes = crypto_hash_walk_done(&walk, 0);
567 while (nbytes > 0) {
568 ent = spu_queue_next(qp, ent);
570 ent->control = (nbytes - 1);
571 ent->src_addr = __pa(walk.data);
572 ent->auth_key_addr = 0UL;
573 ent->auth_iv_addr = 0UL;
574 ent->final_auth_state_addr = 0UL;
575 ent->enc_key_addr = 0UL;
576 ent->enc_iv_addr = 0UL;
577 ent->dest_addr = 0UL;
579 nbytes = crypto_hash_walk_done(&walk, 0);
581 ent->control |= CONTROL_END_OF_BLOCK;
583 if (submit_and_wait_for_tail(qp, ent) != HV_EOK)
584 err = -EINVAL;
585 else
586 err = 0;
588 spin_unlock_irqrestore(&qp->lock, flags);
590 if (!err)
591 memcpy(req->result, hash_loc, result_size);
592 out:
593 put_cpu();
595 return err;
598 static int n2_hash_async_digest(struct ahash_request *req)
600 struct n2_ahash_alg *n2alg = n2_ahash_alg(req->base.tfm);
601 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
602 int ds;
604 ds = n2alg->digest_size;
605 if (unlikely(req->nbytes == 0)) {
606 memcpy(req->result, n2alg->hash_zero, ds);
607 return 0;
609 memcpy(&rctx->u, n2alg->hash_init, n2alg->hw_op_hashsz);
611 return n2_do_async_digest(req, n2alg->auth_type,
612 n2alg->hw_op_hashsz, ds,
613 &rctx->u, 0UL, 0);
616 static int n2_hmac_async_digest(struct ahash_request *req)
618 struct n2_hmac_alg *n2alg = n2_hmac_alg(req->base.tfm);
619 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
620 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
621 struct n2_hmac_ctx *ctx = crypto_ahash_ctx(tfm);
622 int ds;
624 ds = n2alg->derived.digest_size;
625 if (unlikely(req->nbytes == 0) ||
626 unlikely(ctx->hash_key_len > N2_HASH_KEY_MAX)) {
627 struct n2_hash_req_ctx *rctx = ahash_request_ctx(req);
628 struct n2_hash_ctx *ctx = crypto_ahash_ctx(tfm);
630 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback_tfm);
631 rctx->fallback_req.base.flags =
632 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP;
633 rctx->fallback_req.nbytes = req->nbytes;
634 rctx->fallback_req.src = req->src;
635 rctx->fallback_req.result = req->result;
637 return crypto_ahash_digest(&rctx->fallback_req);
639 memcpy(&rctx->u, n2alg->derived.hash_init,
640 n2alg->derived.hw_op_hashsz);
642 return n2_do_async_digest(req, n2alg->derived.hmac_type,
643 n2alg->derived.hw_op_hashsz, ds,
644 &rctx->u,
645 __pa(&ctx->hash_key),
646 ctx->hash_key_len);
649 struct n2_cipher_context {
650 int key_len;
651 int enc_type;
652 union {
653 u8 aes[AES_MAX_KEY_SIZE];
654 u8 des[DES_KEY_SIZE];
655 u8 des3[3 * DES_KEY_SIZE];
656 u8 arc4[258]; /* S-box, X, Y */
657 } key;
660 #define N2_CHUNK_ARR_LEN 16
662 struct n2_crypto_chunk {
663 struct list_head entry;
664 unsigned long iv_paddr : 44;
665 unsigned long arr_len : 20;
666 unsigned long dest_paddr;
667 unsigned long dest_final;
668 struct {
669 unsigned long src_paddr : 44;
670 unsigned long src_len : 20;
671 } arr[N2_CHUNK_ARR_LEN];
674 struct n2_request_context {
675 struct ablkcipher_walk walk;
676 struct list_head chunk_list;
677 struct n2_crypto_chunk chunk;
678 u8 temp_iv[16];
681 /* The SPU allows some level of flexibility for partial cipher blocks
682 * being specified in a descriptor.
684 * It merely requires that every descriptor's length field is at least
685 * as large as the cipher block size. This means that a cipher block
686 * can span at most 2 descriptors. However, this does not allow a
687 * partial block to span into the final descriptor as that would
688 * violate the rule (since every descriptor's length must be at least
689 * the block size). So, for example, assuming an 8 byte block size:
691 * 0xe --> 0xa --> 0x8
693 * is a valid length sequence, whereas:
695 * 0xe --> 0xb --> 0x7
697 * is not a valid sequence.
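/* In practice this driver never emits partial blocks at all:
 * cipher_descriptor_len() below rounds each descriptor's length down to
 * a multiple of the block size and caps it at 2^16 bytes.
 */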
700 struct n2_cipher_alg {
701 struct list_head entry;
702 u8 enc_type;
703 struct crypto_alg alg;
706 static inline struct n2_cipher_alg *n2_cipher_alg(struct crypto_tfm *tfm)
708 struct crypto_alg *alg = tfm->__crt_alg;
710 return container_of(alg, struct n2_cipher_alg, alg);
713 struct n2_cipher_request_context {
714 struct ablkcipher_walk walk;
717 static int n2_aes_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
718 unsigned int keylen)
720 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
721 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
722 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
724 ctx->enc_type = (n2alg->enc_type & ENC_TYPE_CHAINING_MASK);
726 switch (keylen) {
727 case AES_KEYSIZE_128:
728 ctx->enc_type |= ENC_TYPE_ALG_AES128;
729 break;
730 case AES_KEYSIZE_192:
731 ctx->enc_type |= ENC_TYPE_ALG_AES192;
732 break;
733 case AES_KEYSIZE_256:
734 ctx->enc_type |= ENC_TYPE_ALG_AES256;
735 break;
736 default:
737 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
738 return -EINVAL;
741 ctx->key_len = keylen;
742 memcpy(ctx->key.aes, key, keylen);
743 return 0;
746 static int n2_des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
747 unsigned int keylen)
749 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
750 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
751 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
752 u32 tmp[DES_EXPKEY_WORDS];
753 int err;
755 ctx->enc_type = n2alg->enc_type;
757 if (keylen != DES_KEY_SIZE) {
758 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
759 return -EINVAL;
762 err = des_ekey(tmp, key);
763 if (err == 0 && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
764 tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
765 return -EINVAL;
768 ctx->key_len = keylen;
769 memcpy(ctx->key.des, key, keylen);
770 return 0;
773 static int n2_3des_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
774 unsigned int keylen)
776 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
777 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
778 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
780 ctx->enc_type = n2alg->enc_type;
782 if (keylen != (3 * DES_KEY_SIZE)) {
783 crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
784 return -EINVAL;
786 ctx->key_len = keylen;
787 memcpy(ctx->key.des3, key, keylen);
788 return 0;
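/* The hardware evidently consumes a pre-expanded RC4 state rather than
 * the raw key: run the standard RC4 key-scheduling algorithm here and
 * hand the resulting S-box plus the X/Y indices to the SPU as the
 * "key" material.
 */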
791 static int n2_arc4_setkey(struct crypto_ablkcipher *cipher, const u8 *key,
792 unsigned int keylen)
794 struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
795 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
796 struct n2_cipher_alg *n2alg = n2_cipher_alg(tfm);
797 u8 *s = ctx->key.arc4;
798 u8 *x = s + 256;
799 u8 *y = x + 1;
800 int i, j, k;
802 ctx->enc_type = n2alg->enc_type;
804 j = k = 0;
805 *x = 0;
806 *y = 0;
807 for (i = 0; i < 256; i++)
808 s[i] = i;
809 for (i = 0; i < 256; i++) {
810 u8 a = s[i];
811 j = (j + key[k] + a) & 0xff;
812 s[i] = s[j];
813 s[j] = a;
814 if (++k >= keylen)
815 k = 0;
818 return 0;
821 static inline int cipher_descriptor_len(int nbytes, unsigned int block_size)
823 int this_len = nbytes;
825 this_len -= (nbytes & (block_size - 1));
826 return this_len > (1 << 16) ? (1 << 16) : this_len;
829 static int __n2_crypt_chunk(struct crypto_tfm *tfm, struct n2_crypto_chunk *cp,
830 struct spu_queue *qp, bool encrypt)
832 struct n2_cipher_context *ctx = crypto_tfm_ctx(tfm);
833 struct cwq_initial_entry *ent;
834 bool in_place;
835 int i;
837 ent = spu_queue_alloc(qp, cp->arr_len);
838 if (!ent) {
839 pr_info("queue_alloc() of %d fails\n",
840 cp->arr_len);
841 return -EBUSY;
844 in_place = (cp->dest_paddr == cp->arr[0].src_paddr);
846 ent->control = control_word_base(cp->arr[0].src_len,
847 0, ctx->enc_type, 0, 0,
848 false, true, false, encrypt,
849 OPCODE_ENCRYPT |
850 (in_place ? OPCODE_INPLACE_BIT : 0));
851 ent->src_addr = cp->arr[0].src_paddr;
852 ent->auth_key_addr = 0UL;
853 ent->auth_iv_addr = 0UL;
854 ent->final_auth_state_addr = 0UL;
855 ent->enc_key_addr = __pa(&ctx->key);
856 ent->enc_iv_addr = cp->iv_paddr;
857 ent->dest_addr = (in_place ? 0UL : cp->dest_paddr);
859 for (i = 1; i < cp->arr_len; i++) {
860 ent = spu_queue_next(qp, ent);
862 ent->control = cp->arr[i].src_len - 1;
863 ent->src_addr = cp->arr[i].src_paddr;
864 ent->auth_key_addr = 0UL;
865 ent->auth_iv_addr = 0UL;
866 ent->final_auth_state_addr = 0UL;
867 ent->enc_key_addr = 0UL;
868 ent->enc_iv_addr = 0UL;
869 ent->dest_addr = 0UL;
871 ent->control |= CONTROL_END_OF_BLOCK;
873 return (spu_queue_submit(qp, ent) != HV_EOK) ? -EINVAL : 0;
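/* Split the request into a list of n2_crypto_chunk structures by
 * walking the source/destination scatterlists.  A new chunk is started
 * whenever the in-place property changes, the destination becomes
 * physically discontiguous, the per-chunk source array fills up
 * (N2_CHUNK_ARR_LEN entries), or the accumulated length would exceed
 * the 2^16 byte limit of a single control word.
 */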
876 static int n2_compute_chunks(struct ablkcipher_request *req)
878 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
879 struct ablkcipher_walk *walk = &rctx->walk;
880 struct n2_crypto_chunk *chunk;
881 unsigned long dest_prev;
882 unsigned int tot_len;
883 bool prev_in_place;
884 int err, nbytes;
886 ablkcipher_walk_init(walk, req->dst, req->src, req->nbytes);
887 err = ablkcipher_walk_phys(req, walk);
888 if (err)
889 return err;
891 INIT_LIST_HEAD(&rctx->chunk_list);
893 chunk = &rctx->chunk;
894 INIT_LIST_HEAD(&chunk->entry);
896 chunk->iv_paddr = 0UL;
897 chunk->arr_len = 0;
898 chunk->dest_paddr = 0UL;
900 prev_in_place = false;
901 dest_prev = ~0UL;
902 tot_len = 0;
904 while ((nbytes = walk->nbytes) != 0) {
905 unsigned long dest_paddr, src_paddr;
906 bool in_place;
907 int this_len;
909 src_paddr = (page_to_phys(walk->src.page) +
910 walk->src.offset);
911 dest_paddr = (page_to_phys(walk->dst.page) +
912 walk->dst.offset);
913 in_place = (src_paddr == dest_paddr);
914 this_len = cipher_descriptor_len(nbytes, walk->blocksize);
916 if (chunk->arr_len != 0) {
917 if (in_place != prev_in_place ||
918 (!prev_in_place &&
919 dest_paddr != dest_prev) ||
920 chunk->arr_len == N2_CHUNK_ARR_LEN ||
921 tot_len + this_len > (1 << 16)) {
922 chunk->dest_final = dest_prev;
923 list_add_tail(&chunk->entry,
924 &rctx->chunk_list);
925 chunk = kzalloc(sizeof(*chunk), GFP_ATOMIC);
926 if (!chunk) {
927 err = -ENOMEM;
928 break;
930 INIT_LIST_HEAD(&chunk->entry);
933 if (chunk->arr_len == 0) {
934 chunk->dest_paddr = dest_paddr;
935 tot_len = 0;
937 chunk->arr[chunk->arr_len].src_paddr = src_paddr;
938 chunk->arr[chunk->arr_len].src_len = this_len;
939 chunk->arr_len++;
941 dest_prev = dest_paddr + this_len;
942 prev_in_place = in_place;
943 tot_len += this_len;
945 err = ablkcipher_walk_done(req, walk, nbytes - this_len);
946 if (err)
947 break;
949 if (!err && chunk->arr_len != 0) {
950 chunk->dest_final = dest_prev;
951 list_add_tail(&chunk->entry, &rctx->chunk_list);
954 return err;
957 static void n2_chunk_complete(struct ablkcipher_request *req, void *final_iv)
959 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
960 struct n2_crypto_chunk *c, *tmp;
962 if (final_iv)
963 memcpy(rctx->walk.iv, final_iv, rctx->walk.blocksize);
965 ablkcipher_walk_complete(&rctx->walk);
966 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
967 list_del(&c->entry);
968 if (unlikely(c != &rctx->chunk))
969 kfree(c);
974 static int n2_do_ecb(struct ablkcipher_request *req, bool encrypt)
976 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
977 struct crypto_tfm *tfm = req->base.tfm;
978 int err = n2_compute_chunks(req);
979 struct n2_crypto_chunk *c, *tmp;
980 unsigned long flags, hv_ret;
981 struct spu_queue *qp;
983 if (err)
984 return err;
986 qp = cpu_to_cwq[get_cpu()];
987 err = -ENODEV;
988 if (!qp)
989 goto out;
991 spin_lock_irqsave(&qp->lock, flags);
993 list_for_each_entry_safe(c, tmp, &rctx->chunk_list, entry) {
994 err = __n2_crypt_chunk(tfm, c, qp, encrypt);
995 if (err)
996 break;
997 list_del(&c->entry);
998 if (unlikely(c != &rctx->chunk))
999 kfree(c);
1001 if (!err) {
1002 hv_ret = wait_for_tail(qp);
1003 if (hv_ret != HV_EOK)
1004 err = -EINVAL;
1007 spin_unlock_irqrestore(&qp->lock, flags);
1009 out:
1010 put_cpu();
1012 n2_chunk_complete(req, NULL);
1013 return err;
1016 static int n2_encrypt_ecb(struct ablkcipher_request *req)
1018 return n2_do_ecb(req, true);
1021 static int n2_decrypt_ecb(struct ablkcipher_request *req)
1023 return n2_do_ecb(req, false);
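/* For chaining modes the IV of each chunk has to be wired up by hand.
 * On encryption the IV for chunk N+1 is simply the last ciphertext
 * block written by chunk N (c->dest_final minus the block size).  On
 * decryption the chunks are walked in reverse so that the preceding
 * chunk's final ciphertext block is still intact when it is needed as
 * an IV, and the very last ciphertext block is saved in rctx->temp_iv
 * before it can be overwritten by an in-place operation.
 */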
1026 static int n2_do_chaining(struct ablkcipher_request *req, bool encrypt)
1028 struct n2_request_context *rctx = ablkcipher_request_ctx(req);
1029 struct crypto_tfm *tfm = req->base.tfm;
1030 unsigned long flags, hv_ret, iv_paddr;
1031 int err = n2_compute_chunks(req);
1032 struct n2_crypto_chunk *c, *tmp;
1033 struct spu_queue *qp;
1034 void *final_iv_addr;
1036 final_iv_addr = NULL;
1038 if (err)
1039 return err;
1041 qp = cpu_to_cwq[get_cpu()];
1042 err = -ENODEV;
1043 if (!qp)
1044 goto out;
1046 spin_lock_irqsave(&qp->lock, flags);
1048 if (encrypt) {
1049 iv_paddr = __pa(rctx->walk.iv);
1050 list_for_each_entry_safe(c, tmp, &rctx->chunk_list,
1051 entry) {
1052 c->iv_paddr = iv_paddr;
1053 err = __n2_crypt_chunk(tfm, c, qp, true);
1054 if (err)
1055 break;
1056 iv_paddr = c->dest_final - rctx->walk.blocksize;
1057 list_del(&c->entry);
1058 if (unlikely(c != &rctx->chunk))
1059 kfree(c);
1061 final_iv_addr = __va(iv_paddr);
1062 } else {
1063 list_for_each_entry_safe_reverse(c, tmp, &rctx->chunk_list,
1064 entry) {
1065 if (c == &rctx->chunk) {
1066 iv_paddr = __pa(rctx->walk.iv);
1067 } else {
1068 iv_paddr = (tmp->arr[tmp->arr_len-1].src_paddr +
1069 tmp->arr[tmp->arr_len-1].src_len -
1070 rctx->walk.blocksize);
1072 if (!final_iv_addr) {
1073 unsigned long pa;
1075 pa = (c->arr[c->arr_len-1].src_paddr +
1076 c->arr[c->arr_len-1].src_len -
1077 rctx->walk.blocksize);
1078 final_iv_addr = rctx->temp_iv;
1079 memcpy(rctx->temp_iv, __va(pa),
1080 rctx->walk.blocksize);
1082 c->iv_paddr = iv_paddr;
1083 err = __n2_crypt_chunk(tfm, c, qp, false);
1084 if (err)
1085 break;
1086 list_del(&c->entry);
1087 if (unlikely(c != &rctx->chunk))
1088 kfree(c);
1091 if (!err) {
1092 hv_ret = wait_for_tail(qp);
1093 if (hv_ret != HV_EOK)
1094 err = -EINVAL;
1097 spin_unlock_irqrestore(&qp->lock, flags);
1099 out:
1100 put_cpu();
1102 n2_chunk_complete(req, err ? NULL : final_iv_addr);
1103 return err;
1106 static int n2_encrypt_chaining(struct ablkcipher_request *req)
1108 return n2_do_chaining(req, true);
1111 static int n2_decrypt_chaining(struct ablkcipher_request *req)
1113 return n2_do_chaining(req, false);
1116 struct n2_cipher_tmpl {
1117 const char *name;
1118 const char *drv_name;
1119 u8 block_size;
1120 u8 enc_type;
1121 struct ablkcipher_alg ablkcipher;
1124 static const struct n2_cipher_tmpl cipher_tmpls[] = {
1125 /* ARC4: only ECB is supported (chaining bits ignored) */
1126 { .name = "ecb(arc4)",
1127 .drv_name = "ecb-arc4",
1128 .block_size = 1,
1129 .enc_type = (ENC_TYPE_ALG_RC4_STREAM |
1130 ENC_TYPE_CHAINING_ECB),
1131 .ablkcipher = {
1132 .min_keysize = 1,
1133 .max_keysize = 256,
1134 .setkey = n2_arc4_setkey,
1135 .encrypt = n2_encrypt_ecb,
1136 .decrypt = n2_decrypt_ecb,
1140 /* DES: ECB, CBC and CFB are supported */
1141 { .name = "ecb(des)",
1142 .drv_name = "ecb-des",
1143 .block_size = DES_BLOCK_SIZE,
1144 .enc_type = (ENC_TYPE_ALG_DES |
1145 ENC_TYPE_CHAINING_ECB),
1146 .ablkcipher = {
1147 .min_keysize = DES_KEY_SIZE,
1148 .max_keysize = DES_KEY_SIZE,
1149 .setkey = n2_des_setkey,
1150 .encrypt = n2_encrypt_ecb,
1151 .decrypt = n2_decrypt_ecb,
1154 { .name = "cbc(des)",
1155 .drv_name = "cbc-des",
1156 .block_size = DES_BLOCK_SIZE,
1157 .enc_type = (ENC_TYPE_ALG_DES |
1158 ENC_TYPE_CHAINING_CBC),
1159 .ablkcipher = {
1160 .ivsize = DES_BLOCK_SIZE,
1161 .min_keysize = DES_KEY_SIZE,
1162 .max_keysize = DES_KEY_SIZE,
1163 .setkey = n2_des_setkey,
1164 .encrypt = n2_encrypt_chaining,
1165 .decrypt = n2_decrypt_chaining,
1168 { .name = "cfb(des)",
1169 .drv_name = "cfb-des",
1170 .block_size = DES_BLOCK_SIZE,
1171 .enc_type = (ENC_TYPE_ALG_DES |
1172 ENC_TYPE_CHAINING_CFB),
1173 .ablkcipher = {
1174 .min_keysize = DES_KEY_SIZE,
1175 .max_keysize = DES_KEY_SIZE,
1176 .setkey = n2_des_setkey,
1177 .encrypt = n2_encrypt_chaining,
1178 .decrypt = n2_decrypt_chaining,
1182 /* 3DES: ECB, CBC and CFB are supported */
1183 { .name = "ecb(des3_ede)",
1184 .drv_name = "ecb-3des",
1185 .block_size = DES_BLOCK_SIZE,
1186 .enc_type = (ENC_TYPE_ALG_3DES |
1187 ENC_TYPE_CHAINING_ECB),
1188 .ablkcipher = {
1189 .min_keysize = 3 * DES_KEY_SIZE,
1190 .max_keysize = 3 * DES_KEY_SIZE,
1191 .setkey = n2_3des_setkey,
1192 .encrypt = n2_encrypt_ecb,
1193 .decrypt = n2_decrypt_ecb,
1196 { .name = "cbc(des3_ede)",
1197 .drv_name = "cbc-3des",
1198 .block_size = DES_BLOCK_SIZE,
1199 .enc_type = (ENC_TYPE_ALG_3DES |
1200 ENC_TYPE_CHAINING_CBC),
1201 .ablkcipher = {
1202 .ivsize = DES_BLOCK_SIZE,
1203 .min_keysize = 3 * DES_KEY_SIZE,
1204 .max_keysize = 3 * DES_KEY_SIZE,
1205 .setkey = n2_3des_setkey,
1206 .encrypt = n2_encrypt_chaining,
1207 .decrypt = n2_decrypt_chaining,
1210 { .name = "cfb(des3_ede)",
1211 .drv_name = "cfb-3des",
1212 .block_size = DES_BLOCK_SIZE,
1213 .enc_type = (ENC_TYPE_ALG_3DES |
1214 ENC_TYPE_CHAINING_CFB),
1215 .ablkcipher = {
1216 .min_keysize = 3 * DES_KEY_SIZE,
1217 .max_keysize = 3 * DES_KEY_SIZE,
1218 .setkey = n2_3des_setkey,
1219 .encrypt = n2_encrypt_chaining,
1220 .decrypt = n2_decrypt_chaining,
1223 /* AES: ECB, CBC and CTR are supported */
1224 { .name = "ecb(aes)",
1225 .drv_name = "ecb-aes",
1226 .block_size = AES_BLOCK_SIZE,
1227 .enc_type = (ENC_TYPE_ALG_AES128 |
1228 ENC_TYPE_CHAINING_ECB),
1229 .ablkcipher = {
1230 .min_keysize = AES_MIN_KEY_SIZE,
1231 .max_keysize = AES_MAX_KEY_SIZE,
1232 .setkey = n2_aes_setkey,
1233 .encrypt = n2_encrypt_ecb,
1234 .decrypt = n2_decrypt_ecb,
1237 { .name = "cbc(aes)",
1238 .drv_name = "cbc-aes",
1239 .block_size = AES_BLOCK_SIZE,
1240 .enc_type = (ENC_TYPE_ALG_AES128 |
1241 ENC_TYPE_CHAINING_CBC),
1242 .ablkcipher = {
1243 .ivsize = AES_BLOCK_SIZE,
1244 .min_keysize = AES_MIN_KEY_SIZE,
1245 .max_keysize = AES_MAX_KEY_SIZE,
1246 .setkey = n2_aes_setkey,
1247 .encrypt = n2_encrypt_chaining,
1248 .decrypt = n2_decrypt_chaining,
1251 { .name = "ctr(aes)",
1252 .drv_name = "ctr-aes",
1253 .block_size = AES_BLOCK_SIZE,
1254 .enc_type = (ENC_TYPE_ALG_AES128 |
1255 ENC_TYPE_CHAINING_COUNTER),
1256 .ablkcipher = {
1257 .ivsize = AES_BLOCK_SIZE,
1258 .min_keysize = AES_MIN_KEY_SIZE,
1259 .max_keysize = AES_MAX_KEY_SIZE,
1260 .setkey = n2_aes_setkey,
1261 .encrypt = n2_encrypt_chaining,
1262 .decrypt = n2_encrypt_chaining,
1267 #define NUM_CIPHER_TMPLS ARRAY_SIZE(cipher_tmpls)
1269 static LIST_HEAD(cipher_algs);
1271 struct n2_hash_tmpl {
1272 const char *name;
1273 const char *hash_zero;
1274 const u32 *hash_init;
1275 u8 hw_op_hashsz;
1276 u8 digest_size;
1277 u8 block_size;
1278 u8 auth_type;
1279 u8 hmac_type;
1282 static const char md5_zero[MD5_DIGEST_SIZE] = {
1283 0xd4, 0x1d, 0x8c, 0xd9, 0x8f, 0x00, 0xb2, 0x04,
1284 0xe9, 0x80, 0x09, 0x98, 0xec, 0xf8, 0x42, 0x7e,
1286 static const u32 md5_init[MD5_HASH_WORDS] = {
1287 cpu_to_le32(0x67452301),
1288 cpu_to_le32(0xefcdab89),
1289 cpu_to_le32(0x98badcfe),
1290 cpu_to_le32(0x10325476),
1292 static const char sha1_zero[SHA1_DIGEST_SIZE] = {
1293 0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32,
1294 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8,
1295 0x07, 0x09
1297 static const u32 sha1_init[SHA1_DIGEST_SIZE / 4] = {
1298 SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4,
1300 static const char sha256_zero[SHA256_DIGEST_SIZE] = {
1301 0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a,
1302 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae,
1303 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99,
1304 0x1b, 0x78, 0x52, 0xb8, 0x55
1306 static const u32 sha256_init[SHA256_DIGEST_SIZE / 4] = {
1307 SHA256_H0, SHA256_H1, SHA256_H2, SHA256_H3,
1308 SHA256_H4, SHA256_H5, SHA256_H6, SHA256_H7,
1310 static const char sha224_zero[SHA224_DIGEST_SIZE] = {
1311 0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9, 0x47,
1312 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4, 0x15, 0xa2,
1313 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a, 0xc5, 0xb3, 0xe4,
1314 0x2f
1316 static const u32 sha224_init[SHA256_DIGEST_SIZE / 4] = {
1317 SHA224_H0, SHA224_H1, SHA224_H2, SHA224_H3,
1318 SHA224_H4, SHA224_H5, SHA224_H6, SHA224_H7,
1321 static const struct n2_hash_tmpl hash_tmpls[] = {
1322 { .name = "md5",
1323 .hash_zero = md5_zero,
1324 .hash_init = md5_init,
1325 .auth_type = AUTH_TYPE_MD5,
1326 .hmac_type = AUTH_TYPE_HMAC_MD5,
1327 .hw_op_hashsz = MD5_DIGEST_SIZE,
1328 .digest_size = MD5_DIGEST_SIZE,
1329 .block_size = MD5_HMAC_BLOCK_SIZE },
1330 { .name = "sha1",
1331 .hash_zero = sha1_zero,
1332 .hash_init = sha1_init,
1333 .auth_type = AUTH_TYPE_SHA1,
1334 .hmac_type = AUTH_TYPE_HMAC_SHA1,
1335 .hw_op_hashsz = SHA1_DIGEST_SIZE,
1336 .digest_size = SHA1_DIGEST_SIZE,
1337 .block_size = SHA1_BLOCK_SIZE },
1338 { .name = "sha256",
1339 .hash_zero = sha256_zero,
1340 .hash_init = sha256_init,
1341 .auth_type = AUTH_TYPE_SHA256,
1342 .hmac_type = AUTH_TYPE_HMAC_SHA256,
1343 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1344 .digest_size = SHA256_DIGEST_SIZE,
1345 .block_size = SHA256_BLOCK_SIZE },
1346 { .name = "sha224",
1347 .hash_zero = sha224_zero,
1348 .hash_init = sha224_init,
1349 .auth_type = AUTH_TYPE_SHA256,
1350 .hmac_type = AUTH_TYPE_RESERVED,
1351 .hw_op_hashsz = SHA256_DIGEST_SIZE,
1352 .digest_size = SHA224_DIGEST_SIZE,
1353 .block_size = SHA224_BLOCK_SIZE },
1355 #define NUM_HASH_TMPLS ARRAY_SIZE(hash_tmpls)
1357 static LIST_HEAD(ahash_algs);
1358 static LIST_HEAD(hmac_algs);
1360 static int algs_registered;
1362 static void __n2_unregister_algs(void)
1364 struct n2_cipher_alg *cipher, *cipher_tmp;
1365 struct n2_ahash_alg *alg, *alg_tmp;
1366 struct n2_hmac_alg *hmac, *hmac_tmp;
1368 list_for_each_entry_safe(cipher, cipher_tmp, &cipher_algs, entry) {
1369 crypto_unregister_alg(&cipher->alg);
1370 list_del(&cipher->entry);
1371 kfree(cipher);
1373 list_for_each_entry_safe(hmac, hmac_tmp, &hmac_algs, derived.entry) {
1374 crypto_unregister_ahash(&hmac->derived.alg);
1375 list_del(&hmac->derived.entry);
1376 kfree(hmac);
1378 list_for_each_entry_safe(alg, alg_tmp, &ahash_algs, entry) {
1379 crypto_unregister_ahash(&alg->alg);
1380 list_del(&alg->entry);
1381 kfree(alg);
1385 static int n2_cipher_cra_init(struct crypto_tfm *tfm)
1387 tfm->crt_ablkcipher.reqsize = sizeof(struct n2_request_context);
1388 return 0;
1391 static int __devinit __n2_register_one_cipher(const struct n2_cipher_tmpl *tmpl)
1393 struct n2_cipher_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1394 struct crypto_alg *alg;
1395 int err;
1397 if (!p)
1398 return -ENOMEM;
1400 alg = &p->alg;
1402 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1403 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->drv_name);
1404 alg->cra_priority = N2_CRA_PRIORITY;
1405 alg->cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1406 alg->cra_blocksize = tmpl->block_size;
1407 p->enc_type = tmpl->enc_type;
1408 alg->cra_ctxsize = sizeof(struct n2_cipher_context);
1409 alg->cra_type = &crypto_ablkcipher_type;
1410 alg->cra_u.ablkcipher = tmpl->ablkcipher;
1411 alg->cra_init = n2_cipher_cra_init;
1412 alg->cra_module = THIS_MODULE;
1414 list_add(&p->entry, &cipher_algs);
1415 err = crypto_register_alg(alg);
1416 if (err) {
1417 pr_err("%s alg registration failed\n", alg->cra_name);
1418 list_del(&p->entry);
1419 kfree(p);
1420 } else {
1421 pr_info("%s alg registered\n", alg->cra_name);
1423 return err;
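/* For every hardware hash with a usable hmac_type, a derived
 * "hmac(<hash>)" ahash is registered as well.  It is a copy of the base
 * algorithm that only overrides the digest and setkey entry points.
 */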
1426 static int __devinit __n2_register_one_hmac(struct n2_ahash_alg *n2ahash)
1428 struct n2_hmac_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1429 struct ahash_alg *ahash;
1430 struct crypto_alg *base;
1431 int err;
1433 if (!p)
1434 return -ENOMEM;
1436 p->child_alg = n2ahash->alg.halg.base.cra_name;
1437 memcpy(&p->derived, n2ahash, sizeof(struct n2_ahash_alg));
1438 INIT_LIST_HEAD(&p->derived.entry);
1440 ahash = &p->derived.alg;
1441 ahash->digest = n2_hmac_async_digest;
1442 ahash->setkey = n2_hmac_async_setkey;
1444 base = &ahash->halg.base;
1445 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)", p->child_alg);
1446 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "hmac-%s-n2", p->child_alg);
1448 base->cra_ctxsize = sizeof(struct n2_hmac_ctx);
1449 base->cra_init = n2_hmac_cra_init;
1450 base->cra_exit = n2_hmac_cra_exit;
1452 list_add(&p->derived.entry, &hmac_algs);
1453 err = crypto_register_ahash(ahash);
1454 if (err) {
1455 pr_err("%s alg registration failed\n", base->cra_name);
1456 list_del(&p->derived.entry);
1457 kfree(p);
1458 } else {
1459 pr_info("%s alg registered\n", base->cra_name);
1461 return err;
1464 static int __devinit __n2_register_one_ahash(const struct n2_hash_tmpl *tmpl)
1466 struct n2_ahash_alg *p = kzalloc(sizeof(*p), GFP_KERNEL);
1467 struct hash_alg_common *halg;
1468 struct crypto_alg *base;
1469 struct ahash_alg *ahash;
1470 int err;
1472 if (!p)
1473 return -ENOMEM;
1475 p->hash_zero = tmpl->hash_zero;
1476 p->hash_init = tmpl->hash_init;
1477 p->auth_type = tmpl->auth_type;
1478 p->hmac_type = tmpl->hmac_type;
1479 p->hw_op_hashsz = tmpl->hw_op_hashsz;
1480 p->digest_size = tmpl->digest_size;
1482 ahash = &p->alg;
1483 ahash->init = n2_hash_async_init;
1484 ahash->update = n2_hash_async_update;
1485 ahash->final = n2_hash_async_final;
1486 ahash->finup = n2_hash_async_finup;
1487 ahash->digest = n2_hash_async_digest;
1489 halg = &ahash->halg;
1490 halg->digestsize = tmpl->digest_size;
1492 base = &halg->base;
1493 snprintf(base->cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
1494 snprintf(base->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s-n2", tmpl->name);
1495 base->cra_priority = N2_CRA_PRIORITY;
1496 base->cra_flags = CRYPTO_ALG_TYPE_AHASH | CRYPTO_ALG_NEED_FALLBACK;
1497 base->cra_blocksize = tmpl->block_size;
1498 base->cra_ctxsize = sizeof(struct n2_hash_ctx);
1499 base->cra_module = THIS_MODULE;
1500 base->cra_init = n2_hash_cra_init;
1501 base->cra_exit = n2_hash_cra_exit;
1503 list_add(&p->entry, &ahash_algs);
1504 err = crypto_register_ahash(ahash);
1505 if (err) {
1506 pr_err("%s alg registration failed\n", base->cra_name);
1507 list_del(&p->entry);
1508 kfree(p);
1509 } else {
1510 pr_info("%s alg registered\n", base->cra_name);
1512 if (!err && p->hmac_type != AUTH_TYPE_RESERVED)
1513 err = __n2_register_one_hmac(p);
1514 return err;
1517 static int __devinit n2_register_algs(void)
1519 int i, err = 0;
1521 mutex_lock(&spu_lock);
1522 if (algs_registered++)
1523 goto out;
1525 for (i = 0; i < NUM_HASH_TMPLS; i++) {
1526 err = __n2_register_one_ahash(&hash_tmpls[i]);
1527 if (err) {
1528 __n2_unregister_algs();
1529 goto out;
1532 for (i = 0; i < NUM_CIPHER_TMPLS; i++) {
1533 err = __n2_register_one_cipher(&cipher_tmpls[i]);
1534 if (err) {
1535 __n2_unregister_algs();
1536 goto out;
1540 out:
1541 mutex_unlock(&spu_lock);
1542 return err;
1545 static void __devexit n2_unregister_algs(void)
1547 mutex_lock(&spu_lock);
1548 if (!--algs_registered)
1549 __n2_unregister_algs();
1550 mutex_unlock(&spu_lock);
1553 /* To map CWQ queues to interrupt sources, the hypervisor API provides
1554 * a devino. This isn't very useful to us because all of the
1555 * interrupts listed in the device_node have been translated to
1556 * Linux virtual IRQ cookie numbers.
1558 * So we have to back-translate, going through the 'intr' and 'ino'
1559 * property tables of the n2cp MDESC node, matching it with the OF
1560 * 'interrupts' property entries, in order to figure out which
1561 * devino goes to which already-translated IRQ.
1563 static int find_devino_index(struct platform_device *dev, struct spu_mdesc_info *ip,
1564 unsigned long dev_ino)
1566 const unsigned int *dev_intrs;
1567 unsigned int intr;
1568 int i;
1570 for (i = 0; i < ip->num_intrs; i++) {
1571 if (ip->ino_table[i].ino == dev_ino)
1572 break;
1574 if (i == ip->num_intrs)
1575 return -ENODEV;
1577 intr = ip->ino_table[i].intr;
1579 dev_intrs = of_get_property(dev->dev.of_node, "interrupts", NULL);
1580 if (!dev_intrs)
1581 return -ENODEV;
1583 for (i = 0; i < dev->archdata.num_irqs; i++) {
1584 if (dev_intrs[i] == intr)
1585 return i;
1588 return -ENODEV;
1591 static int spu_map_ino(struct platform_device *dev, struct spu_mdesc_info *ip,
1592 const char *irq_name, struct spu_queue *p,
1593 irq_handler_t handler)
1595 unsigned long herr;
1596 int index;
1598 herr = sun4v_ncs_qhandle_to_devino(p->qhandle, &p->devino);
1599 if (herr)
1600 return -EINVAL;
1602 index = find_devino_index(dev, ip, p->devino);
1603 if (index < 0)
1604 return index;
1606 p->irq = dev->archdata.irqs[index];
1608 sprintf(p->irq_name, "%s-%d", irq_name, index);
1610 return request_irq(p->irq, handler, IRQF_SAMPLE_RANDOM,
1611 p->irq_name, p);
1614 static struct kmem_cache *queue_cache[2];
1616 static void *new_queue(unsigned long q_type)
1618 return kmem_cache_zalloc(queue_cache[q_type - 1], GFP_KERNEL);
1621 static void free_queue(void *p, unsigned long q_type)
1623 return kmem_cache_free(queue_cache[q_type - 1], p);
1626 static int queue_cache_init(void)
1628 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1629 queue_cache[HV_NCS_QTYPE_MAU - 1] =
1630 kmem_cache_create("mau_queue",
1631 (MAU_NUM_ENTRIES *
1632 MAU_ENTRY_SIZE),
1633 MAU_ENTRY_SIZE, 0, NULL);
1634 if (!queue_cache[HV_NCS_QTYPE_MAU - 1])
1635 return -ENOMEM;
1637 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1])
1638 queue_cache[HV_NCS_QTYPE_CWQ - 1] =
1639 kmem_cache_create("cwq_queue",
1640 (CWQ_NUM_ENTRIES *
1641 CWQ_ENTRY_SIZE),
1642 CWQ_ENTRY_SIZE, 0, NULL);
1643 if (!queue_cache[HV_NCS_QTYPE_CWQ - 1]) {
1644 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1645 return -ENOMEM;
1647 return 0;
1650 static void queue_cache_destroy(void)
1652 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_MAU - 1]);
1653 kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
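/* The ncs_qconf hypervisor call apparently has to be issued from a cpu
 * that actually shares the unit being configured, so temporarily bind
 * the current thread to the queue's sharing cpumask while registering
 * the queue.
 */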
1656 static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
1658 cpumask_var_t old_allowed;
1659 unsigned long hv_ret;
1661 if (cpumask_empty(&p->sharing))
1662 return -EINVAL;
1664 if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
1665 return -ENOMEM;
1667 cpumask_copy(old_allowed, &current->cpus_allowed);
1669 set_cpus_allowed_ptr(current, &p->sharing);
1671 hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
1672 CWQ_NUM_ENTRIES, &p->qhandle);
1673 if (!hv_ret)
1674 sun4v_ncs_sethead_marker(p->qhandle, 0);
1676 set_cpus_allowed_ptr(current, old_allowed);
1678 free_cpumask_var(old_allowed);
1680 return (hv_ret ? -EINVAL : 0);
1683 static int spu_queue_setup(struct spu_queue *p)
1685 int err;
1687 p->q = new_queue(p->q_type);
1688 if (!p->q)
1689 return -ENOMEM;
1691 err = spu_queue_register(p, p->q_type);
1692 if (err) {
1693 free_queue(p->q, p->q_type);
1694 p->q = NULL;
1697 return err;
1700 static void spu_queue_destroy(struct spu_queue *p)
1702 unsigned long hv_ret;
1704 if (!p->q)
1705 return;
1707 hv_ret = sun4v_ncs_qconf(p->q_type, p->qhandle, 0, &p->qhandle);
1709 if (!hv_ret)
1710 free_queue(p->q, p->q_type);
1713 static void spu_list_destroy(struct list_head *list)
1715 struct spu_queue *p, *n;
1717 list_for_each_entry_safe(p, n, list, list) {
1718 int i;
1720 for (i = 0; i < NR_CPUS; i++) {
1721 if (cpu_to_cwq[i] == p)
1722 cpu_to_cwq[i] = NULL;
1725 if (p->irq) {
1726 free_irq(p->irq, p);
1727 p->irq = 0;
1729 spu_queue_destroy(p);
1730 list_del(&p->list);
1731 kfree(p);
1735 /* Walk the backward arcs of a CWQ 'exec-unit' node,
1736 * gathering cpu membership information.
1738 static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
1739 struct platform_device *dev,
1740 u64 node, struct spu_queue *p,
1741 struct spu_queue **table)
1743 u64 arc;
1745 mdesc_for_each_arc(arc, mdesc, node, MDESC_ARC_TYPE_BACK) {
1746 u64 tgt = mdesc_arc_target(mdesc, arc);
1747 const char *name = mdesc_node_name(mdesc, tgt);
1748 const u64 *id;
1750 if (strcmp(name, "cpu"))
1751 continue;
1752 id = mdesc_get_property(mdesc, tgt, "id", NULL);
1753 if (table[*id] != NULL) {
1754 dev_err(&dev->dev, "%s: SPU cpu slot already set.\n",
1755 dev->dev.of_node->full_name);
1756 return -EINVAL;
1758 cpu_set(*id, p->sharing);
1759 table[*id] = p;
1761 return 0;
1764 /* Process an 'exec-unit' MDESC node of type 'cwq'. */
1765 static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
1766 struct platform_device *dev, struct mdesc_handle *mdesc,
1767 u64 node, const char *iname, unsigned long q_type,
1768 irq_handler_t handler, struct spu_queue **table)
1770 struct spu_queue *p;
1771 int err;
1773 p = kzalloc(sizeof(struct spu_queue), GFP_KERNEL);
1774 if (!p) {
1775 dev_err(&dev->dev, "%s: Could not allocate SPU queue.\n",
1776 dev->dev.of_node->full_name);
1777 return -ENOMEM;
1780 cpus_clear(p->sharing);
1781 spin_lock_init(&p->lock);
1782 p->q_type = q_type;
1783 INIT_LIST_HEAD(&p->jobs);
1784 list_add(&p->list, list);
1786 err = spu_mdesc_walk_arcs(mdesc, dev, node, p, table);
1787 if (err)
1788 return err;
1790 err = spu_queue_setup(p);
1791 if (err)
1792 return err;
1794 return spu_map_ino(dev, ip, iname, p, handler);
1797 static int spu_mdesc_scan(struct mdesc_handle *mdesc, struct platform_device *dev,
1798 struct spu_mdesc_info *ip, struct list_head *list,
1799 const char *exec_name, unsigned long q_type,
1800 irq_handler_t handler, struct spu_queue **table)
1802 int err = 0;
1803 u64 node;
1805 mdesc_for_each_node_by_name(mdesc, node, "exec-unit") {
1806 const char *type;
1808 type = mdesc_get_property(mdesc, node, "type", NULL);
1809 if (!type || strcmp(type, exec_name))
1810 continue;
1812 err = handle_exec_unit(ip, list, dev, mdesc, node,
1813 exec_name, q_type, handler, table);
1814 if (err) {
1815 spu_list_destroy(list);
1816 break;
1820 return err;
1823 static int __devinit get_irq_props(struct mdesc_handle *mdesc, u64 node,
1824 struct spu_mdesc_info *ip)
1826 const u64 *ino;
1827 int ino_len;
1828 int i;
1830 ino = mdesc_get_property(mdesc, node, "ino", &ino_len);
1831 if (!ino) {
1832 printk("NO 'ino'\n");
1833 return -ENODEV;
1836 ip->num_intrs = ino_len / sizeof(u64);
1837 ip->ino_table = kzalloc((sizeof(struct ino_blob) *
1838 ip->num_intrs),
1839 GFP_KERNEL);
1840 if (!ip->ino_table)
1841 return -ENOMEM;
1843 for (i = 0; i < ip->num_intrs; i++) {
1844 struct ino_blob *b = &ip->ino_table[i];
1845 b->intr = i + 1;
1846 b->ino = ino[i];
1849 return 0;
1852 static int __devinit grab_mdesc_irq_props(struct mdesc_handle *mdesc,
1853 struct platform_device *dev,
1854 struct spu_mdesc_info *ip,
1855 const char *node_name)
1857 const unsigned int *reg;
1858 u64 node;
1860 reg = of_get_property(dev->dev.of_node, "reg", NULL);
1861 if (!reg)
1862 return -ENODEV;
1864 mdesc_for_each_node_by_name(mdesc, node, "virtual-device") {
1865 const char *name;
1866 const u64 *chdl;
1868 name = mdesc_get_property(mdesc, node, "name", NULL);
1869 if (!name || strcmp(name, node_name))
1870 continue;
1871 chdl = mdesc_get_property(mdesc, node, "cfg-handle", NULL);
1872 if (!chdl || (*chdl != *reg))
1873 continue;
1874 ip->cfg_handle = *chdl;
1875 return get_irq_props(mdesc, node, ip);
1878 return -ENODEV;
1881 static unsigned long n2_spu_hvapi_major;
1882 static unsigned long n2_spu_hvapi_minor;
1884 static int __devinit n2_spu_hvapi_register(void)
1886 int err;
1888 n2_spu_hvapi_major = 2;
1889 n2_spu_hvapi_minor = 0;
1891 err = sun4v_hvapi_register(HV_GRP_NCS,
1892 n2_spu_hvapi_major,
1893 &n2_spu_hvapi_minor);
1895 if (!err)
1896 pr_info("Registered NCS HVAPI version %lu.%lu\n",
1897 n2_spu_hvapi_major,
1898 n2_spu_hvapi_minor);
1900 return err;
1903 static void n2_spu_hvapi_unregister(void)
1905 sun4v_hvapi_unregister(HV_GRP_NCS);
1908 static int global_ref;
1910 static int __devinit grab_global_resources(void)
1912 int err = 0;
1914 mutex_lock(&spu_lock);
1916 if (global_ref++)
1917 goto out;
1919 err = n2_spu_hvapi_register();
1920 if (err)
1921 goto out;
1923 err = queue_cache_init();
1924 if (err)
1925 goto out_hvapi_release;
1927 err = -ENOMEM;
1928 cpu_to_cwq = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1929 GFP_KERNEL);
1930 if (!cpu_to_cwq)
1931 goto out_queue_cache_destroy;
1933 cpu_to_mau = kzalloc(sizeof(struct spu_queue *) * NR_CPUS,
1934 GFP_KERNEL);
1935 if (!cpu_to_mau)
1936 goto out_free_cwq_table;
1938 err = 0;
1940 out:
1941 if (err)
1942 global_ref--;
1943 mutex_unlock(&spu_lock);
1944 return err;
1946 out_free_cwq_table:
1947 kfree(cpu_to_cwq);
1948 cpu_to_cwq = NULL;
1950 out_queue_cache_destroy:
1951 queue_cache_destroy();
1953 out_hvapi_release:
1954 n2_spu_hvapi_unregister();
1955 goto out;
1958 static void release_global_resources(void)
1960 mutex_lock(&spu_lock);
1961 if (!--global_ref) {
1962 kfree(cpu_to_cwq);
1963 cpu_to_cwq = NULL;
1965 kfree(cpu_to_mau);
1966 cpu_to_mau = NULL;
1968 queue_cache_destroy();
1969 n2_spu_hvapi_unregister();
1971 mutex_unlock(&spu_lock);
1974 static struct n2_crypto * __devinit alloc_n2cp(void)
1976 struct n2_crypto *np = kzalloc(sizeof(struct n2_crypto), GFP_KERNEL);
1978 if (np)
1979 INIT_LIST_HEAD(&np->cwq_list);
1981 return np;
1984 static void free_n2cp(struct n2_crypto *np)
1986 if (np->cwq_info.ino_table) {
1987 kfree(np->cwq_info.ino_table);
1988 np->cwq_info.ino_table = NULL;
1991 kfree(np);
1994 static void __devinit n2_spu_driver_version(void)
1996 static int n2_spu_version_printed;
1998 if (n2_spu_version_printed++ == 0)
1999 pr_info("%s", version);
2002 static int __devinit n2_crypto_probe(struct platform_device *dev)
2004 struct mdesc_handle *mdesc;
2005 const char *full_name;
2006 struct n2_crypto *np;
2007 int err;
2009 n2_spu_driver_version();
2011 full_name = dev->dev.of_node->full_name;
2012 pr_info("Found N2CP at %s\n", full_name);
2014 np = alloc_n2cp();
2015 if (!np) {
2016 dev_err(&dev->dev, "%s: Unable to allocate n2cp.\n",
2017 full_name);
2018 return -ENOMEM;
2021 err = grab_global_resources();
2022 if (err) {
2023 dev_err(&dev->dev, "%s: Unable to grab "
2024 "global resources.\n", full_name);
2025 goto out_free_n2cp;
2028 mdesc = mdesc_grab();
2030 if (!mdesc) {
2031 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
2032 full_name);
2033 err = -ENODEV;
2034 goto out_free_global;
2036 err = grab_mdesc_irq_props(mdesc, dev, &np->cwq_info, "n2cp");
2037 if (err) {
2038 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
2039 full_name);
2040 mdesc_release(mdesc);
2041 goto out_free_global;
2044 err = spu_mdesc_scan(mdesc, dev, &np->cwq_info, &np->cwq_list,
2045 "cwq", HV_NCS_QTYPE_CWQ, cwq_intr,
2046 cpu_to_cwq);
2047 mdesc_release(mdesc);
2049 if (err) {
2050 dev_err(&dev->dev, "%s: CWQ MDESC scan failed.\n",
2051 full_name);
2052 goto out_free_global;
2055 err = n2_register_algs();
2056 if (err) {
2057 dev_err(&dev->dev, "%s: Unable to register algorithms.\n",
2058 full_name);
2059 goto out_free_spu_list;
2062 dev_set_drvdata(&dev->dev, np);
2064 return 0;
2066 out_free_spu_list:
2067 spu_list_destroy(&np->cwq_list);
2069 out_free_global:
2070 release_global_resources();
2072 out_free_n2cp:
2073 free_n2cp(np);
2075 return err;
2078 static int __devexit n2_crypto_remove(struct platform_device *dev)
2080 struct n2_crypto *np = dev_get_drvdata(&dev->dev);
2082 n2_unregister_algs();
2084 spu_list_destroy(&np->cwq_list);
2086 release_global_resources();
2088 free_n2cp(np);
2090 return 0;
2093 static struct n2_mau * __devinit alloc_ncp(void)
2095 struct n2_mau *mp = kzalloc(sizeof(struct n2_mau), GFP_KERNEL);
2097 if (mp)
2098 INIT_LIST_HEAD(&mp->mau_list);
2100 return mp;
2103 static void free_ncp(struct n2_mau *mp)
2105 if (mp->mau_info.ino_table) {
2106 kfree(mp->mau_info.ino_table);
2107 mp->mau_info.ino_table = NULL;
2110 kfree(mp);
2113 static int __devinit n2_mau_probe(struct platform_device *dev)
2115 struct mdesc_handle *mdesc;
2116 const char *full_name;
2117 struct n2_mau *mp;
2118 int err;
2120 n2_spu_driver_version();
2122 full_name = dev->dev.of_node->full_name;
2123 pr_info("Found NCP at %s\n", full_name);
2125 mp = alloc_ncp();
2126 if (!mp) {
2127 dev_err(&dev->dev, "%s: Unable to allocate ncp.\n",
2128 full_name);
2129 return -ENOMEM;
2132 err = grab_global_resources();
2133 if (err) {
2134 dev_err(&dev->dev, "%s: Unable to grab "
2135 "global resources.\n", full_name);
2136 goto out_free_ncp;
2139 mdesc = mdesc_grab();
2141 if (!mdesc) {
2142 dev_err(&dev->dev, "%s: Unable to grab MDESC.\n",
2143 full_name);
2144 err = -ENODEV;
2145 goto out_free_global;
2148 err = grab_mdesc_irq_props(mdesc, dev, &mp->mau_info, "ncp");
2149 if (err) {
2150 dev_err(&dev->dev, "%s: Unable to grab IRQ props.\n",
2151 full_name);
2152 mdesc_release(mdesc);
2153 goto out_free_global;
2156 err = spu_mdesc_scan(mdesc, dev, &mp->mau_info, &mp->mau_list,
2157 "mau", HV_NCS_QTYPE_MAU, mau_intr,
2158 cpu_to_mau);
2159 mdesc_release(mdesc);
2161 if (err) {
2162 dev_err(&dev->dev, "%s: MAU MDESC scan failed.\n",
2163 full_name);
2164 goto out_free_global;
2167 dev_set_drvdata(&dev->dev, mp);
2169 return 0;
2171 out_free_global:
2172 release_global_resources();
2174 out_free_ncp:
2175 free_ncp(mp);
2177 return err;
2180 static int __devexit n2_mau_remove(struct platform_device *dev)
2182 struct n2_mau *mp = dev_get_drvdata(&dev->dev);
2184 spu_list_destroy(&mp->mau_list);
2186 release_global_resources();
2188 free_ncp(mp);
2190 return 0;
2193 static struct of_device_id n2_crypto_match[] = {
2195 .name = "n2cp",
2196 .compatible = "SUNW,n2-cwq",
2199 .name = "n2cp",
2200 .compatible = "SUNW,vf-cwq",
2203 .name = "n2cp",
2204 .compatible = "SUNW,kt-cwq",
2209 MODULE_DEVICE_TABLE(of, n2_crypto_match);
2211 static struct platform_driver n2_crypto_driver = {
2212 .driver = {
2213 .name = "n2cp",
2214 .owner = THIS_MODULE,
2215 .of_match_table = n2_crypto_match,
2217 .probe = n2_crypto_probe,
2218 .remove = __devexit_p(n2_crypto_remove),
2221 static struct of_device_id n2_mau_match[] = {
2223 .name = "ncp",
2224 .compatible = "SUNW,n2-mau",
2227 .name = "ncp",
2228 .compatible = "SUNW,vf-mau",
2231 .name = "ncp",
2232 .compatible = "SUNW,kt-mau",
2237 MODULE_DEVICE_TABLE(of, n2_mau_match);
2239 static struct platform_driver n2_mau_driver = {
2240 .driver = {
2241 .name = "ncp",
2242 .owner = THIS_MODULE,
2243 .of_match_table = n2_mau_match,
2245 .probe = n2_mau_probe,
2246 .remove = __devexit_p(n2_mau_remove),
2249 static int __init n2_init(void)
2251 int err = platform_driver_register(&n2_crypto_driver);
2253 if (!err) {
2254 err = platform_driver_register(&n2_mau_driver);
2255 if (err)
2256 platform_driver_unregister(&n2_crypto_driver);
2258 return err;
2261 static void __exit n2_exit(void)
2263 platform_driver_unregister(&n2_mau_driver);
2264 platform_driver_unregister(&n2_crypto_driver);
2267 module_init(n2_init);
2268 module_exit(n2_exit);