/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for
 * use with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"

#define PPC4XX_SEC_VERSION_STR			"0.5"

/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_control ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma: hold sg, pdr and pe in reset, then release the reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* release reset of pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* enable the packet engine; sg, pdr and pe stay out of reset */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	if (dev->is_revb) {
		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
		       dev->ce_base + CRYPTO4XX_INT_EN);
	} else {
		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
	}
}
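
/*
 * Summary of the bring-up sequence above: assert and release reset of the
 * PE, SG and PDR blocks, program the ring bases and sizes, seed the PRNG,
 * zero the upper halves of the 64-bit address registers (this driver only
 * uses 32-bit DMA addresses), then clear and enable interrupts. Note that
 * CRYPTO4XX_PDR_BASE and CRYPTO4XX_RDR_BASE are both written with pdr_pa:
 * the packet descriptor ring doubles as the result ring here.
 */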

int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		kfree(ctx->sa_in);
		ctx->sa_in = NULL;
		return -ENOMEM;
	}

	ctx->sa_len = size;

	return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	kfree(ctx->sa_in);
	ctx->sa_in = NULL;
	kfree(ctx->sa_out);
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}
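
/*
 * Note: the SA buffers and ctx->sa_len are sized in 32-bit words, not
 * bytes; crypto4xx_alloc_sa() allocates size * 4 bytes here, and
 * crypto4xx_build_pd() later copies sa_len * 4 bytes into the shadow SA.
 */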

/**
 * alloc memory for the packet descriptor ring and its shadow SA and
 * state-record pools; one shadow SA and one state record are
 * pre-assigned to every packet descriptor
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
				   GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;
	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		struct ce_pd *pd = &dev->pdr[i];
		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];

		pd->sa = dev->shadow_sa_pool_pa +
			sizeof(union shadow_sa_buf) * i;

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;

		/* alloc state record */
		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
			sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);

	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				  dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}

static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}
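
/*
 * A worked example of the full check above, with PPC4XX_NUM_PD = 256 (the
 * value quoted in crypto4xx_build_pd()'s backlog comment): pdr_head = 255
 * and pdr_tail = 0 gives tmp = (255 + 1) % 256 = 0 == pdr_tail, so
 * ERING_WAS_FULL is returned. One slot always stays unused, which keeps
 * "full" (head + 1 == tail) distinguishable from "empty" (head == tail).
 */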

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
	u32 tail;
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	pd_uinfo->state = PD_ENTRY_FREE;

	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	tail = dev->pdr_tail;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return tail;
}

/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
				       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				       &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}

/*
 * Preemption or interrupts must be disabled when this function is called.
 */
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}
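
/*
 * Unlike the PD ring, n gather descriptors are claimed as one consecutive
 * run. An illustration of the wrap check above (a ring size of 256 is
 * assumed for the arithmetic): with gdr_head = 250, gdr_tail = 10 and
 * n = 10, the new head would be tmp = (250 + 10) % 256 = 4; tmp lies
 * outside the live window [tail, head), so the claim succeeds. Had tmp
 * landed inside [tail, head), the run would overlap in-flight entries
 * and ERING_WAS_FULL is returned instead.
 */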

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return &dev->gdr[idx];
}

/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);
		return -ENOMEM;
	}

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		dev->sdr[i].ptr = dev->scatter_buffer_pa +
				  PPC4XX_SD_BUFFER_SIZE * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va)
		dma_free_coherent(dev->core_dev->device,
				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

/*
 * Preemption or interrupts must be disabled when this function is called.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return &dev->sdr[idx];
}

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	unsigned int first_sd = pd_uinfo->first_sd;
	unsigned int last_sd;
	unsigned int overflow = 0;
	unsigned int to_copy;
	unsigned int dst_start = 0;

	/*
	 * Because the scatter buffers are all neatly organized in one
	 * big continuous ringbuffer; scatterwalk_map_and_copy() can
	 * be instructed to copy a range of buffers in one go.
	 */
	last_sd = (first_sd + pd_uinfo->num_sd);
	if (last_sd > PPC4XX_LAST_SD) {
		last_sd = PPC4XX_LAST_SD;
		overflow = last_sd % PPC4XX_NUM_SD;
	}

	while (nbytes) {
		void *buf = dev->scatter_buffer_va +
			first_sd * PPC4XX_SD_BUFFER_SIZE;

		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
				      (1 + last_sd - first_sd));
		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
		nbytes -= to_copy;

		if (overflow) {
			first_sd = 0;
			last_sd = overflow;
			dst_start += to_copy;
			overflow = 0;
		}
	}
}
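
/*
 * Why the single-copy trick above works: the per-request scatter buffers
 * sit back to back in one DMA-coherent allocation, so a run of
 * descriptors [first_sd, last_sd] maps to one contiguous memory range
 * and can be drained with a single scatterwalk_map_and_copy(). Only when
 * the run wraps past the end of the ring does the loop take a second
 * pass, restarting at slot 0 with dst_start advanced by the bytes
 * already copied.
 */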

static void crypto4xx_copy_digest_to_dst(void *dst,
					 struct pd_uinfo *pd_uinfo,
					 struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy(dst, pd_uinfo->sr_va->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo,
				  struct ce_pd *pd)
{
	struct skcipher_request *req;
	struct scatterlist *dst;
	dma_addr_t addr;

	req = skcipher_request_cast(pd_uinfo->async_req);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  req->cryptlen, req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);

		crypto4xx_memcpy_from_le32((u32 *)req->iv,
			pd_uinfo->sr_va->save_iv,
			crypto_skcipher_ivsize(skcipher));
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		skcipher_request_complete(req, -EINPROGRESS);
	skcipher_request_complete(req, 0);
}

static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
				 struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
				     crypto_tfm_ctx(ahash_req->base.tfm));
	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		ahash_request_complete(ahash_req, -EINPROGRESS);
	ahash_request_complete(ahash_req, 0);
}

static void crypto4xx_aead_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo,
				struct ce_pd *pd)
{
	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
		struct aead_request, base);
	struct scatterlist *dst = pd_uinfo->dest_va;
	size_t cp_len = crypto_aead_authsize(
		crypto_aead_reqtfm(aead_req));
	u32 icv[AES_BLOCK_SIZE];
	int err = 0;

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  pd->pd_ctl_len.bf.pkt_len,
					  dst);
	} else {
		__dma_sync_page(sg_page(dst), dst->offset, dst->length,
				DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
		/* append icv at the end */
		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
					   sizeof(icv));

		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
					 cp_len, 1);
	} else {
		/* check icv at the end */
		scatterwalk_map_and_copy(icv, aead_req->src,
			aead_req->assoclen + aead_req->cryptlen -
			cp_len, cp_len, 0);

		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));

		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
			err = -EBADMSG;
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd->pd_ctl.bf.status & 0xff) {
		if (!__ratelimit(&dev->aead_ratelimit)) {
			if (pd->pd_ctl.bf.status & 2)
				pr_err("pad fail error\n");
			if (pd->pd_ctl.bf.status & 4)
				pr_err("seqnum fail\n");
			if (pd->pd_ctl.bf.status & 8)
				pr_err("error notify\n");
			pr_err("aead return err status = 0x%02x\n",
				pd->pd_ctl.bf.status & 0xff);
			pr_err("pd pad_ctl = 0x%08x\n",
				pd->pd_ctl.bf.pd_pad_ctl);
		}
		err = -EINVAL;
	}

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		aead_request_complete(aead_req, -EINPROGRESS);

	aead_request_complete(aead_req, err);
}
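
/*
 * ICV handling in the AEAD path above: on encryption (DIR_OUTBOUND) the
 * engine leaves the computed digest in the state record and the driver
 * appends authsize bytes of it behind the ciphertext. On decryption the
 * driver reads the trailing authsize bytes from the source, converts
 * them from LE32 and compares them against the engine's digest with the
 * constant-time crypto_memneq(); a mismatch surfaces as -EBADMSG.
 */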

static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd = &dev->pdr[idx];
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];

	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		crypto4xx_cipher_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto4xx_aead_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto4xx_ahash_done(dev, pd_uinfo);
		break;
	}
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}
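
/*
 * Both helpers are plain ring increments. Assuming the usual convention
 * that PPC4XX_LAST_GD == PPC4XX_NUM_GD - 1 (and likewise for the SD
 * ring), each is equivalent to:
 *
 *	return (current + 1) % PPC4XX_NUM_GD;
 */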

int crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       const unsigned int datalen,
		       const __le32 *iv, const u32 iv_len,
		       const struct dynamic_sa_ctl *req_sa,
		       const unsigned int sa_len,
		       const unsigned int assoclen,
		       struct scatterlist *_dst)
{
	struct crypto4xx_device *dev = ctx->dev;
	struct dynamic_sa_ctl *sa;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo;
	unsigned int nbytes = datalen;
	size_t offset_to_sr_ptr;
	u32 gd_idx = 0;
	int tmp;
	bool is_busy;

	/* figure how many gd are needed */
	tmp = sg_nents_for_len(src, assoclen + datalen);
	if (tmp < 0) {
		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
		return tmp;
	}
	if (tmp == 1)
		tmp = 0;
	num_gd = tmp;

	if (assoclen) {
		nbytes += assoclen;
		dst = scatterwalk_ffwd(_dst, dst, assoclen);
	}

	/* figure how many sd are needed */
	if (sg_is_last(dst)) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected.
	 * The gather ring and scatter ring need to be consecutive.
	 * In case we run out of any kind of descriptor, the descriptors
	 * already obtained must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	/*
	 * Let the caller know to slow down, once more than 13/16ths = 81%
	 * (208 of 256 entries) of the available data contexts are being
	 * used simultaneously.
	 *
	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" of
	 * 31 more contexts before new requests have to be rejected.
	 */
	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 13) / 16);
	} else {
		/*
		 * To fix contention issues between ipsec (no backlog) and
		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
		 * data contexts.
		 */
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 15) / 16);

		if (is_busy) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EBUSY;
		}
	}

	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd = &dev->pdr[pd_entry];
	pd->sa_len = sa_len;

	pd_uinfo = &dev->pdr_uinfo[pd_entry];
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len)
		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);

	sa = pd_uinfo->sa_va;
	memcpy(sa, req_sa, sa_len * 4);

	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;

	if (num_gd) {
		dma_addr_t gd_dma;
		struct scatterlist *sg;

		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		/* walk the sg, and setup gather array */

		sg = src;
		while (nbytes) {
			size_t len;

			len = min(sg->length, nbytes);
			gd->ptr = dma_map_page(dev->core_dev->device,
				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
			gd->ctl_len.len = len;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (len >= nbytes)
				break;

			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			sg = sg_next(sg);
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device,
				sg_page(src), src->offset,
				min(nbytes, src->length), DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (sg_is_last(dst)) {
		/*
		 * The application gave us a dst that is one whole piece of
		 * memory; no need to use the scatter ring.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		pd->dest = (u32)dma_map_page(dev->core_dev->device,
					     sg_page(dst), dst->offset,
					     min(datalen, dst->length),
					     DMA_TO_DEVICE);
	} else {
		dma_addr_t sd_dma;
		struct ce_sd *sd = NULL;

		u32 sd_idx = fst_sd;
		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr should be setup by sd_init routine */
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			} else {
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
			}
		}
	}

	pd->pd_ctl.w = PD_CTL_HOST_READY |
		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
			PD_CTL_HASH_FINAL : 0);
	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);

	wmb();
	/* write any value to push engine to read a pd */
	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return is_busy ? -EBUSY : -EINPROGRESS;
}
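
/*
 * Return convention of crypto4xx_build_pd(), as implemented above:
 * -EINPROGRESS means the request was queued to the engine. -EBUSY for a
 * CRYPTO_TFM_REQ_MAY_BACKLOG request means it was queued anyway (the
 * completion handlers signal -EINPROGRESS first for such PD_ENTRY_BUSY
 * entries) but the caller should throttle. -EBUSY without the backlog
 * flag and -EAGAIN on gather/scatter/PD ring exhaustion mean the request
 * was not queued at all.
 */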

/**
 * Algorithm Registration Functions
 */
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
			       struct crypto4xx_ctx *ctx)
{
	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}

static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(sk);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher.cipher =
			crypto_alloc_skcipher(alg->base.cra_name, 0,
					      CRYPTO_ALG_NEED_FALLBACK |
					      CRYPTO_ALG_ASYNC);
		if (IS_ERR(ctx->sw_cipher.cipher))
			return PTR_ERR(ctx->sw_cipher.cipher);

		crypto_skcipher_set_reqsize(sk,
			sizeof(struct skcipher_request) + 32 +
			crypto_skcipher_reqsize(ctx->sw_cipher.cipher));
	}

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
	crypto4xx_ctx_init(amcc_alg, ctx);
	return 0;
}

static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
	crypto4xx_free_sa(ctx);
}

static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	crypto4xx_common_exit(ctx);
	if (ctx->sw_cipher.cipher)
		crypto_free_skcipher(ctx->sw_cipher.cipher);
}

static int crypto4xx_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto4xx_alg *amcc_alg;

	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
						CRYPTO_ALG_NEED_FALLBACK |
						CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->sw_cipher.aead))
		return PTR_ERR(ctx->sw_cipher.aead);

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
	crypto4xx_ctx_init(amcc_alg, ctx);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
				crypto_aead_reqsize(ctx->sw_cipher.aead),
				sizeof(struct crypto4xx_aead_reqctx)));
	return 0;
}

static void crypto4xx_aead_exit(struct crypto_aead *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);

	crypto4xx_common_exit(ctx);
	crypto_free_aead(ctx->sw_cipher.aead);
}

static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
				  struct crypto4xx_alg_common *crypto_alg,
				  int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AEAD:
			rc = crypto_register_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		default:
			rc = crypto_register_skcipher(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&alg->alg.u.aead);
			break;

		default:
			crypto_unregister_skcipher(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}

static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail = core_dev->dev->pdr_tail;
	u32 head = core_dev->dev->pdr_head;

	do {
		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
		pd = &core_dev->dev->pdr[tail];
		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
		     ((READ_ONCE(pd->pd_ctl.w) &
		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
		       PD_CTL_PE_DONE)) {
			crypto4xx_pd_done(core_dev->dev, tail);
			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
		} else {
			/* if tail not done, break */
			break;
		}
	} while (head != tail);
}
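
/*
 * The tasklet drains completions strictly in ring order: an entry is
 * reaped only while software still marks it PD_ENTRY_INUSE and the
 * engine has set PD_CTL_PE_DONE with PD_CTL_HOST_READY cleared again.
 * READ_ONCE() forces a fresh load of the control word that the engine
 * updates via DMA; the first not-yet-done tail entry ends the scan even
 * if later entries have already finished.
 */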

/**
 * Top Half of isr.
 */
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
						      u32 clr_val)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
}

static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
					   PPC4XX_TMO_ERR_INT);
}

/**
 * Supported Crypto Algorithms
 */
static struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cbc,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cfb(aes)",
			.cra_driver_name = "cfb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cfb,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ctr,
		.encrypt = crypto4xx_encrypt_ctr,
		.decrypt = crypto4xx_decrypt_ctr,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.setkey = crypto4xx_setkey_rfc3686,
		.encrypt = crypto4xx_rfc3686_encrypt,
		.decrypt = crypto4xx_rfc3686_decrypt,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = crypto4xx_setkey_aes_ecb,
		.encrypt = crypto4xx_encrypt_noiv,
		.decrypt = crypto4xx_decrypt_noiv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ofb(aes)",
			.cra_driver_name = "ofb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_TYPE_SKCIPHER |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ofb,
		.encrypt = crypto4xx_encrypt_iv,
		.decrypt = crypto4xx_decrypt_iv,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },

	/* AEAD */
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_ccm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_ccm,
		.decrypt = crypto4xx_decrypt_aes_ccm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = AES_BLOCK_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "ccm(aes)",
			.cra_driver_name = "ccm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_gcm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_gcm,
		.decrypt = crypto4xx_decrypt_aes_gcm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = GCM_AES_IV_SIZE,
		.maxauthsize = 16,
		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "gcm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = 1,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
};

/**
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;
	u32 pvr;
	bool is_revb = true;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
		is_revb = false;
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	rc = -ENOMEM;
	if (!core_dev->dev)
		goto err_alloc_dev;

	/*
	 * Older versions of the 460EX/GT have a hardware bug and therefore
	 * do not support H/W based security interrupt coalescing.
	 */
	pvr = mfspr(SPRN_PVR);
	if (is_revb && ((pvr >> 4) == 0x130218A)) {
		u32 min = PVR_MIN(pvr);

		if (min < 4) {
			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
			is_revb = false;
		}
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->dev->is_revb = is_revb;
	core_dev->device = dev;
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, is_revb ?
			 crypto4xx_ce_interrupt_handler_revb :
			 crypto4xx_ce_interrupt_handler, 0,
			 KBUILD_MODNAME, dev);
	if (rc)
		goto err_request_irq;

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	ppc4xx_trng_probe(core_dev);
	return 0;

err_start_dev:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	iounmap(core_dev->dev->ce_base);
err_iomap:
	tasklet_kill(&core_dev->tasklet);
err_build_sdr:
	crypto4xx_destroy_sdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_pdr:
	crypto4xx_destroy_pdr(core_dev->dev);
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}
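
/*
 * The error labels above unwind in reverse order of setup. Both the PDR
 * and GDR build failures funnel through err_build_pdr, and
 * crypto4xx_destroy_pdr()/crypto4xx_destroy_sdr() check each pointer
 * before freeing, so they are safe to call on partially built rings.
 */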

static int crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	ppc4xx_trng_remove(core_dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto", },
	{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = crypto4xx_match,
	},
	.probe = crypto4xx_probe,
	.remove = crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");