/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for
 * use with the Linux CryptoAPI.
 */

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"

#define PPC4XX_SEC_VERSION_STR			"0.5"
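
/*
 * Driver overview (summary of the code below): the packet engine is fed
 * through three rings kept in coherent DMA memory -- a packet descriptor
 * ring (PDR), a gather ring (GDR) for multi-segment sources and a scatter
 * ring (SDR) for multi-segment destinations.  crypto4xx_build_pd() fills
 * in the descriptors and kicks the engine; completions raise an interrupt
 * whose bottom-half tasklet walks the PDR and calls back into CryptoAPI.
 */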

/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_contol ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma, including reset of sg, pdr and pe, then release reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* un-reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* enable the packet engine, keep sg and pdr out of reset */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
}

int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					&ctx->sa_in_dma_addr, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
					 &ctx->sa_out_dma_addr, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		dma_free_coherent(ctx->dev->core_dev->device,
				  ctx->sa_len * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
		return -ENOMEM;
	}

	memset(ctx->sa_in, 0, size * 4);
	memset(ctx->sa_out, 0, size * 4);
	ctx->sa_len = size;

	return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	if (ctx->sa_in != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_in, ctx->sa_in_dma_addr);
	if (ctx->sa_out != NULL)
		dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
				  ctx->sa_out, ctx->sa_out_dma_addr);

	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;
}

u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
{
	ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
				sizeof(struct sa_state_record),
				&ctx->state_record_dma_addr, GFP_ATOMIC);
	if (!ctx->state_record_dma_addr)
		return -ENOMEM;
	memset(ctx->state_record, 0, sizeof(struct sa_state_record));

	return 0;
}

void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
{
	if (ctx->state_record != NULL)
		dma_free_coherent(ctx->dev->core_dev->device,
				  sizeof(struct sa_state_record),
				  ctx->state_record,
				  ctx->state_record_dma_addr);
	ctx->state_record_dma_addr = 0;
}

/**
 * alloc memory for the packet descriptor ring
 * pd_uinfo and the shadow SA/state-record pools are also allocated here
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;
	struct pd_uinfo *pd_uinfo;
	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr,
				  dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				   256 * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
				   GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;
	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
						sizeof(struct pd_uinfo) * i);

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
		pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;

		/* alloc state record */
		pd_uinfo->sr_va = dev->shadow_sr_pool +
		    sizeof(struct sa_state_record) * i;
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
		    sizeof(struct sa_state_record) * i;
	}

	return 0;
}

static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);
	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
			sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}
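
/*
 * PDR index handling: head == tail means the ring is empty, and one slot
 * is always left unused so that (head + 1) % PPC4XX_NUM_PD == tail signals
 * a full ring.  The _nolock variant below expects the caller to hold
 * core_dev->lock (crypto4xx_build_pd() takes it around the whole
 * descriptor reservation).
 */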
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo;
	unsigned long flags;

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * idx);
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
				       dma_addr_t *pd_dma, u32 idx)
{
	*pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;

	return dev->pdr + sizeof(struct ce_pd) * idx;
}

/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}

/*
 * When this function is called, preemption or interrupts must be disabled.
 */
u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
}

/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;
	struct ce_sd *sd_array;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			dev->scatter_buffer_size * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);
		return -ENOMEM;
	}

	sd_array = dev->sdr;

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		sd_array[i].ptr = dev->scatter_buffer_pa +
				  dev->scatter_buffer_size * i;
	}

	return 0;
}

static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr != NULL)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va != NULL)
		dma_free_coherent(dev->core_dev->device,
				  dev->scatter_buffer_size * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}

/*
 * When this function is called, preemption or interrupts must be disabled.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case has already been taken care of */
	dev->sdr_head = tmp;

	return retval;
}

static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}

static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
}
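
/*
 * Copy one scatter-buffer's worth of engine output back into the
 * destination page given by *addr.  Returns 1 when the current sg entry
 * still has data left in the scatter ring (the caller loops and calls
 * again with the advanced *addr), 0 once this entry is finished.
 */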
static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
				   dma_addr_t *addr, u32 *length,
				   u32 *idx, u32 *offset, u32 *nbytes)
{
	u32 len;

	if (*length > dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			dev->scatter_buffer_size);
		*offset = 0;
		*length -= dev->scatter_buffer_size;
		*nbytes -= dev->scatter_buffer_size;
		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;
		*addr = *addr + dev->scatter_buffer_size;
		return 1;
	} else if (*length < dev->scatter_buffer_size) {
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset, *length);
		if ((*offset + *length) == dev->scatter_buffer_size) {
			if (*idx == PPC4XX_LAST_SD)
				*idx = 0;
			else
				(*idx)++;
			*nbytes -= *length;
			*offset = 0;
		} else {
			*nbytes -= *length;
			*offset += *length;
		}

		return 0;
	} else {
		len = (*nbytes <= dev->scatter_buffer_size) ?
				(*nbytes) : dev->scatter_buffer_size;
		memcpy(phys_to_virt(*addr),
			dev->scatter_buffer_va +
			*idx * dev->scatter_buffer_size + *offset,
			len);
		*offset = 0;
		*nbytes -= len;

		if (*idx == PPC4XX_LAST_SD)
			*idx = 0;
		else
			(*idx)++;

		return 0;
	}
}

static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	dma_addr_t addr;
	u32 this_sd;
	u32 offset;
	u32 len;
	u32 i;
	u32 sg_len;
	struct scatterlist *sg;

	this_sd = pd_uinfo->first_sd;
	offset = 0;
	i = 0;

	while (nbytes) {
		sg = &dst[i];
		sg_len = sg->length;
		addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				sg->offset, sg->length, DMA_TO_DEVICE);

		if (offset == 0) {
			len = (nbytes <= sg->length) ? nbytes : sg->length;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
				&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			i++;
		} else {
			len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
				nbytes : (dev->scatter_buffer_size - offset);
			len = (sg->length < len) ? sg->length : len;
			while (crypto4xx_fill_one_page(dev, &addr, &len,
				&this_sd, &offset, &nbytes))
				;
			if (!nbytes)
				return;
			sg_len -= len;
			if (sg_len) {
				addr += len;
				while (crypto4xx_fill_one_page(dev, &addr,
					&sg_len, &this_sd, &offset, &nbytes))
					;
			}
			i++;
		}
	}
}

static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
					struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
	struct sa_state_record *state_record =
				(struct sa_state_record *) pd_uinfo->sr_va;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}

	return 0;
}

static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;
	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}

static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
				     struct pd_uinfo *pd_uinfo,
				     struct ce_pd *pd)
{
	struct crypto4xx_ctx *ctx;
	struct ablkcipher_request *ablk_req;
	struct scatterlist *dst;
	dma_addr_t addr;

	ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ablk_req->base.tfm);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
					  ablk_req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	if (ablk_req->base.complete != NULL)
		ablk_req->base.complete(&ablk_req->base, 0);

	return 0;
}

static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(pd_uinfo,
				     crypto_tfm_ctx(ahash_req->base.tfm));
	crypto4xx_ret_sg_desc(dev, pd_uinfo);
	/* call the user-provided callback function */
	if (ahash_req->base.complete != NULL)
		ahash_req->base.complete(&ahash_req->base, 0);

	return 0;
}

static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd;
	struct pd_uinfo *pd_uinfo;

	pd = dev->pdr + sizeof(struct ce_pd) * idx;
	pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo) * idx;
	if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
			CRYPTO_ALG_TYPE_ABLKCIPHER)
		return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
	else
		return crypto4xx_ahash_done(dev, pd_uinfo);
}

/**
 * Note: Only use this function to copy items that are word aligned.
 */
void crypto4xx_memcpy_le(unsigned int *dst,
			 const unsigned char *buf,
			 int len)
{
	u8 *tmp;
	for (; len >= 4; buf += 4, len -= 4)
		*dst++ = cpu_to_le32(*(unsigned int *) buf);

	tmp = (u8 *)dst;
	switch (len) {
	case 3:
		*tmp++ = 0;
		*tmp++ = *(buf + 2);
		*tmp++ = *(buf + 1);
		*tmp++ = *buf;
		break;
	case 2:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *(buf + 1);
		*tmp++ = *buf;
		break;
	case 1:
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = 0;
		*tmp++ = *buf;
		break;
	default:
		break;
	}
}

static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}

void crypto4xx_return_pd(struct crypto4xx_device *dev,
			 u32 pd_entry, struct ce_pd *pd,
			 struct pd_uinfo *pd_uinfo)
{
	/* irq should be already disabled */
	dev->pdr_head = pd_entry;
	pd->pd_ctl.w = 0;
	pd->pd_ctl_len.w = 0;
	pd_uinfo->state = PD_ENTRY_FREE;
}

/*
 * derive number of elements in scatterlist
 * Shamelessly copied from talitos.c
 */
static int get_sg_count(struct scatterlist *sg_list, int nbytes)
{
	struct scatterlist *sg = sg_list;
	int sg_nents = 0;

	while (nbytes) {
		sg_nents++;
		if (sg->length > nbytes)
			break;
		nbytes -= sg->length;
		sg = sg_next(sg);
	}

	return sg_nents;
}

static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}
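
/*
 * Build one packet descriptor for a request: reserve gather descriptors
 * when the source scatterlist has more than one entry, scatter
 * descriptors when the destination is not a single contiguous buffer,
 * copy the SA (and IV, via the state record) into place, then kick the
 * engine.  Returns -EINPROGRESS once the descriptor is queued, or
 * -EAGAIN if any of the rings is currently full.
 */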
u32 crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       unsigned int datalen,
		       void *iv, u32 iv_len)
{
	struct crypto4xx_device *dev = ctx->dev;
	dma_addr_t addr, pd_dma, sd_dma, gd_dma;
	struct dynamic_sa_ctl *sa;
	struct scatterlist *sg;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo = NULL;
	unsigned int nbytes = datalen, idx;
	unsigned int ivlen = 0;
	u32 gd_idx = 0;

	/* figure out how many gd are needed */
	num_gd = get_sg_count(src, datalen);
	if (num_gd == 1)
		num_gd = 0;

	/* figure out how many sd are needed */
	if (sg_is_last(dst) || ctx->is_hash) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected.
	 * The gather ring and scatter ring need to be consecutive.
	 * If we run out of any kind of descriptor, the descriptors
	 * already obtained must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
				       sizeof(struct pd_uinfo) * pd_entry);
	pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len || ctx->is_hash) {
		ivlen = iv_len;
		pd->sa = pd_uinfo->sa_pa;
		sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
		if (ctx->direction == DIR_INBOUND)
			memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
		else
			memcpy(sa, ctx->sa_out, ctx->sa_len * 4);

		memcpy((void *) sa + ctx->offset_to_sr_ptr,
			&pd_uinfo->sr_pa, 4);

		if (iv_len)
			crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
	} else {
		if (ctx->direction == DIR_INBOUND) {
			pd->sa = ctx->sa_in_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_in;
		} else {
			pd->sa = ctx->sa_out_dma_addr;
			sa = (struct dynamic_sa_ctl *) ctx->sa_out;
		}
	}
	pd->sa_len = ctx->sa_len;
	if (num_gd) {
		/* get the first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		idx = 0;
		src = &src[0];
		/* walk the sg and set up the gather array */
		while (nbytes) {
			sg = &src[idx];
			addr = dma_map_page(dev->core_dev->device, sg_page(sg),
				    sg->offset, sg->length, DMA_TO_DEVICE);
			gd->ptr = addr;
			gd->ctl_len.len = sg->length;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (sg->length >= nbytes)
				break;
			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			idx++;
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, src->length, DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (ctx->is_hash || sg_is_last(dst)) {
		/*
		 * We know the application gives us dst as a whole piece of
		 * memory, so there is no need to use the scatter ring.
		 * In the is_hash case, the icv is always at the end of the
		 * src data.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		if (ctx->is_hash)
			pd->dest = virt_to_phys((void *)dst);
		else
			pd->dest = (u32)dma_map_page(dev->core_dev->device,
					sg_page(dst), dst->offset,
					dst->length, DMA_TO_DEVICE);
	} else {
		struct ce_sd *sd = NULL;
		u32 sd_idx = fst_sd;
		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr should be set up by the sd_init routine */
		idx = 0;
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			else
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
		}
	}

	sa->sa_command_1.bf.hash_crypto_offset = 0;
	pd->pd_ctl.w = ctx->pd_ctl;
	pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
	pd_uinfo->state = PD_ENTRY_INUSE;
	wmb();
	/* write any value to push the engine to read a pd */
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return -EINPROGRESS;
}

/**
 * Algorithm Registration Functions
 */
static int crypto4xx_alg_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_in_dma_addr = 0;
	ctx->sa_out_dma_addr = 0;
	ctx->sa_len = 0;

	switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
	default:
		tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
					 sizeof(struct crypto4xx_ctx));
		break;
	}

	return 0;
}

static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	crypto4xx_free_sa(ctx);
	crypto4xx_free_state_record(ctx);
}

int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
			   struct crypto4xx_alg_common *crypto_alg,
			   int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		default:
			rc = crypto_register_alg(&alg->alg.u.cipher);
			break;
		}

		if (rc) {
			list_del(&alg->entry);
			kfree(alg);
		} else {
			list_add_tail(&alg->entry, &sec_dev->alg_list);
		}
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		default:
			crypto_unregister_alg(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}
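
/*
 * Bottom half: walk the PDR from the tail and complete every descriptor
 * the engine has marked done, returning its gather/scatter descriptors
 * and calling the request's completion callback.  Stop at the first
 * descriptor that is still in flight.
 */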
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail;

	while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
		tail = core_dev->dev->pdr_tail;
		pd_uinfo = core_dev->dev->pdr_uinfo +
			sizeof(struct pd_uinfo) * tail;
		pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
		if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
				   pd->pd_ctl.bf.pe_done &&
				   !pd->pd_ctl.bf.host_ready) {
			pd->pd_ctl.bf.pe_done = 0;
			crypto4xx_pd_done(core_dev->dev, tail);
			crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
			pd_uinfo->state = PD_ENTRY_FREE;
		} else {
			/* if tail not done, break */
			break;
		}
	}
}

/**
 * Top Half of isr.
 */
static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	if (core_dev->dev->ce_base == 0)
		return 0;

	writel(PPC4XX_INTERRUPT_CLR,
	       core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

/**
 * Supported Crypto Algorithms
 */
struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name 	= "cbc(aes)",
		.cra_driver_name = "cbc-aes-ppc4xx",
		.cra_priority 	= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags 	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
		.cra_blocksize 	= AES_BLOCK_SIZE,
		.cra_ctxsize 	= sizeof(struct crypto4xx_ctx),
		.cra_type 	= &crypto_ablkcipher_type,
		.cra_init	= crypto4xx_alg_init,
		.cra_exit	= crypto4xx_alg_exit,
		.cra_module 	= THIS_MODULE,
		.cra_u 		= {
			.ablkcipher = {
				.min_keysize 	= AES_MIN_KEY_SIZE,
				.max_keysize 	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey 	= crypto4xx_setkey_aes_cbc,
				.encrypt 	= crypto4xx_encrypt,
				.decrypt 	= crypto4xx_decrypt,
			}
		}
	}},
};

/**
 * Module Initialization Routine
 */
static int __init crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	if (!core_dev->dev)
		goto err_alloc_dev;

	core_dev->dev->core_dev = core_dev;
	core_dev->device = dev;
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_gdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
			 core_dev->dev->name, dev);
	if (rc)
		goto err_request_irq;

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
			       ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	return 0;

err_start_dev:
	iounmap(core_dev->dev->ce_base);
err_iomap:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	tasklet_kill(&core_dev->tasklet);
	crypto4xx_destroy_sdr(core_dev->dev);
err_build_sdr:
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_gdr:
	crypto4xx_destroy_pdr(core_dev->dev);
err_build_pdr:
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}

static int __exit crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}

static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto", },
	{ },
};

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = "crypto4xx",
		.owner = THIS_MODULE,
		.of_match_table = crypto4xx_match,
	},
	.probe		= crypto4xx_probe,
	.remove		= crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");