[linux/fpc-iii.git] / drivers/crypto/amcc/crypto4xx_core.c
1 /**
2 * AMCC SoC PPC4xx Crypto Driver
4 * Copyright (c) 2008 Applied Micro Circuits Corporation.
5 * All rights reserved. James Hsiao <jhsiao@amcc.com>
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
17 * This file implements AMCC crypto offload Linux device driver for use with
18 * Linux CryptoAPI.
21 #include <linux/kernel.h>
22 #include <linux/interrupt.h>
23 #include <linux/spinlock_types.h>
24 #include <linux/random.h>
25 #include <linux/scatterlist.h>
26 #include <linux/crypto.h>
27 #include <linux/dma-mapping.h>
28 #include <linux/platform_device.h>
29 #include <linux/init.h>
30 #include <linux/module.h>
31 #include <linux/of_address.h>
32 #include <linux/of_irq.h>
33 #include <linux/of_platform.h>
34 #include <linux/slab.h>
35 #include <asm/dcr.h>
36 #include <asm/dcr-regs.h>
37 #include <asm/cacheflush.h>
38 #include <crypto/aes.h>
39 #include <crypto/sha.h>
40 #include "crypto4xx_reg_def.h"
41 #include "crypto4xx_core.h"
42 #include "crypto4xx_sa.h"
44 #define PPC4XX_SEC_VERSION_STR "0.5"
46 /**
47 * PPC4xx Crypto Engine Initialization Routine
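 *
 * Resets the packet engine, scatter/gather and PDR logic, programs the
 * PD/RD, gather and scatter ring base addresses and sizes, seeds the
 * hardware PRNG from get_random_bytes(), sets the I/O thresholds and
 * finally clears pending interrupts and enables the "PD done" interrupt.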
49 static void crypto4xx_hw_init(struct crypto4xx_device *dev)
51 union ce_ring_size ring_size;
52 union ce_ring_contol ring_ctrl;
53 union ce_part_ring_size part_ring_size;
54 union ce_io_threshold io_threshold;
55 u32 rand_num;
56 union ce_pe_dma_cfg pe_dma_cfg;
57 u32 device_ctrl;
59 writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
60 	/* set up pe dma: reset sg, pdr and pe, then release the reset */
61 pe_dma_cfg.w = 0;
62 pe_dma_cfg.bf.bo_sgpd_en = 1;
63 pe_dma_cfg.bf.bo_data_en = 0;
64 pe_dma_cfg.bf.bo_sa_en = 1;
65 pe_dma_cfg.bf.bo_pd_en = 1;
66 pe_dma_cfg.bf.dynamic_sa_en = 1;
67 pe_dma_cfg.bf.reset_sg = 1;
68 pe_dma_cfg.bf.reset_pdr = 1;
69 pe_dma_cfg.bf.reset_pe = 1;
70 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
71 	/* release the reset on pe, sg and pdr */
72 pe_dma_cfg.bf.pe_mode = 0;
73 pe_dma_cfg.bf.reset_sg = 0;
74 pe_dma_cfg.bf.reset_pdr = 0;
75 pe_dma_cfg.bf.reset_pe = 0;
76 pe_dma_cfg.bf.bo_td_en = 0;
77 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
78 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
79 writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
80 writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
81 get_random_bytes(&rand_num, sizeof(rand_num));
82 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
83 get_random_bytes(&rand_num, sizeof(rand_num));
84 writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
85 ring_size.w = 0;
86 ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
87 ring_size.bf.ring_size = PPC4XX_NUM_PD;
88 writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
89 ring_ctrl.w = 0;
90 writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
91 device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
92 device_ctrl |= PPC4XX_DC_3DES_EN;
93 writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
94 writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
95 writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
96 part_ring_size.w = 0;
97 part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
98 part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
99 writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
100 writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
101 io_threshold.w = 0;
102 io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
103 io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
104 writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
105 writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
106 writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
107 writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
108 writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
109 writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
110 writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
111 writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
112 	/* enable the packet engine; sg and pdr stay out of reset */
113 pe_dma_cfg.bf.pe_mode = 1;
114 pe_dma_cfg.bf.reset_sg = 0;
115 pe_dma_cfg.bf.reset_pdr = 0;
116 pe_dma_cfg.bf.reset_pe = 0;
117 pe_dma_cfg.bf.bo_td_en = 0;
118 writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
119 	/* clear all pending interrupts */
120 writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
121 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
122 writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
123 writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
124 writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
127 int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
129 ctx->sa_in = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
130 &ctx->sa_in_dma_addr, GFP_ATOMIC);
131 if (ctx->sa_in == NULL)
132 return -ENOMEM;
134 ctx->sa_out = dma_alloc_coherent(ctx->dev->core_dev->device, size * 4,
135 &ctx->sa_out_dma_addr, GFP_ATOMIC);
136 if (ctx->sa_out == NULL) {
137 dma_free_coherent(ctx->dev->core_dev->device,
138 ctx->sa_len * 4,
139 ctx->sa_in, ctx->sa_in_dma_addr);
140 return -ENOMEM;
143 memset(ctx->sa_in, 0, size * 4);
144 memset(ctx->sa_out, 0, size * 4);
145 ctx->sa_len = size;
147 return 0;
150 void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
152 if (ctx->sa_in != NULL)
153 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
154 ctx->sa_in, ctx->sa_in_dma_addr);
155 if (ctx->sa_out != NULL)
156 dma_free_coherent(ctx->dev->core_dev->device, ctx->sa_len * 4,
157 ctx->sa_out, ctx->sa_out_dma_addr);
159 ctx->sa_in_dma_addr = 0;
160 ctx->sa_out_dma_addr = 0;
161 ctx->sa_len = 0;
164 u32 crypto4xx_alloc_state_record(struct crypto4xx_ctx *ctx)
166 ctx->state_record = dma_alloc_coherent(ctx->dev->core_dev->device,
167 sizeof(struct sa_state_record),
168 &ctx->state_record_dma_addr, GFP_ATOMIC);
169 if (!ctx->state_record_dma_addr)
170 return -ENOMEM;
171 memset(ctx->state_record, 0, sizeof(struct sa_state_record));
173 return 0;
176 void crypto4xx_free_state_record(struct crypto4xx_ctx *ctx)
178 if (ctx->state_record != NULL)
179 dma_free_coherent(ctx->dev->core_dev->device,
180 sizeof(struct sa_state_record),
181 ctx->state_record,
182 ctx->state_record_dma_addr);
183 ctx->state_record_dma_addr = 0;
187  * alloc memory for the packet descriptor ring
188  * no need to alloc buf for the ring
189  * pdr_tail and pdr_head index the ring and start out at zero
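 * (it also allocates the pdr_uinfo array plus the shadow SA pool,
 * 256 bytes per PD, and the shadow state record pool, so every in-flight
 * descriptor owns a private SA and state record copy)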
191 static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
193 int i;
194 struct pd_uinfo *pd_uinfo;
195 dev->pdr = dma_alloc_coherent(dev->core_dev->device,
196 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
197 &dev->pdr_pa, GFP_ATOMIC);
198 if (!dev->pdr)
199 return -ENOMEM;
201 dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
202 GFP_KERNEL);
203 if (!dev->pdr_uinfo) {
204 dma_free_coherent(dev->core_dev->device,
205 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
206 dev->pdr,
207 dev->pdr_pa);
208 return -ENOMEM;
210 memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
211 dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
212 256 * PPC4XX_NUM_PD,
213 &dev->shadow_sa_pool_pa,
214 GFP_ATOMIC);
215 if (!dev->shadow_sa_pool)
216 return -ENOMEM;
218 dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
219 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
220 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
221 if (!dev->shadow_sr_pool)
222 return -ENOMEM;
223 for (i = 0; i < PPC4XX_NUM_PD; i++) {
224 pd_uinfo = (struct pd_uinfo *) (dev->pdr_uinfo +
225 sizeof(struct pd_uinfo) * i);
227 /* alloc 256 bytes which is enough for any kind of dynamic sa */
228 pd_uinfo->sa_va = dev->shadow_sa_pool + 256 * i;
229 pd_uinfo->sa_pa = dev->shadow_sa_pool_pa + 256 * i;
231 /* alloc state record */
232 pd_uinfo->sr_va = dev->shadow_sr_pool +
233 sizeof(struct sa_state_record) * i;
234 pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
235 sizeof(struct sa_state_record) * i;
238 return 0;
241 static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
243 if (dev->pdr)
244 dma_free_coherent(dev->core_dev->device,
245 sizeof(struct ce_pd) * PPC4XX_NUM_PD,
246 dev->pdr, dev->pdr_pa);
248 if (dev->shadow_sa_pool)
249 dma_free_coherent(dev->core_dev->device, 256 * PPC4XX_NUM_PD,
250 dev->shadow_sa_pool, dev->shadow_sa_pool_pa);
252 if (dev->shadow_sr_pool)
253 dma_free_coherent(dev->core_dev->device,
254 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
255 dev->shadow_sr_pool, dev->shadow_sr_pool_pa);
257 kfree(dev->pdr_uinfo);
260 static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
262 u32 retval;
263 u32 tmp;
265 retval = dev->pdr_head;
266 tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;
268 if (tmp == dev->pdr_tail)
269 return ERING_WAS_FULL;
271 dev->pdr_head = tmp;
273 return retval;
276 static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
278 struct pd_uinfo *pd_uinfo;
279 unsigned long flags;
281 pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
282 sizeof(struct pd_uinfo) * idx);
283 spin_lock_irqsave(&dev->core_dev->lock, flags);
284 if (dev->pdr_tail != PPC4XX_LAST_PD)
285 dev->pdr_tail++;
286 else
287 dev->pdr_tail = 0;
288 pd_uinfo->state = PD_ENTRY_FREE;
289 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
291 return 0;
294 static struct ce_pd *crypto4xx_get_pdp(struct crypto4xx_device *dev,
295 dma_addr_t *pd_dma, u32 idx)
297 *pd_dma = dev->pdr_pa + sizeof(struct ce_pd) * idx;
299 return dev->pdr + sizeof(struct ce_pd) * idx;
303 * alloc memory for the gather ring
304 * no need to alloc buf for the ring
305 * gdr_tail, gdr_head and gdr_count are initialized by this function
307 static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
309 dev->gdr = dma_alloc_coherent(dev->core_dev->device,
310 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
311 &dev->gdr_pa, GFP_ATOMIC);
312 if (!dev->gdr)
313 return -ENOMEM;
315 memset(dev->gdr, 0, sizeof(struct ce_gd) * PPC4XX_NUM_GD);
317 return 0;
320 static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
322 dma_free_coherent(dev->core_dev->device,
323 sizeof(struct ce_gd) * PPC4XX_NUM_GD,
324 dev->gdr, dev->gdr_pa);
328  * Preemption or interrupts must be disabled
329  * when this function is called.
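 * The gather ring is empty when gdr_head == gdr_tail; the allocation is
 * rejected with ERING_WAS_FULL when advancing the head by n entries
 * would cross the tail.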
331 u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
333 u32 retval;
334 u32 tmp;
335 if (n >= PPC4XX_NUM_GD)
336 return ERING_WAS_FULL;
338 retval = dev->gdr_head;
339 tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
340 if (dev->gdr_head > dev->gdr_tail) {
341 if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
342 return ERING_WAS_FULL;
343 } else if (dev->gdr_head < dev->gdr_tail) {
344 if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
345 return ERING_WAS_FULL;
347 dev->gdr_head = tmp;
349 return retval;
352 static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
354 unsigned long flags;
356 spin_lock_irqsave(&dev->core_dev->lock, flags);
357 if (dev->gdr_tail == dev->gdr_head) {
358 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
359 return 0;
362 if (dev->gdr_tail != PPC4XX_LAST_GD)
363 dev->gdr_tail++;
364 else
365 dev->gdr_tail = 0;
367 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
369 return 0;
372 static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
373 dma_addr_t *gd_dma, u32 idx)
375 *gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;
377 return (struct ce_gd *) (dev->gdr + sizeof(struct ce_gd) * idx);
381 * alloc memory for the scatter ring
382 * need to alloc buf for the ring
383 * sdr_tail, sdr_head and sdr_count are initialized by this function
385 static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
387 int i;
388 struct ce_sd *sd_array;
390 /* alloc memory for scatter descriptor ring */
391 dev->sdr = dma_alloc_coherent(dev->core_dev->device,
392 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
393 &dev->sdr_pa, GFP_ATOMIC);
394 if (!dev->sdr)
395 return -ENOMEM;
397 dev->scatter_buffer_size = PPC4XX_SD_BUFFER_SIZE;
398 dev->scatter_buffer_va =
399 dma_alloc_coherent(dev->core_dev->device,
400 dev->scatter_buffer_size * PPC4XX_NUM_SD,
401 &dev->scatter_buffer_pa, GFP_ATOMIC);
402 if (!dev->scatter_buffer_va) {
403 dma_free_coherent(dev->core_dev->device,
404 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
405 dev->sdr, dev->sdr_pa);
406 return -ENOMEM;
409 sd_array = dev->sdr;
411 for (i = 0; i < PPC4XX_NUM_SD; i++) {
412 sd_array[i].ptr = dev->scatter_buffer_pa +
413 dev->scatter_buffer_size * i;
416 return 0;
419 static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
421 if (dev->sdr)
422 dma_free_coherent(dev->core_dev->device,
423 sizeof(struct ce_sd) * PPC4XX_NUM_SD,
424 dev->sdr, dev->sdr_pa);
426 if (dev->scatter_buffer_va)
427 dma_free_coherent(dev->core_dev->device,
428 dev->scatter_buffer_size * PPC4XX_NUM_SD,
429 dev->scatter_buffer_va,
430 dev->scatter_buffer_pa);
434  * Preemption or interrupts must be disabled
435  * when this function is called.
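 * Works like crypto4xx_get_n_gd(), but reserves n entries in the scatter
 * descriptor ring instead.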
437 static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
439 u32 retval;
440 u32 tmp;
442 if (n >= PPC4XX_NUM_SD)
443 return ERING_WAS_FULL;
445 retval = dev->sdr_head;
446 tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
447 	if (dev->sdr_head > dev->sdr_tail) {
448 if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
449 return ERING_WAS_FULL;
450 } else if (dev->sdr_head < dev->sdr_tail) {
451 if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
452 return ERING_WAS_FULL;
453 	} /* the head == tail, i.e. empty, case is already taken care of */
454 dev->sdr_head = tmp;
456 return retval;
459 static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
461 unsigned long flags;
463 spin_lock_irqsave(&dev->core_dev->lock, flags);
464 if (dev->sdr_tail == dev->sdr_head) {
465 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
466 return 0;
468 if (dev->sdr_tail != PPC4XX_LAST_SD)
469 dev->sdr_tail++;
470 else
471 dev->sdr_tail = 0;
472 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
474 return 0;
477 static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
478 dma_addr_t *sd_dma, u32 idx)
480 *sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;
482 return (struct ce_sd *)(dev->sdr + sizeof(struct ce_sd) * idx);
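
/*
 * Copy up to one scatter buffer worth of data from the scatter ring back
 * to the destination physical address.  Returns 1 when the requested
 * length still spans further scatter buffers (the caller loops with the
 * advanced *idx), 0 once the remaining data fits in the current buffer.
 */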
485 static u32 crypto4xx_fill_one_page(struct crypto4xx_device *dev,
486 dma_addr_t *addr, u32 *length,
487 u32 *idx, u32 *offset, u32 *nbytes)
489 u32 len;
491 if (*length > dev->scatter_buffer_size) {
492 memcpy(phys_to_virt(*addr),
493 dev->scatter_buffer_va +
494 *idx * dev->scatter_buffer_size + *offset,
495 dev->scatter_buffer_size);
496 *offset = 0;
497 *length -= dev->scatter_buffer_size;
498 *nbytes -= dev->scatter_buffer_size;
499 if (*idx == PPC4XX_LAST_SD)
500 *idx = 0;
501 else
502 (*idx)++;
503 *addr = *addr + dev->scatter_buffer_size;
504 return 1;
505 } else if (*length < dev->scatter_buffer_size) {
506 memcpy(phys_to_virt(*addr),
507 dev->scatter_buffer_va +
508 *idx * dev->scatter_buffer_size + *offset, *length);
509 if ((*offset + *length) == dev->scatter_buffer_size) {
510 if (*idx == PPC4XX_LAST_SD)
511 *idx = 0;
512 else
513 (*idx)++;
514 *nbytes -= *length;
515 *offset = 0;
516 } else {
517 *nbytes -= *length;
518 *offset += *length;
521 return 0;
522 } else {
523 len = (*nbytes <= dev->scatter_buffer_size) ?
524 (*nbytes) : dev->scatter_buffer_size;
525 memcpy(phys_to_virt(*addr),
526 dev->scatter_buffer_va +
527 *idx * dev->scatter_buffer_size + *offset,
528 len);
529 *offset = 0;
530 *nbytes -= len;
532 if (*idx == PPC4XX_LAST_SD)
533 *idx = 0;
534 else
535 (*idx)++;
537 return 0;
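
/*
 * Walk the destination scatterlist and drain the scatter ring buffers,
 * starting at pd_uinfo->first_sd, into it via crypto4xx_fill_one_page()
 * until all nbytes of the result have been copied.
 */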
541 static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
542 struct ce_pd *pd,
543 struct pd_uinfo *pd_uinfo,
544 u32 nbytes,
545 struct scatterlist *dst)
547 dma_addr_t addr;
548 u32 this_sd;
549 u32 offset;
550 u32 len;
551 u32 i;
552 u32 sg_len;
553 struct scatterlist *sg;
555 this_sd = pd_uinfo->first_sd;
556 offset = 0;
557 i = 0;
559 while (nbytes) {
560 sg = &dst[i];
561 sg_len = sg->length;
562 addr = dma_map_page(dev->core_dev->device, sg_page(sg),
563 sg->offset, sg->length, DMA_TO_DEVICE);
565 if (offset == 0) {
566 len = (nbytes <= sg->length) ? nbytes : sg->length;
567 while (crypto4xx_fill_one_page(dev, &addr, &len,
568 &this_sd, &offset, &nbytes))
570 if (!nbytes)
571 return;
572 i++;
573 } else {
574 len = (nbytes <= (dev->scatter_buffer_size - offset)) ?
575 nbytes : (dev->scatter_buffer_size - offset);
576 len = (sg->length < len) ? sg->length : len;
577 while (crypto4xx_fill_one_page(dev, &addr, &len,
578 &this_sd, &offset, &nbytes))
580 if (!nbytes)
581 return;
582 sg_len -= len;
583 if (sg_len) {
584 addr += len;
585 while (crypto4xx_fill_one_page(dev, &addr,
586 &sg_len, &this_sd, &offset, &nbytes))
589 i++;
594 static u32 crypto4xx_copy_digest_to_dst(struct pd_uinfo *pd_uinfo,
595 struct crypto4xx_ctx *ctx)
597 struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;
598 struct sa_state_record *state_record =
599 (struct sa_state_record *) pd_uinfo->sr_va;
601 if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
602 memcpy((void *) pd_uinfo->dest_va, state_record->save_digest,
603 SA_HASH_ALG_SHA1_DIGEST_SIZE);
606 return 0;
609 static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
610 struct pd_uinfo *pd_uinfo)
612 int i;
613 if (pd_uinfo->num_gd) {
614 for (i = 0; i < pd_uinfo->num_gd; i++)
615 crypto4xx_put_gd_to_gdr(dev);
616 pd_uinfo->first_gd = 0xffffffff;
617 pd_uinfo->num_gd = 0;
619 if (pd_uinfo->num_sd) {
620 for (i = 0; i < pd_uinfo->num_sd; i++)
621 crypto4xx_put_sd_to_sdr(dev);
623 pd_uinfo->first_sd = 0xffffffff;
624 pd_uinfo->num_sd = 0;
628 static u32 crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
629 struct pd_uinfo *pd_uinfo,
630 struct ce_pd *pd)
632 struct crypto4xx_ctx *ctx;
633 struct ablkcipher_request *ablk_req;
634 struct scatterlist *dst;
635 dma_addr_t addr;
637 ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
638 ctx = crypto_tfm_ctx(ablk_req->base.tfm);
640 if (pd_uinfo->using_sd) {
641 crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
642 ablk_req->dst);
643 } else {
644 dst = pd_uinfo->dest_va;
645 addr = dma_map_page(dev->core_dev->device, sg_page(dst),
646 dst->offset, dst->length, DMA_FROM_DEVICE);
648 crypto4xx_ret_sg_desc(dev, pd_uinfo);
649 if (ablk_req->base.complete != NULL)
650 ablk_req->base.complete(&ablk_req->base, 0);
652 return 0;
655 static u32 crypto4xx_ahash_done(struct crypto4xx_device *dev,
656 struct pd_uinfo *pd_uinfo)
658 struct crypto4xx_ctx *ctx;
659 struct ahash_request *ahash_req;
661 ahash_req = ahash_request_cast(pd_uinfo->async_req);
662 ctx = crypto_tfm_ctx(ahash_req->base.tfm);
664 crypto4xx_copy_digest_to_dst(pd_uinfo,
665 crypto_tfm_ctx(ahash_req->base.tfm));
666 crypto4xx_ret_sg_desc(dev, pd_uinfo);
667 	/* call the user-provided callback function */
668 if (ahash_req->base.complete != NULL)
669 ahash_req->base.complete(&ahash_req->base, 0);
671 return 0;
674 static u32 crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
676 struct ce_pd *pd;
677 struct pd_uinfo *pd_uinfo;
679 pd = dev->pdr + sizeof(struct ce_pd)*idx;
680 pd_uinfo = dev->pdr_uinfo + sizeof(struct pd_uinfo)*idx;
681 if (crypto_tfm_alg_type(pd_uinfo->async_req->tfm) ==
682 CRYPTO_ALG_TYPE_ABLKCIPHER)
683 return crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
684 else
685 return crypto4xx_ahash_done(dev, pd_uinfo);
689  * Note: Only use this function to copy items that are word aligned.
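 * Whole 32-bit words are converted with cpu_to_le32(); a trailing 1-3
 * byte remainder is zero-padded into one last word, e.g. a remainder of
 * three is stored as {0, buf[2], buf[1], buf[0]}.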
691 void crypto4xx_memcpy_le(unsigned int *dst,
692 const unsigned char *buf,
693 int len)
695 u8 *tmp;
696 for (; len >= 4; buf += 4, len -= 4)
697 *dst++ = cpu_to_le32(*(unsigned int *) buf);
699 tmp = (u8 *)dst;
700 switch (len) {
701 case 3:
702 *tmp++ = 0;
703 *tmp++ = *(buf+2);
704 *tmp++ = *(buf+1);
705 *tmp++ = *buf;
706 break;
707 case 2:
708 *tmp++ = 0;
709 *tmp++ = 0;
710 *tmp++ = *(buf+1);
711 *tmp++ = *buf;
712 break;
713 case 1:
714 *tmp++ = 0;
715 *tmp++ = 0;
716 *tmp++ = 0;
717 *tmp++ = *buf;
718 break;
719 default:
720 break;
724 static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
726 crypto4xx_destroy_pdr(core_dev->dev);
727 crypto4xx_destroy_gdr(core_dev->dev);
728 crypto4xx_destroy_sdr(core_dev->dev);
729 iounmap(core_dev->dev->ce_base);
730 kfree(core_dev->dev);
731 kfree(core_dev);
734 void crypto4xx_return_pd(struct crypto4xx_device *dev,
735 u32 pd_entry, struct ce_pd *pd,
736 struct pd_uinfo *pd_uinfo)
738 /* irq should be already disabled */
739 dev->pdr_head = pd_entry;
740 pd->pd_ctl.w = 0;
741 pd->pd_ctl_len.w = 0;
742 pd_uinfo->state = PD_ENTRY_FREE;
745 static u32 get_next_gd(u32 current)
747 if (current != PPC4XX_LAST_GD)
748 return current + 1;
749 else
750 return 0;
753 static u32 get_next_sd(u32 current)
755 if (current != PPC4XX_LAST_SD)
756 return current + 1;
757 else
758 return 0;
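
/*
 * Build one packet descriptor for the engine: reserve the required
 * gather, scatter and packet descriptors under the core lock (rolling
 * the ring heads back and returning -EAGAIN if any ring is full), copy
 * the SA and optional IV into the shadow SA/state record, fill the
 * gather array from the source scatterlist, point the destination either
 * straight at dst or at the scatter ring, and finally kick the engine by
 * writing CRYPTO4XX_INT_DESCR_RD.
 *
 * A hypothetical cipher entry point (the real callers live in
 * crypto4xx_alg.c) would invoke it roughly like:
 *
 *	return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst,
 *				  req->nbytes, req->info, ivsize);
 *
 * where "ivsize" stands in for the transform's IV length; treat this as
 * an illustrative sketch, not the exact upstream call.
 */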
761 u32 crypto4xx_build_pd(struct crypto_async_request *req,
762 struct crypto4xx_ctx *ctx,
763 struct scatterlist *src,
764 struct scatterlist *dst,
765 unsigned int datalen,
766 void *iv, u32 iv_len)
768 struct crypto4xx_device *dev = ctx->dev;
769 dma_addr_t addr, pd_dma, sd_dma, gd_dma;
770 struct dynamic_sa_ctl *sa;
771 struct scatterlist *sg;
772 struct ce_gd *gd;
773 struct ce_pd *pd;
774 u32 num_gd, num_sd;
775 u32 fst_gd = 0xffffffff;
776 u32 fst_sd = 0xffffffff;
777 u32 pd_entry;
778 unsigned long flags;
779 struct pd_uinfo *pd_uinfo = NULL;
780 unsigned int nbytes = datalen, idx;
781 unsigned int ivlen = 0;
782 u32 gd_idx = 0;
784 	/* figure out how many GDs are needed */
785 num_gd = sg_nents_for_len(src, datalen);
786 if (num_gd == 1)
787 num_gd = 0;
789 	/* figure out how many SDs are needed */
790 if (sg_is_last(dst) || ctx->is_hash) {
791 num_sd = 0;
792 } else {
793 if (datalen > PPC4XX_SD_BUFFER_SIZE) {
794 num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
795 if (datalen % PPC4XX_SD_BUFFER_SIZE)
796 num_sd++;
797 } else {
798 num_sd = 1;
803 	 * The following section of code needs to be protected.
804 	 * The gather ring and scatter ring entries need to be consecutive.
805 	 * If we run out of any kind of descriptor, the descriptors
806 	 * already obtained must be returned to their original place.
808 spin_lock_irqsave(&dev->core_dev->lock, flags);
809 if (num_gd) {
810 fst_gd = crypto4xx_get_n_gd(dev, num_gd);
811 if (fst_gd == ERING_WAS_FULL) {
812 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
813 return -EAGAIN;
816 if (num_sd) {
817 fst_sd = crypto4xx_get_n_sd(dev, num_sd);
818 if (fst_sd == ERING_WAS_FULL) {
819 if (num_gd)
820 dev->gdr_head = fst_gd;
821 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
822 return -EAGAIN;
825 pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
826 if (pd_entry == ERING_WAS_FULL) {
827 if (num_gd)
828 dev->gdr_head = fst_gd;
829 if (num_sd)
830 dev->sdr_head = fst_sd;
831 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
832 return -EAGAIN;
834 spin_unlock_irqrestore(&dev->core_dev->lock, flags);
836 pd_uinfo = (struct pd_uinfo *)(dev->pdr_uinfo +
837 sizeof(struct pd_uinfo) * pd_entry);
838 pd = crypto4xx_get_pdp(dev, &pd_dma, pd_entry);
839 pd_uinfo->async_req = req;
840 pd_uinfo->num_gd = num_gd;
841 pd_uinfo->num_sd = num_sd;
843 if (iv_len || ctx->is_hash) {
844 ivlen = iv_len;
845 pd->sa = pd_uinfo->sa_pa;
846 sa = (struct dynamic_sa_ctl *) pd_uinfo->sa_va;
847 if (ctx->direction == DIR_INBOUND)
848 memcpy(sa, ctx->sa_in, ctx->sa_len * 4);
849 else
850 memcpy(sa, ctx->sa_out, ctx->sa_len * 4);
852 memcpy((void *) sa + ctx->offset_to_sr_ptr,
853 &pd_uinfo->sr_pa, 4);
855 if (iv_len)
856 crypto4xx_memcpy_le(pd_uinfo->sr_va, iv, iv_len);
857 } else {
858 if (ctx->direction == DIR_INBOUND) {
859 pd->sa = ctx->sa_in_dma_addr;
860 sa = (struct dynamic_sa_ctl *) ctx->sa_in;
861 } else {
862 pd->sa = ctx->sa_out_dma_addr;
863 sa = (struct dynamic_sa_ctl *) ctx->sa_out;
866 pd->sa_len = ctx->sa_len;
867 if (num_gd) {
868 /* get first gd we are going to use */
869 gd_idx = fst_gd;
870 pd_uinfo->first_gd = fst_gd;
871 pd_uinfo->num_gd = num_gd;
872 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
873 pd->src = gd_dma;
874 /* enable gather */
875 sa->sa_command_0.bf.gather = 1;
876 idx = 0;
877 src = &src[0];
878 		/* walk the sg list and set up the gather array */
879 while (nbytes) {
880 sg = &src[idx];
881 addr = dma_map_page(dev->core_dev->device, sg_page(sg),
882 sg->offset, sg->length, DMA_TO_DEVICE);
883 gd->ptr = addr;
884 gd->ctl_len.len = sg->length;
885 gd->ctl_len.done = 0;
886 gd->ctl_len.ready = 1;
887 if (sg->length >= nbytes)
888 break;
889 nbytes -= sg->length;
890 gd_idx = get_next_gd(gd_idx);
891 gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
892 idx++;
894 } else {
895 pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
896 src->offset, src->length, DMA_TO_DEVICE);
898 * Disable gather in sa command
900 sa->sa_command_0.bf.gather = 0;
902 * Indicate gather array is not used
904 pd_uinfo->first_gd = 0xffffffff;
905 pd_uinfo->num_gd = 0;
907 if (ctx->is_hash || sg_is_last(dst)) {
909 		 * we know the application gives us dst as one whole piece of memory,
910 		 * so there is no need to use the scatter ring.
911 		 * in the is_hash case, the icv is always at the end of the src data.
913 pd_uinfo->using_sd = 0;
914 pd_uinfo->first_sd = 0xffffffff;
915 pd_uinfo->num_sd = 0;
916 pd_uinfo->dest_va = dst;
917 sa->sa_command_0.bf.scatter = 0;
918 if (ctx->is_hash)
919 pd->dest = virt_to_phys((void *)dst);
920 else
921 pd->dest = (u32)dma_map_page(dev->core_dev->device,
922 sg_page(dst), dst->offset,
923 dst->length, DMA_TO_DEVICE);
924 } else {
925 struct ce_sd *sd = NULL;
926 u32 sd_idx = fst_sd;
927 nbytes = datalen;
928 sa->sa_command_0.bf.scatter = 1;
929 pd_uinfo->using_sd = 1;
930 pd_uinfo->dest_va = dst;
931 pd_uinfo->first_sd = fst_sd;
932 pd_uinfo->num_sd = num_sd;
933 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
934 pd->dest = sd_dma;
935 /* setup scatter descriptor */
936 sd->ctl.done = 0;
937 sd->ctl.rdy = 1;
938 		/* sd->ptr should be set up by the sd_init routine */
939 idx = 0;
940 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
941 nbytes -= PPC4XX_SD_BUFFER_SIZE;
942 else
943 nbytes = 0;
944 while (nbytes) {
945 sd_idx = get_next_sd(sd_idx);
946 sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
947 /* setup scatter descriptor */
948 sd->ctl.done = 0;
949 sd->ctl.rdy = 1;
950 if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
951 nbytes -= PPC4XX_SD_BUFFER_SIZE;
952 else
954 				 * an SD entry can hold PPC4XX_SD_BUFFER_SIZE bytes,
955 				 * which is more than the remaining nbytes, so we are done.
957 nbytes = 0;
961 sa->sa_command_1.bf.hash_crypto_offset = 0;
962 pd->pd_ctl.w = ctx->pd_ctl;
963 pd->pd_ctl_len.w = 0x00400000 | (ctx->bypass << 24) | datalen;
964 pd_uinfo->state = PD_ENTRY_INUSE;
965 wmb();
966 	/* write any value to prompt the engine to read a pd */
967 writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
968 return -EINPROGRESS;
972 * Algorithm Registration Functions
974 static int crypto4xx_alg_init(struct crypto_tfm *tfm)
976 struct crypto_alg *alg = tfm->__crt_alg;
977 struct crypto4xx_alg *amcc_alg = crypto_alg_to_crypto4xx_alg(alg);
978 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
980 ctx->dev = amcc_alg->dev;
981 ctx->sa_in = NULL;
982 ctx->sa_out = NULL;
983 ctx->sa_in_dma_addr = 0;
984 ctx->sa_out_dma_addr = 0;
985 ctx->sa_len = 0;
987 switch (alg->cra_flags & CRYPTO_ALG_TYPE_MASK) {
988 default:
989 tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);
990 break;
991 case CRYPTO_ALG_TYPE_AHASH:
992 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
993 sizeof(struct crypto4xx_ctx));
994 break;
997 return 0;
1000 static void crypto4xx_alg_exit(struct crypto_tfm *tfm)
1002 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);
1004 crypto4xx_free_sa(ctx);
1005 crypto4xx_free_state_record(ctx);
1008 int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
1009 struct crypto4xx_alg_common *crypto_alg,
1010 int array_size)
1012 struct crypto4xx_alg *alg;
1013 int i;
1014 int rc = 0;
1016 for (i = 0; i < array_size; i++) {
1017 alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
1018 if (!alg)
1019 return -ENOMEM;
1021 alg->alg = crypto_alg[i];
1022 alg->dev = sec_dev;
1024 switch (alg->alg.type) {
1025 case CRYPTO_ALG_TYPE_AHASH:
1026 rc = crypto_register_ahash(&alg->alg.u.hash);
1027 break;
1029 default:
1030 rc = crypto_register_alg(&alg->alg.u.cipher);
1031 break;
1034 if (rc)
1035 kfree(alg);
1036 else
1037 list_add_tail(&alg->entry, &sec_dev->alg_list);
1040 return 0;
1043 static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
1045 struct crypto4xx_alg *alg, *tmp;
1047 list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
1048 list_del(&alg->entry);
1049 switch (alg->alg.type) {
1050 case CRYPTO_ALG_TYPE_AHASH:
1051 crypto_unregister_ahash(&alg->alg.u.hash);
1052 break;
1054 default:
1055 crypto_unregister_alg(&alg->alg.u.cipher);
1057 kfree(alg);
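
/*
 * Bottom half: walk the packet descriptor ring from the tail and complete
 * every descriptor whose pe_done bit is set, returning its gather/scatter
 * entries and calling the request's completion callback.
 */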
1061 static void crypto4xx_bh_tasklet_cb(unsigned long data)
1063 struct device *dev = (struct device *)data;
1064 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1065 struct pd_uinfo *pd_uinfo;
1066 struct ce_pd *pd;
1067 u32 tail;
1069 while (core_dev->dev->pdr_head != core_dev->dev->pdr_tail) {
1070 tail = core_dev->dev->pdr_tail;
1071 pd_uinfo = core_dev->dev->pdr_uinfo +
1072 sizeof(struct pd_uinfo)*tail;
1073 pd = core_dev->dev->pdr + sizeof(struct ce_pd) * tail;
1074 if ((pd_uinfo->state == PD_ENTRY_INUSE) &&
1075 pd->pd_ctl.bf.pe_done &&
1076 !pd->pd_ctl.bf.host_ready) {
1077 pd->pd_ctl.bf.pe_done = 0;
1078 crypto4xx_pd_done(core_dev->dev, tail);
1079 crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
1080 pd_uinfo->state = PD_ENTRY_FREE;
1081 } else {
1082 /* if tail not done, break */
1083 break;
1089  * Top half of the ISR.
1091 static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
1093 struct device *dev = (struct device *)data;
1094 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1096 if (!core_dev->dev->ce_base)
1097 return 0;
1099 writel(PPC4XX_INTERRUPT_CLR,
1100 core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
1101 tasklet_schedule(&core_dev->tasklet);
1103 return IRQ_HANDLED;
1107 * Supported Crypto Algorithms
1109 struct crypto4xx_alg_common crypto4xx_alg[] = {
1110 /* Crypto AES modes */
1111 { .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
1112 .cra_name = "cbc(aes)",
1113 .cra_driver_name = "cbc-aes-ppc4xx",
1114 .cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
1115 .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
1116 .cra_blocksize = AES_BLOCK_SIZE,
1117 .cra_ctxsize = sizeof(struct crypto4xx_ctx),
1118 .cra_type = &crypto_ablkcipher_type,
1119 .cra_init = crypto4xx_alg_init,
1120 .cra_exit = crypto4xx_alg_exit,
1121 .cra_module = THIS_MODULE,
1122 .cra_u = {
1123 .ablkcipher = {
1124 .min_keysize = AES_MIN_KEY_SIZE,
1125 .max_keysize = AES_MAX_KEY_SIZE,
1126 .ivsize = AES_IV_SIZE,
1127 .setkey = crypto4xx_setkey_aes_cbc,
1128 .encrypt = crypto4xx_encrypt,
1129 .decrypt = crypto4xx_decrypt,
1136 * Module Initialization Routine
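 *
 * Soft-resets the crypto core through the SDR0 DCR registers, allocates
 * the core device and builds the PD/GD/SD rings, sets up the bottom-half
 * tasklet and the crypto engine interrupt, maps the register space, runs
 * crypto4xx_hw_init() and registers the supported algorithms with the
 * Linux CryptoAPI.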
1138 static int crypto4xx_probe(struct platform_device *ofdev)
1140 int rc;
1141 struct resource res;
1142 struct device *dev = &ofdev->dev;
1143 struct crypto4xx_core_device *core_dev;
1145 rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
1146 if (rc)
1147 return -ENODEV;
1149 if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
1150 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1151 mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
1152 mtdcri(SDR0, PPC460EX_SDR0_SRST,
1153 mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
1154 } else if (of_find_compatible_node(NULL, NULL,
1155 "amcc,ppc405ex-crypto")) {
1156 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1157 mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
1158 mtdcri(SDR0, PPC405EX_SDR0_SRST,
1159 mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
1160 } else if (of_find_compatible_node(NULL, NULL,
1161 "amcc,ppc460sx-crypto")) {
1162 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1163 mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
1164 mtdcri(SDR0, PPC460SX_SDR0_SRST,
1165 mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
1166 } else {
1167 printk(KERN_ERR "Crypto Function Not supported!\n");
1168 return -EINVAL;
1171 core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
1172 if (!core_dev)
1173 return -ENOMEM;
1175 dev_set_drvdata(dev, core_dev);
1176 core_dev->ofdev = ofdev;
1177 core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
1178 if (!core_dev->dev)
1179 goto err_alloc_dev;
1181 core_dev->dev->core_dev = core_dev;
1182 core_dev->device = dev;
1183 spin_lock_init(&core_dev->lock);
1184 INIT_LIST_HEAD(&core_dev->dev->alg_list);
1185 rc = crypto4xx_build_pdr(core_dev->dev);
1186 if (rc)
1187 goto err_build_pdr;
1189 rc = crypto4xx_build_gdr(core_dev->dev);
1190 if (rc)
1191 goto err_build_pdr;
1193 rc = crypto4xx_build_sdr(core_dev->dev);
1194 if (rc)
1195 goto err_build_sdr;
1197 /* Init tasklet for bottom half processing */
1198 tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
1199 (unsigned long) dev);
1201 /* Register for Crypto isr, Crypto Engine IRQ */
1202 core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1203 rc = request_irq(core_dev->irq, crypto4xx_ce_interrupt_handler, 0,
1204 core_dev->dev->name, dev);
1205 if (rc)
1206 goto err_request_irq;
1208 core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
1209 if (!core_dev->dev->ce_base) {
1210 dev_err(dev, "failed to of_iomap\n");
1211 rc = -ENOMEM;
1212 goto err_iomap;
1215 /* need to setup pdr, rdr, gdr and sdr before this */
1216 crypto4xx_hw_init(core_dev->dev);
1218 /* Register security algorithms with Linux CryptoAPI */
1219 rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
1220 ARRAY_SIZE(crypto4xx_alg));
1221 if (rc)
1222 goto err_start_dev;
1224 return 0;
1226 err_start_dev:
1227 iounmap(core_dev->dev->ce_base);
1228 err_iomap:
1229 free_irq(core_dev->irq, dev);
1230 err_request_irq:
1231 irq_dispose_mapping(core_dev->irq);
1232 tasklet_kill(&core_dev->tasklet);
1233 err_build_sdr:
1234 crypto4xx_destroy_sdr(core_dev->dev);
1235 crypto4xx_destroy_gdr(core_dev->dev);
1236 err_build_pdr:
1237 crypto4xx_destroy_pdr(core_dev->dev);
1238 kfree(core_dev->dev);
1239 err_alloc_dev:
1240 kfree(core_dev);
1242 return rc;
1245 static int crypto4xx_remove(struct platform_device *ofdev)
1247 struct device *dev = &ofdev->dev;
1248 struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
1250 free_irq(core_dev->irq, dev);
1251 irq_dispose_mapping(core_dev->irq);
1253 tasklet_kill(&core_dev->tasklet);
1254 /* Un-register with Linux CryptoAPI */
1255 crypto4xx_unregister_alg(core_dev->dev);
1256 /* Free all allocated memory */
1257 crypto4xx_stop_all(core_dev);
1259 return 0;
1262 static const struct of_device_id crypto4xx_match[] = {
1263 { .compatible = "amcc,ppc4xx-crypto",},
1264 { },
1266 MODULE_DEVICE_TABLE(of, crypto4xx_match);
1268 static struct platform_driver crypto4xx_driver = {
1269 .driver = {
1270 .name = "crypto4xx",
1271 .of_match_table = crypto4xx_match,
1273 .probe = crypto4xx_probe,
1274 .remove = crypto4xx_remove,
1277 module_platform_driver(crypto4xx_driver);
1279 MODULE_LICENSE("GPL");
1280 MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
1281 MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");