/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * This file implements the AMCC crypto offload Linux device driver for use
 * with the Linux CryptoAPI.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>
#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"
#define PPC4XX_SEC_VERSION_STR			"0.5"
/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_control ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma, include reset sg, pdr and pe, then release reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* un reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);
	ring_size.w = 0;
	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);
	part_ring_size.w = 0;
	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);
	io_threshold.w = 0;
	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* un reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	if (dev->is_revb) {
		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
		       dev->ce_base + CRYPTO4XX_INT_EN);
	} else {
		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
	}
}
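
/*
 * SA (security association) helpers: each tfm context keeps an inbound and
 * an outbound SA copy. The size argument below is in 32-bit words, hence
 * the "size * 4" byte allocations.
 */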
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = kzalloc(size * 4, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = kzalloc(size * 4, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		kfree(ctx->sa_in);
		ctx->sa_in = NULL;
		return -ENOMEM;
	}

	return 0;
}
void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	kfree(ctx->sa_in);
	ctx->sa_in = NULL;
	kfree(ctx->sa_out);
	ctx->sa_out = NULL;
}
/**
 * alloc memory for the packet descriptor ring
 * pdr_tail and pdr_head are initialized by this function
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_ATOMIC);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kzalloc(sizeof(struct pd_uinfo) * PPC4XX_NUM_PD,
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);
		return -ENOMEM;
	}
	memset(dev->pdr, 0, sizeof(struct ce_pd) * PPC4XX_NUM_PD);
	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				   sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				   &dev->shadow_sa_pool_pa,
				   GFP_ATOMIC);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
			 sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
			 &dev->shadow_sr_pool_pa, GFP_ATOMIC);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;

	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		struct ce_pd *pd = &dev->pdr[i];
		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];

		pd->sa = dev->shadow_sa_pool_pa +
			sizeof(union shadow_sa_buf) * i;

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;

		/* alloc state record */
		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
			sizeof(struct sa_state_record) * i;
	}

	return 0;
}
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);

	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				  dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}
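
/*
 * Reserve the next free packet descriptor (PD) slot. The caller is expected
 * to hold core_dev->lock; ERING_WAS_FULL is returned when the PDR is full.
 */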
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
	u32 tail;
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	pd_uinfo->state = PD_ENTRY_FREE;

	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	tail = dev->pdr_tail;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return tail;
}
/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_zalloc_coherent(dev->core_dev->device,
				       sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				       &dev->gdr_pa, GFP_ATOMIC);
	if (!dev->gdr)
		return -ENOMEM;

	return 0;
}
static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}
/*
 * Preemption or interrupts must be disabled when this function
 * is called.
 */
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}
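
/* Release one gather descriptor by advancing the GDR tail with wrap-around. */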
static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}
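
/* Translate a gather ring index into its CPU pointer and bus address. */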
static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return &dev->gdr[idx];
}
/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_ATOMIC);
	if (!dev->sdr)
		return -ENOMEM;

	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_ATOMIC);
	if (!dev->scatter_buffer_va) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);
		return -ENOMEM;
	}

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		dev->sdr[i].ptr = dev->scatter_buffer_pa +
				  PPC4XX_SD_BUFFER_SIZE * i;
	}

	return 0;
}
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va)
		dma_free_coherent(dev->core_dev->device,
				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}
/*
 * Preemption or interrupts must be disabled when this function
 * is called.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
	dev->sdr_head = tmp;

	return retval;
}
static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}
	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}
static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return &dev->sdr[idx];
}
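
/*
 * Copy data that the engine left in the scatter ring bounce buffers back
 * into the caller's destination scatterlist.
 */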
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	unsigned int first_sd = pd_uinfo->first_sd;
	unsigned int last_sd;
	unsigned int overflow = 0;
	unsigned int to_copy;
	unsigned int dst_start = 0;

	/*
	 * Because the scatter buffers are all neatly organized in one
	 * big continuous ringbuffer; scatterwalk_map_and_copy() can
	 * be instructed to copy a range of buffers in one go.
	 */
	last_sd = (first_sd + pd_uinfo->num_sd);
	if (last_sd > PPC4XX_LAST_SD) {
		last_sd = PPC4XX_LAST_SD;
		overflow = last_sd % PPC4XX_NUM_SD;
	}

	while (nbytes) {
		void *buf = dev->scatter_buffer_va +
			first_sd * PPC4XX_SD_BUFFER_SIZE;

		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
				      (1 + last_sd - first_sd));
		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
		nbytes -= to_copy;

		if (overflow) {
			first_sd = 0;
			last_sd = overflow;
			dst_start += to_copy;
			overflow = 0;
		}
	}
}
static void crypto4xx_copy_digest_to_dst(void *dst,
					 struct pd_uinfo *pd_uinfo,
					 struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy(dst, pd_uinfo->sr_va->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}
}
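
/*
 * Return the gather/scatter descriptors used by a finished request to their
 * rings and mark the per-request bookkeeping as unused again.
 */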
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}
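
/* Completion path for ablkcipher requests. */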
static void crypto4xx_ablkcipher_done(struct crypto4xx_device *dev,
				      struct pd_uinfo *pd_uinfo,
				      struct ce_pd *pd)
{
	struct crypto4xx_ctx *ctx;
	struct ablkcipher_request *ablk_req;
	struct scatterlist *dst;
	dma_addr_t addr;

	ablk_req = ablkcipher_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ablk_req->base.tfm);

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo, ablk_req->nbytes,
					  ablk_req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}
	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		ablkcipher_request_complete(ablk_req, -EINPROGRESS);
	ablkcipher_request_complete(ablk_req, 0);
}
static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
				 struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo,
				     crypto_tfm_ctx(ahash_req->base.tfm));
	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		ahash_request_complete(ahash_req, -EINPROGRESS);
	ahash_request_complete(ahash_req, 0);
}
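
/*
 * Completion path for AEAD requests: copy back the payload, append the ICV
 * on encryption or verify it on decryption, and report engine errors.
 */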
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo,
				struct ce_pd *pd)
{
	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
		struct aead_request, base);
	struct scatterlist *dst = pd_uinfo->dest_va;
	size_t cp_len = crypto_aead_authsize(
		crypto_aead_reqtfm(aead_req));
	u32 icv[AES_BLOCK_SIZE];
	int err = 0;

	if (pd_uinfo->using_sd) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  pd->pd_ctl_len.bf.pkt_len,
					  dst);
	} else {
		__dma_sync_page(sg_page(dst), dst->offset, dst->length,
				DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
		/* append icv at the end */
		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
					   cp_len);

		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
					 cp_len, 1);
	} else {
		/* check icv at the end */
		scatterwalk_map_and_copy(icv, aead_req->src,
			aead_req->assoclen + aead_req->cryptlen -
			cp_len, cp_len, 0);

		crypto4xx_memcpy_from_le32(icv, icv, cp_len);

		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
			err = -EBADMSG;
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd->pd_ctl.bf.status & 0xff) {
		if (!__ratelimit(&dev->aead_ratelimit)) {
			if (pd->pd_ctl.bf.status & 2)
				pr_err("pad fail error\n");
			if (pd->pd_ctl.bf.status & 4)
				pr_err("seqnum fail\n");
			if (pd->pd_ctl.bf.status & 8)
				pr_err("error _notify\n");
			pr_err("aead return err status = 0x%02x\n",
				pd->pd_ctl.bf.status & 0xff);
			pr_err("pd pad_ctl = 0x%08x\n",
				pd->pd_ctl.bf.pd_pad_ctl);
		}
		err = -EINVAL;
	}

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		aead_request_complete(aead_req, -EINPROGRESS);

	aead_request_complete(aead_req, err);
}
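
/* Dispatch a completed packet descriptor to the matching completion handler. */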
static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd = &dev->pdr[idx];
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];

	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		crypto4xx_ablkcipher_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto4xx_aead_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto4xx_ahash_done(dev, pd_uinfo);
		break;
	}
}
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}
static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}
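
/*
 * Build one packet descriptor for a crypto request: reserve gather/scatter/
 * packet descriptors, copy the SA and IV into the shadow buffers, map the
 * source and destination, and hand the descriptor to the engine.
 */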
int crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       const unsigned int datalen,
		       const __le32 *iv, const u32 iv_len,
		       const struct dynamic_sa_ctl *req_sa,
		       const unsigned int sa_len,
		       const unsigned int assoclen)
{
	struct scatterlist _dst[2];
	struct crypto4xx_device *dev = ctx->dev;
	struct dynamic_sa_ctl *sa;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo;
	unsigned int nbytes = datalen;
	size_t offset_to_sr_ptr;
	u32 gd_idx = 0;
	int tmp;
	bool is_busy;

	/* figure how many gd are needed */
	tmp = sg_nents_for_len(src, assoclen + datalen);
	if (tmp < 0) {
		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
		return tmp;
	}
	if (tmp == 1)
		tmp = 0;
	num_gd = tmp;

	if (assoclen) {
		nbytes += assoclen;
		dst = scatterwalk_ffwd(_dst, dst, assoclen);
	}

	/* figure how many sd are needed */
	if (sg_is_last(dst)) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected.
	 * The gather ring and scatter ring need to be consecutive.
	 * If we run out of any kind of descriptor, the descriptors
	 * already reserved must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	/*
	 * Let the caller know to slow down, once more than 13/16ths = 81%
	 * of the available data contexts are being used simultaneously.
	 *
	 * With PPC4XX_NUM_PD = 256, this leaves a "backlog queue" of
	 * 31 more contexts before new requests have to be rejected.
	 */
	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 13) / 16);
	} else {
		/*
		 * To fix contention issues between ipsec (no backlog) and
		 * dm-crypt (backlog), reserve 32 entries for "no backlog"
		 * data contexts.
		 */
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 15) / 16);

		if (is_busy) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EBUSY;
		}
	}

	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);
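
	/* Rings reserved; now fill in the packet descriptor and its shadow state. */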
	pd = &dev->pdr[pd_entry];
	pd->sa_len = sa_len;

	pd_uinfo = &dev->pdr_uinfo[pd_entry];
	pd_uinfo->async_req = req;
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;

	if (iv_len)
		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);

	sa = pd_uinfo->sa_va;
	memcpy(sa, req_sa, sa_len * 4);

	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;

	if (num_gd) {
		dma_addr_t gd_dma;
		struct scatterlist *sg;

		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		pd_uinfo->num_gd = num_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		/* walk the sg, and setup gather array */
		sg = src;
		while (nbytes) {
			size_t len;

			len = min(sg->length, nbytes);
			gd->ptr = dma_map_page(dev->core_dev->device,
				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
			gd->ctl_len.len = len;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (len >= nbytes)
				break;

			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			sg = sg_next(sg);
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, min(nbytes, src->length),
				DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (sg_is_last(dst)) {
		/*
		 * we know application give us dst a whole piece of memory
		 * no need to use scatter ring.
		 */
		pd_uinfo->using_sd = 0;
		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
		pd_uinfo->dest_va = dst;
		sa->sa_command_0.bf.scatter = 0;
		pd->dest = (u32)dma_map_page(dev->core_dev->device,
					     sg_page(dst), dst->offset,
					     min(datalen, dst->length),
					     DMA_TO_DEVICE);
	} else {
		dma_addr_t sd_dma;
		struct ce_sd *sd = NULL;
		u32 sd_idx = fst_sd;

		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->using_sd = 1;
		pd_uinfo->dest_va = dst;
		pd_uinfo->first_sd = fst_sd;
		pd_uinfo->num_sd = num_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr should be setup by sd_init routine*/
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			} else {
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
			}
		}
	}

	pd->pd_ctl.w = PD_CTL_HOST_READY |
		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) |
		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
			PD_CTL_HASH_FINAL : 0);
	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);

	wmb();
	/* write any value to push engine to read a pd */
	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return is_busy ? -EBUSY : -EINPROGRESS;
}
/**
 * Algorithm Registration Functions
 */
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
			       struct crypto4xx_ctx *ctx)
{
	ctx->dev = amcc_alg->dev;
}
static int crypto4xx_ablk_init(struct crypto_tfm *tfm)
{
	struct crypto_alg *alg = tfm->__crt_alg;
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm);

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
	crypto4xx_ctx_init(amcc_alg, ctx);
	tfm->crt_ablkcipher.reqsize = sizeof(struct crypto4xx_ctx);

	return 0;
}
static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
	crypto4xx_free_sa(ctx);
}

static void crypto4xx_ablk_exit(struct crypto_tfm *tfm)
{
	crypto4xx_common_exit(crypto_tfm_ctx(tfm));
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto4xx_alg *amcc_alg;

	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
						CRYPTO_ALG_NEED_FALLBACK |
						CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->sw_cipher.aead))
		return PTR_ERR(ctx->sw_cipher.aead);

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
	crypto4xx_ctx_init(amcc_alg, ctx);
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_request) +
				max(sizeof(struct crypto4xx_ctx), 32 +
				crypto_aead_reqsize(ctx->sw_cipher.aead)));

	return 0;
}
static void crypto4xx_aead_exit(struct crypto_aead *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);

	crypto4xx_common_exit(ctx);
	crypto_free_aead(ctx->sw_cipher.aead);
}
static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
				  struct crypto4xx_alg_common *crypto_alg,
				  int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AEAD:
			rc = crypto_register_aead(&alg->alg.u.aead);
			break;

		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;

		default:
			rc = crypto_register_alg(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}
static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;

		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&alg->alg.u.aead);
			break;

		default:
			crypto_unregister_alg(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}
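
/*
 * Bottom half: walk the packet descriptor ring from the tail and complete
 * every descriptor the engine has marked done.
 */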
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail = core_dev->dev->pdr_tail;
	u32 head = core_dev->dev->pdr_head;

	do {
		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
		pd = &core_dev->dev->pdr[tail];
		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
		     ((READ_ONCE(pd->pd_ctl.w) &
		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
		       PD_CTL_PE_DONE)) {
			crypto4xx_pd_done(core_dev->dev, tail);
			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
		} else {
			/* if tail not done, break */
			break;
		}
	} while (head != tail);
}
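
/*
 * Top half: acknowledge the engine interrupt and defer the real work to the
 * tasklet.
 */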
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
						      u32 clr_val)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
}

static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
					   PPC4XX_TMO_ERR_INT);
}
/**
 * Supported Crypto Algorithms
 */
static struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name		= "cbc(aes)",
		.cra_driver_name	= "cbc-aes-ppc4xx",
		.cra_priority		= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto4xx_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_init		= crypto4xx_ablk_init,
		.cra_exit		= crypto4xx_ablk_exit,
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey		= crypto4xx_setkey_aes_cbc,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	} },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name		= "cfb(aes)",
		.cra_driver_name	= "cfb-aes-ppc4xx",
		.cra_priority		= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto4xx_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_init		= crypto4xx_ablk_init,
		.cra_exit		= crypto4xx_ablk_exit,
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey		= crypto4xx_setkey_aes_cfb,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	} },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name		= "rfc3686(ctr(aes))",
		.cra_driver_name	= "rfc3686-ctr-aes-ppc4xx",
		.cra_priority		= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto4xx_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_init		= crypto4xx_ablk_init,
		.cra_exit		= crypto4xx_ablk_exit,
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE +
						  CTR_RFC3686_NONCE_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE +
						  CTR_RFC3686_NONCE_SIZE,
				.ivsize		= CTR_RFC3686_IV_SIZE,
				.setkey		= crypto4xx_setkey_rfc3686,
				.encrypt	= crypto4xx_rfc3686_encrypt,
				.decrypt	= crypto4xx_rfc3686_decrypt,
			}
		}
	} },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name		= "ecb(aes)",
		.cra_driver_name	= "ecb-aes-ppc4xx",
		.cra_priority		= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto4xx_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_init		= crypto4xx_ablk_init,
		.cra_exit		= crypto4xx_ablk_exit,
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.setkey		= crypto4xx_setkey_aes_ecb,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	} },
	{ .type = CRYPTO_ALG_TYPE_ABLKCIPHER, .u.cipher = {
		.cra_name		= "ofb(aes)",
		.cra_driver_name	= "ofb-aes-ppc4xx",
		.cra_priority		= CRYPTO4XX_CRYPTO_PRIORITY,
		.cra_flags		= CRYPTO_ALG_TYPE_ABLKCIPHER |
					  CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.cra_blocksize		= AES_BLOCK_SIZE,
		.cra_ctxsize		= sizeof(struct crypto4xx_ctx),
		.cra_type		= &crypto_ablkcipher_type,
		.cra_init		= crypto4xx_ablk_init,
		.cra_exit		= crypto4xx_ablk_exit,
		.cra_module		= THIS_MODULE,
		.cra_u			= {
			.ablkcipher = {
				.min_keysize	= AES_MIN_KEY_SIZE,
				.max_keysize	= AES_MAX_KEY_SIZE,
				.ivsize		= AES_IV_SIZE,
				.setkey		= crypto4xx_setkey_aes_ofb,
				.encrypt	= crypto4xx_encrypt,
				.decrypt	= crypto4xx_decrypt,
			}
		}
	} },
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey		= crypto4xx_setkey_aes_ccm,
		.setauthsize	= crypto4xx_setauthsize_aead,
		.encrypt	= crypto4xx_encrypt_aes_ccm,
		.decrypt	= crypto4xx_decrypt_aes_ccm,
		.init		= crypto4xx_aead_init,
		.exit		= crypto4xx_aead_exit,
		.ivsize		= AES_BLOCK_SIZE,
		.maxauthsize	= 16,
		.base = {
			.cra_name	= "ccm(aes)",
			.cra_driver_name = "ccm-aes-ppc4xx",
			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags	= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize	= 1,
			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
			.cra_module	= THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey		= crypto4xx_setkey_aes_gcm,
		.setauthsize	= crypto4xx_setauthsize_aead,
		.encrypt	= crypto4xx_encrypt_aes_gcm,
		.decrypt	= crypto4xx_decrypt_aes_gcm,
		.init		= crypto4xx_aead_init,
		.exit		= crypto4xx_aead_exit,
		.ivsize		= GCM_AES_IV_SIZE,
		.maxauthsize	= 16,
		.base = {
			.cra_name	= "gcm(aes)",
			.cra_driver_name = "gcm-aes-ppc4xx",
			.cra_priority	= CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags	= CRYPTO_ALG_ASYNC |
					  CRYPTO_ALG_NEED_FALLBACK |
					  CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize	= 1,
			.cra_ctxsize	= sizeof(struct crypto4xx_ctx),
			.cra_module	= THIS_MODULE,
		},
	} },
};
/**
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;
	u32 pvr;
	bool is_revb = true;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
		is_revb = false;
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	rc = -ENOMEM;
	if (!core_dev->dev)
		goto err_alloc_dev;

	/*
	 * Older versions of the 460EX/GT have a hardware bug.
	 * Hence they do not support H/W based security intr coalescing
	 */
	pvr = mfspr(SPRN_PVR);
	if (is_revb && ((pvr >> 4) == 0x130218A)) {
		u32 min = PVR_MIN(pvr);

		if (min < 4) {
			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
			is_revb = false;
		}
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->dev->is_revb = is_revb;
	core_dev->device = dev;
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_pdr;

	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, is_revb ?
			 crypto4xx_ce_interrupt_handler_revb :
			 crypto4xx_ce_interrupt_handler, 0,
			 KBUILD_MODNAME, dev);
	if (rc)
		goto err_request_irq;

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	ppc4xx_trng_probe(core_dev);
	return 0;

err_start_dev:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	iounmap(core_dev->dev->ce_base);
err_iomap:
	tasklet_kill(&core_dev->tasklet);
err_build_sdr:
	crypto4xx_destroy_sdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
err_build_pdr:
	crypto4xx_destroy_pdr(core_dev->dev);
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}
static int crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	ppc4xx_trng_remove(core_dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}
static const struct of_device_id crypto4xx_match[] = {
	{ .compatible      = "amcc,ppc4xx-crypto",},
	{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);

static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = crypto4xx_match,
	},
	.probe		= crypto4xx_probe,
	.remove		= crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");