// SPDX-License-Identifier: GPL-2.0-or-later
/**
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <jhsiao@amcc.com>
 *
 * This file implements the AMCC crypto offload Linux device driver for use
 * with Linux CryptoAPI.
 */
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/spinlock_types.h>
#include <linux/random.h>
#include <linux/scatterlist.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/slab.h>

#include <asm/dcr-regs.h>
#include <asm/cacheflush.h>

#include <crypto/aead.h>
#include <crypto/aes.h>
#include <crypto/ctr.h>
#include <crypto/gcm.h>
#include <crypto/sha1.h>
#include <crypto/rng.h>
#include <crypto/scatterwalk.h>
#include <crypto/skcipher.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/rng.h>
#include <crypto/internal/skcipher.h>

#include "crypto4xx_reg_def.h"
#include "crypto4xx_core.h"
#include "crypto4xx_sa.h"
#include "crypto4xx_trng.h"
#define PPC4XX_SEC_VERSION_STR			"0.5"
/**
 * PPC4xx Crypto Engine Initialization Routine
 */
static void crypto4xx_hw_init(struct crypto4xx_device *dev)
{
	union ce_ring_size ring_size;
	union ce_ring_control ring_ctrl;
	union ce_part_ring_size part_ring_size;
	union ce_io_threshold io_threshold;
	u32 rand_num;
	union ce_pe_dma_cfg pe_dma_cfg;
	u32 device_ctrl;

	writel(PPC4XX_BYTE_ORDER, dev->ce_base + CRYPTO4XX_BYTE_ORDER_CFG);
	/* setup pe dma, include reset sg, pdr and pe, then release reset */
	pe_dma_cfg.w = 0;
	pe_dma_cfg.bf.bo_sgpd_en = 1;
	pe_dma_cfg.bf.bo_data_en = 0;
	pe_dma_cfg.bf.bo_sa_en = 1;
	pe_dma_cfg.bf.bo_pd_en = 1;
	pe_dma_cfg.bf.dynamic_sa_en = 1;
	pe_dma_cfg.bf.reset_sg = 1;
	pe_dma_cfg.bf.reset_pdr = 1;
	pe_dma_cfg.bf.reset_pe = 1;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* un-reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 0;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_PDR_BASE);
	writel(dev->pdr_pa, dev->ce_base + CRYPTO4XX_RDR_BASE);
	writel(PPC4XX_PRNG_CTRL_AUTO_EN, dev->ce_base + CRYPTO4XX_PRNG_CTRL);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_L);
	get_random_bytes(&rand_num, sizeof(rand_num));
	writel(rand_num, dev->ce_base + CRYPTO4XX_PRNG_SEED_H);

	ring_size.bf.ring_offset = PPC4XX_PD_SIZE;
	ring_size.bf.ring_size = PPC4XX_NUM_PD;
	writel(ring_size.w, dev->ce_base + CRYPTO4XX_RING_SIZE);
	ring_ctrl.w = 0;
	writel(ring_ctrl.w, dev->ce_base + CRYPTO4XX_RING_CTRL);
	device_ctrl = readl(dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	device_ctrl |= PPC4XX_DC_3DES_EN;
	writel(device_ctrl, dev->ce_base + CRYPTO4XX_DEVICE_CTRL);
	writel(dev->gdr_pa, dev->ce_base + CRYPTO4XX_GATH_RING_BASE);
	writel(dev->sdr_pa, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE);

	part_ring_size.bf.sdr_size = PPC4XX_SDR_SIZE;
	part_ring_size.bf.gdr_size = PPC4XX_GDR_SIZE;
	writel(part_ring_size.w, dev->ce_base + CRYPTO4XX_PART_RING_SIZE);
	writel(PPC4XX_SD_BUFFER_SIZE, dev->ce_base + CRYPTO4XX_PART_RING_CFG);

	io_threshold.bf.output_threshold = PPC4XX_OUTPUT_THRESHOLD;
	io_threshold.bf.input_threshold = PPC4XX_INPUT_THRESHOLD;
	writel(io_threshold.w, dev->ce_base + CRYPTO4XX_IO_THRESHOLD);
	writel(0, dev->ce_base + CRYPTO4XX_PDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_RDR_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_SRC_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_PKT_DEST_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SA_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_GATH_RING_BASE_UADDR);
	writel(0, dev->ce_base + CRYPTO4XX_SCAT_RING_BASE_UADDR);
	/* un-reset pe, sg and pdr */
	pe_dma_cfg.bf.pe_mode = 1;
	pe_dma_cfg.bf.reset_sg = 0;
	pe_dma_cfg.bf.reset_pdr = 0;
	pe_dma_cfg.bf.reset_pe = 0;
	pe_dma_cfg.bf.bo_td_en = 0;
	writel(pe_dma_cfg.w, dev->ce_base + CRYPTO4XX_PE_DMA_CFG);
	/* clear all pending interrupts */
	writel(PPC4XX_INTERRUPT_CLR, dev->ce_base + CRYPTO4XX_INT_CLR);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_DESCR_CNT, dev->ce_base + CRYPTO4XX_INT_DESCR_CNT);
	writel(PPC4XX_INT_CFG, dev->ce_base + CRYPTO4XX_INT_CFG);
	if (dev->is_revb) {
		writel(PPC4XX_INT_TIMEOUT_CNT_REVB << 10,
		       dev->ce_base + CRYPTO4XX_INT_TIMEOUT_CNT);
		writel(PPC4XX_PD_DONE_INT | PPC4XX_TMO_ERR_INT,
		       dev->ce_base + CRYPTO4XX_INT_EN);
	} else {
		writel(PPC4XX_PD_DONE_INT, dev->ce_base + CRYPTO4XX_INT_EN);
	}
}
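
/*
 * Note on SA (security association) handling: each tfm context keeps a pair
 * of hardware SA records, one per direction (as the sa_in/sa_out names
 * suggest).  The size handed to crypto4xx_alloc_sa() is counted in 32-bit
 * words, which is why the allocations below use kcalloc(size, 4, ...) and
 * why crypto4xx_build_pd() later copies sa_len * 4 bytes.
 */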
int crypto4xx_alloc_sa(struct crypto4xx_ctx *ctx, u32 size)
{
	ctx->sa_in = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_in == NULL)
		return -ENOMEM;

	ctx->sa_out = kcalloc(size, 4, GFP_ATOMIC);
	if (ctx->sa_out == NULL) {
		kfree(ctx->sa_in);
		ctx->sa_in = NULL;
		return -ENOMEM;
	}

	ctx->sa_len = size;

	return 0;
}

void crypto4xx_free_sa(struct crypto4xx_ctx *ctx)
{
	kfree(ctx->sa_in);
	ctx->sa_in = NULL;
	kfree(ctx->sa_out);
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}
/**
 * alloc memory for the packet descriptor ring (PDR), its per-descriptor
 * bookkeeping array, and the shadow SA / state-record pools that each
 * packet descriptor points into
 */
static u32 crypto4xx_build_pdr(struct crypto4xx_device *dev)
{
	int i;

	dev->pdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				      &dev->pdr_pa, GFP_KERNEL);
	if (!dev->pdr)
		return -ENOMEM;

	dev->pdr_uinfo = kcalloc(PPC4XX_NUM_PD, sizeof(struct pd_uinfo),
				 GFP_KERNEL);
	if (!dev->pdr_uinfo) {
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);
		return -ENOMEM;
	}

	dev->shadow_sa_pool = dma_alloc_coherent(dev->core_dev->device,
				sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				&dev->shadow_sa_pool_pa,
				GFP_KERNEL);
	if (!dev->shadow_sa_pool)
		return -ENOMEM;

	dev->shadow_sr_pool = dma_alloc_coherent(dev->core_dev->device,
				sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				&dev->shadow_sr_pool_pa, GFP_KERNEL);
	if (!dev->shadow_sr_pool)
		return -ENOMEM;

	for (i = 0; i < PPC4XX_NUM_PD; i++) {
		struct ce_pd *pd = &dev->pdr[i];
		struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[i];

		pd->sa = dev->shadow_sa_pool_pa +
			 sizeof(union shadow_sa_buf) * i;

		/* alloc 256 bytes which is enough for any kind of dynamic sa */
		pd_uinfo->sa_va = &dev->shadow_sa_pool[i].sa;

		/* alloc state record */
		pd_uinfo->sr_va = &dev->shadow_sr_pool[i];
		pd_uinfo->sr_pa = dev->shadow_sr_pool_pa +
				  sizeof(struct sa_state_record) * i;
	}

	return 0;
}
static void crypto4xx_destroy_pdr(struct crypto4xx_device *dev)
{
	if (dev->pdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_pd) * PPC4XX_NUM_PD,
				  dev->pdr, dev->pdr_pa);

	if (dev->shadow_sa_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(union shadow_sa_buf) * PPC4XX_NUM_PD,
				  dev->shadow_sa_pool, dev->shadow_sa_pool_pa);

	if (dev->shadow_sr_pool)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct sa_state_record) * PPC4XX_NUM_PD,
				  dev->shadow_sr_pool, dev->shadow_sr_pool_pa);

	kfree(dev->pdr_uinfo);
}
static u32 crypto4xx_get_pd_from_pdr_nolock(struct crypto4xx_device *dev)
{
	u32 retval;
	u32 tmp;

	retval = dev->pdr_head;
	tmp = (dev->pdr_head + 1) % PPC4XX_NUM_PD;

	if (tmp == dev->pdr_tail)
		return ERING_WAS_FULL;

	dev->pdr_head = tmp;

	return retval;
}
static u32 crypto4xx_put_pd_to_pdr(struct crypto4xx_device *dev, u32 idx)
{
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];
	u32 tail;
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	pd_uinfo->state = PD_ENTRY_FREE;

	if (dev->pdr_tail != PPC4XX_LAST_PD)
		dev->pdr_tail++;
	else
		dev->pdr_tail = 0;
	tail = dev->pdr_tail;
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return tail;
}
/**
 * alloc memory for the gather ring
 * no need to alloc buf for the ring
 * gdr_tail, gdr_head and gdr_count are initialized by this function
 */
static u32 crypto4xx_build_gdr(struct crypto4xx_device *dev)
{
	dev->gdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_gd) * PPC4XX_NUM_GD,
				      &dev->gdr_pa, GFP_KERNEL);
	if (!dev->gdr)
		return -ENOMEM;

	return 0;
}

static inline void crypto4xx_destroy_gdr(struct crypto4xx_device *dev)
{
	dma_free_coherent(dev->core_dev->device,
			  sizeof(struct ce_gd) * PPC4XX_NUM_GD,
			  dev->gdr, dev->gdr_pa);
}
/*
 * Preemption or interrupts must be disabled when this function is called.
 */
static u32 crypto4xx_get_n_gd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_GD)
		return ERING_WAS_FULL;

	retval = dev->gdr_head;
	tmp = (dev->gdr_head + n) % PPC4XX_NUM_GD;
	if (dev->gdr_head > dev->gdr_tail) {
		if (tmp < dev->gdr_head && tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->gdr_head < dev->gdr_tail) {
		if (tmp < dev->gdr_head || tmp >= dev->gdr_tail)
			return ERING_WAS_FULL;
	}
	dev->gdr_head = tmp;

	return retval;
}
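
/*
 * Ring-occupancy check, illustrated: the gather ring is used as a circular
 * buffer with gdr_head chasing gdr_tail.  For illustration only, assume a
 * hypothetical 256-entry ring with head = 250 and tail = 10.  Requesting
 * n = 10 gives tmp = (250 + 10) % 256 = 4; since 4 < head but also 4 < tail,
 * the new head does not cross the tail and the allocation succeeds.
 * Requesting n = 17 gives tmp = 11, which lies inside [tail, head), so
 * ERING_WAS_FULL is returned instead.
 */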
static u32 crypto4xx_put_gd_to_gdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->gdr_tail == dev->gdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->gdr_tail != PPC4XX_LAST_GD)
		dev->gdr_tail++;
	else
		dev->gdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}
static inline struct ce_gd *crypto4xx_get_gdp(struct crypto4xx_device *dev,
					      dma_addr_t *gd_dma, u32 idx)
{
	*gd_dma = dev->gdr_pa + sizeof(struct ce_gd) * idx;

	return &dev->gdr[idx];
}
/**
 * alloc memory for the scatter ring
 * need to alloc buf for the ring
 * sdr_tail, sdr_head and sdr_count are initialized by this function
 */
static u32 crypto4xx_build_sdr(struct crypto4xx_device *dev)
{
	int i;

	dev->scatter_buffer_va =
		dma_alloc_coherent(dev->core_dev->device,
			PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
			&dev->scatter_buffer_pa, GFP_KERNEL);
	if (!dev->scatter_buffer_va)
		return -ENOMEM;

	/* alloc memory for scatter descriptor ring */
	dev->sdr = dma_alloc_coherent(dev->core_dev->device,
				      sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				      &dev->sdr_pa, GFP_KERNEL);
	if (!dev->sdr)
		return -ENOMEM;

	for (i = 0; i < PPC4XX_NUM_SD; i++) {
		dev->sdr[i].ptr = dev->scatter_buffer_pa +
				  PPC4XX_SD_BUFFER_SIZE * i;
	}

	return 0;
}
static void crypto4xx_destroy_sdr(struct crypto4xx_device *dev)
{
	if (dev->sdr)
		dma_free_coherent(dev->core_dev->device,
				  sizeof(struct ce_sd) * PPC4XX_NUM_SD,
				  dev->sdr, dev->sdr_pa);

	if (dev->scatter_buffer_va)
		dma_free_coherent(dev->core_dev->device,
				  PPC4XX_SD_BUFFER_SIZE * PPC4XX_NUM_SD,
				  dev->scatter_buffer_va,
				  dev->scatter_buffer_pa);
}
/*
 * Preemption or interrupts must be disabled when this function is called.
 */
static u32 crypto4xx_get_n_sd(struct crypto4xx_device *dev, int n)
{
	u32 retval;
	u32 tmp;

	if (n >= PPC4XX_NUM_SD)
		return ERING_WAS_FULL;

	retval = dev->sdr_head;
	tmp = (dev->sdr_head + n) % PPC4XX_NUM_SD;
	if (dev->sdr_head > dev->sdr_tail) {
		if (tmp < dev->sdr_head && tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} else if (dev->sdr_head < dev->sdr_tail) {
		if (tmp < dev->sdr_head || tmp >= dev->sdr_tail)
			return ERING_WAS_FULL;
	} /* the head == tail (empty) case is already taken care of */
	dev->sdr_head = tmp;

	return retval;
}
static u32 crypto4xx_put_sd_to_sdr(struct crypto4xx_device *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->core_dev->lock, flags);
	if (dev->sdr_tail == dev->sdr_head) {
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return 0;
	}

	if (dev->sdr_tail != PPC4XX_LAST_SD)
		dev->sdr_tail++;
	else
		dev->sdr_tail = 0;

	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	return 0;
}
static inline struct ce_sd *crypto4xx_get_sdp(struct crypto4xx_device *dev,
					      dma_addr_t *sd_dma, u32 idx)
{
	*sd_dma = dev->sdr_pa + sizeof(struct ce_sd) * idx;

	return &dev->sdr[idx];
}
static void crypto4xx_copy_pkt_to_dst(struct crypto4xx_device *dev,
				      struct ce_pd *pd,
				      struct pd_uinfo *pd_uinfo,
				      u32 nbytes,
				      struct scatterlist *dst)
{
	unsigned int first_sd = pd_uinfo->first_sd;
	unsigned int last_sd;
	unsigned int overflow = 0;
	unsigned int to_copy;
	unsigned int dst_start = 0;

	/*
	 * Because the scatter buffers are all neatly organized in one
	 * big continuous ringbuffer, scatterwalk_map_and_copy() can
	 * be instructed to copy a range of buffers in one go.
	 */
	last_sd = (first_sd + pd_uinfo->num_sd);
	if (last_sd > PPC4XX_LAST_SD) {
		last_sd = PPC4XX_LAST_SD;
		overflow = last_sd % PPC4XX_NUM_SD;
	}

	while (nbytes) {
		void *buf = dev->scatter_buffer_va +
			    first_sd * PPC4XX_SD_BUFFER_SIZE;

		to_copy = min(nbytes, PPC4XX_SD_BUFFER_SIZE *
				      (1 + last_sd - first_sd));
		scatterwalk_map_and_copy(buf, dst, dst_start, to_copy, 1);
		nbytes -= to_copy;

		if (overflow) {
			first_sd = 0;
			last_sd = overflow;
			dst_start += to_copy;
			overflow = 0;
		}
	}
}
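
/*
 * Note on the copy above: because every scatter descriptor points into one
 * contiguous scatter_buffer_va region, a result that spans descriptors
 * first_sd..last_sd can be handed to scatterwalk_map_and_copy() as a single
 * range.  Only when the allocation wrapped past PPC4XX_LAST_SD does a second
 * pass, starting again at scatter buffer 0, copy the remaining bytes.
 */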
static void crypto4xx_copy_digest_to_dst(void *dst,
					 struct pd_uinfo *pd_uinfo,
					 struct crypto4xx_ctx *ctx)
{
	struct dynamic_sa_ctl *sa = (struct dynamic_sa_ctl *) ctx->sa_in;

	if (sa->sa_command_0.bf.hash_alg == SA_HASH_ALG_SHA1) {
		memcpy(dst, pd_uinfo->sr_va->save_digest,
		       SA_HASH_ALG_SHA1_DIGEST_SIZE);
	}
}
static void crypto4xx_ret_sg_desc(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo)
{
	int i;

	if (pd_uinfo->num_gd) {
		for (i = 0; i < pd_uinfo->num_gd; i++)
			crypto4xx_put_gd_to_gdr(dev);
		pd_uinfo->first_gd = 0xffffffff;
		pd_uinfo->num_gd = 0;
	}
	if (pd_uinfo->num_sd) {
		for (i = 0; i < pd_uinfo->num_sd; i++)
			crypto4xx_put_sd_to_sdr(dev);

		pd_uinfo->first_sd = 0xffffffff;
		pd_uinfo->num_sd = 0;
	}
}
static void crypto4xx_cipher_done(struct crypto4xx_device *dev,
				  struct pd_uinfo *pd_uinfo,
				  struct ce_pd *pd)
{
	struct skcipher_request *req;
	struct scatterlist *dst;
	dma_addr_t addr;

	req = skcipher_request_cast(pd_uinfo->async_req);

	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  req->cryptlen, req->dst);
	} else {
		dst = pd_uinfo->dest_va;
		addr = dma_map_page(dev->core_dev->device, sg_page(dst),
				    dst->offset, dst->length, DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.save_iv == SA_SAVE_IV) {
		struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);

		crypto4xx_memcpy_from_le32((u32 *)req->iv,
			pd_uinfo->sr_va->save_iv,
			crypto_skcipher_ivsize(skcipher));
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		skcipher_request_complete(req, -EINPROGRESS);
	skcipher_request_complete(req, 0);
}
static void crypto4xx_ahash_done(struct crypto4xx_device *dev,
				 struct pd_uinfo *pd_uinfo)
{
	struct crypto4xx_ctx *ctx;
	struct ahash_request *ahash_req;

	ahash_req = ahash_request_cast(pd_uinfo->async_req);
	ctx = crypto_tfm_ctx(ahash_req->base.tfm);

	crypto4xx_copy_digest_to_dst(ahash_req->result, pd_uinfo, ctx);
	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		ahash_request_complete(ahash_req, -EINPROGRESS);
	ahash_request_complete(ahash_req, 0);
}
static void crypto4xx_aead_done(struct crypto4xx_device *dev,
				struct pd_uinfo *pd_uinfo,
				struct ce_pd *pd)
{
	struct aead_request *aead_req = container_of(pd_uinfo->async_req,
		struct aead_request, base);
	struct scatterlist *dst = pd_uinfo->dest_va;
	size_t cp_len = crypto_aead_authsize(
		crypto_aead_reqtfm(aead_req));
	u32 icv[AES_BLOCK_SIZE];
	int err = 0;

	if (pd_uinfo->sa_va->sa_command_0.bf.scatter) {
		crypto4xx_copy_pkt_to_dst(dev, pd, pd_uinfo,
					  pd->pd_ctl_len.bf.pkt_len,
					  dst);
	} else {
		dma_unmap_page(dev->core_dev->device, pd->dest, dst->length,
			       DMA_FROM_DEVICE);
	}

	if (pd_uinfo->sa_va->sa_command_0.bf.dir == DIR_OUTBOUND) {
		/* append icv at the end */
		crypto4xx_memcpy_from_le32(icv, pd_uinfo->sr_va->save_digest,
					   sizeof(icv));

		scatterwalk_map_and_copy(icv, dst, aead_req->cryptlen,
					 cp_len, 1);
	} else {
		/* check icv at the end */
		scatterwalk_map_and_copy(icv, aead_req->src,
			aead_req->assoclen + aead_req->cryptlen -
			cp_len, cp_len, 0);

		crypto4xx_memcpy_from_le32(icv, icv, sizeof(icv));

		if (crypto_memneq(icv, pd_uinfo->sr_va->save_digest, cp_len))
			err = -EBADMSG;
	}

	crypto4xx_ret_sg_desc(dev, pd_uinfo);

	if (pd->pd_ctl.bf.status & 0xff) {
		if (!__ratelimit(&dev->aead_ratelimit)) {
			if (pd->pd_ctl.bf.status & 2)
				pr_err("pad fail error\n");
			if (pd->pd_ctl.bf.status & 4)
				pr_err("seqnum fail\n");
			if (pd->pd_ctl.bf.status & 8)
				pr_err("error _notify\n");
			pr_err("aead return err status = 0x%02x\n",
			       pd->pd_ctl.bf.status & 0xff);
			pr_err("pd pad_ctl = 0x%08x\n",
			       pd->pd_ctl.bf.pd_pad_ctl);
		}
		err = -EINVAL;
	}

	if (pd_uinfo->state & PD_ENTRY_BUSY)
		aead_request_complete(aead_req, -EINPROGRESS);

	aead_request_complete(aead_req, err);
}
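
/*
 * ICV handling summary: for outbound (encrypt) requests the engine-computed
 * digest is appended after the ciphertext in the destination scatterlist;
 * for inbound (decrypt) requests the ICV carried in the source is compared
 * against the saved digest with crypto_memneq(), so the comparison does not
 * leak timing information about where a mismatch occurred.
 */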
static void crypto4xx_pd_done(struct crypto4xx_device *dev, u32 idx)
{
	struct ce_pd *pd = &dev->pdr[idx];
	struct pd_uinfo *pd_uinfo = &dev->pdr_uinfo[idx];

	switch (crypto_tfm_alg_type(pd_uinfo->async_req->tfm)) {
	case CRYPTO_ALG_TYPE_SKCIPHER:
		crypto4xx_cipher_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AEAD:
		crypto4xx_aead_done(dev, pd_uinfo, pd);
		break;
	case CRYPTO_ALG_TYPE_AHASH:
		crypto4xx_ahash_done(dev, pd_uinfo);
		break;
	}
}
static void crypto4xx_stop_all(struct crypto4xx_core_device *core_dev)
{
	crypto4xx_destroy_pdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_sdr(core_dev->dev);
	iounmap(core_dev->dev->ce_base);
	kfree(core_dev->dev);
	kfree(core_dev);
}
static u32 get_next_gd(u32 current)
{
	if (current != PPC4XX_LAST_GD)
		return current + 1;
	else
		return 0;
}

static u32 get_next_sd(u32 current)
{
	if (current != PPC4XX_LAST_SD)
		return current + 1;
	else
		return 0;
}
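
/*
 * crypto4xx_build_pd() is the common submission path for all algorithms.
 * In outline it: works out how many gather (GD) and scatter (SD)
 * descriptors the request needs, reserves those ring entries plus one
 * packet descriptor (PD) while holding core_dev->lock, copies the caller's
 * SA template and IV into the shadow buffers of the chosen PD, maps the
 * source/destination scatterlists for DMA, and finally kicks the packet
 * engine by writing to CRYPTO4XX_INT_DESCR_RD.
 */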
int crypto4xx_build_pd(struct crypto_async_request *req,
		       struct crypto4xx_ctx *ctx,
		       struct scatterlist *src,
		       struct scatterlist *dst,
		       const unsigned int datalen,
		       const __le32 *iv, const u32 iv_len,
		       const struct dynamic_sa_ctl *req_sa,
		       const unsigned int sa_len,
		       const unsigned int assoclen,
		       struct scatterlist *_dst)
{
	struct crypto4xx_device *dev = ctx->dev;
	struct dynamic_sa_ctl *sa;
	struct ce_gd *gd;
	struct ce_pd *pd;
	u32 num_gd, num_sd;
	u32 fst_gd = 0xffffffff;
	u32 fst_sd = 0xffffffff;
	u32 pd_entry;
	unsigned long flags;
	struct pd_uinfo *pd_uinfo;
	unsigned int nbytes = datalen;
	size_t offset_to_sr_ptr;
	u32 gd_idx = 0;
	int tmp;
	bool is_busy, force_sd;

	/*
	 * There's a very subtle/disguised "bug" in the hardware that
	 * gets indirectly mentioned in 18.1.3.5 Encryption/Decryption
	 * of the hardware spec:
	 * *drum roll* the AES/(T)DES OFB and CFB modes are listed as
	 * operation modes for >>> "Block ciphers" <<<.
	 *
	 * To work around this issue and stop the hardware from causing
	 * "overran dst buffer" on crypttexts that are not a multiple
	 * of 16 (AES_BLOCK_SIZE), we force the driver to use the
	 * scatter buffers.
	 */
	force_sd = (req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_CFB
		|| req_sa->sa_command_1.bf.crypto_mode9_8 == CRYPTO_MODE_OFB)
		&& (datalen % AES_BLOCK_SIZE);

	/* figure how many gd are needed */
	tmp = sg_nents_for_len(src, assoclen + datalen);
	if (tmp < 0) {
		dev_err(dev->core_dev->device, "Invalid number of src SG.\n");
		return tmp;
	}
	if (tmp == 1)
		tmp = 0;
	num_gd = tmp;

	if (assoclen) {
		nbytes += assoclen;
		dst = scatterwalk_ffwd(_dst, dst, assoclen);
	}

	/* figure how many sd are needed */
	if (sg_is_last(dst) && force_sd == false) {
		num_sd = 0;
	} else {
		if (datalen > PPC4XX_SD_BUFFER_SIZE) {
			num_sd = datalen / PPC4XX_SD_BUFFER_SIZE;
			if (datalen % PPC4XX_SD_BUFFER_SIZE)
				num_sd++;
		} else {
			num_sd = 1;
		}
	}

	/*
	 * The following section of code needs to be protected.
	 * The gather ring and scatter ring entries need to be consecutive;
	 * if we run out of any kind of descriptor, the descriptors already
	 * acquired must be returned to their original place.
	 */
	spin_lock_irqsave(&dev->core_dev->lock, flags);
	/*
	 * Let the caller know to slow down, once more than 13/16ths = 81%
	 * of the available data contexts are being used simultaneously.
	 *
	 * With PPC4XX_NUM_PD = 256, this will leave a "backlog queue" of
	 * 31 more contexts before new requests have to be rejected.
	 */
	if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) {
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 13) / 16);
	} else {
		/*
		 * To fix contention issues between ipsec (no backlog) and
		 * dm-crypt (backlog) reserve 32 entries for "no backlog"
		 * data contexts.
		 */
		is_busy = ((dev->pdr_head - dev->pdr_tail) % PPC4XX_NUM_PD) >=
			((PPC4XX_NUM_PD * 15) / 16);

		if (is_busy) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EBUSY;
		}
	}

	if (num_gd) {
		fst_gd = crypto4xx_get_n_gd(dev, num_gd);
		if (fst_gd == ERING_WAS_FULL) {
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	if (num_sd) {
		fst_sd = crypto4xx_get_n_sd(dev, num_sd);
		if (fst_sd == ERING_WAS_FULL) {
			if (num_gd)
				dev->gdr_head = fst_gd;
			spin_unlock_irqrestore(&dev->core_dev->lock, flags);
			return -EAGAIN;
		}
	}
	pd_entry = crypto4xx_get_pd_from_pdr_nolock(dev);
	if (pd_entry == ERING_WAS_FULL) {
		if (num_gd)
			dev->gdr_head = fst_gd;
		if (num_sd)
			dev->sdr_head = fst_sd;
		spin_unlock_irqrestore(&dev->core_dev->lock, flags);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&dev->core_dev->lock, flags);

	pd = &dev->pdr[pd_entry];
	pd->sa_len = sa_len;

	pd_uinfo = &dev->pdr_uinfo[pd_entry];
	pd_uinfo->num_gd = num_gd;
	pd_uinfo->num_sd = num_sd;
	pd_uinfo->dest_va = dst;
	pd_uinfo->async_req = req;

	if (iv_len)
		memcpy(pd_uinfo->sr_va->save_iv, iv, iv_len);

	sa = pd_uinfo->sa_va;
	memcpy(sa, req_sa, sa_len * 4);

	sa->sa_command_1.bf.hash_crypto_offset = (assoclen >> 2);
	offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(sa);
	*(u32 *)((unsigned long)sa + offset_to_sr_ptr) = pd_uinfo->sr_pa;

	if (num_gd) {
		dma_addr_t gd_dma;
		struct scatterlist *sg;

		/* get first gd we are going to use */
		gd_idx = fst_gd;
		pd_uinfo->first_gd = fst_gd;
		gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
		pd->src = gd_dma;
		/* enable gather */
		sa->sa_command_0.bf.gather = 1;
		/* walk the sg, and setup gather array */
		sg = src;
		while (nbytes) {
			size_t len;

			len = min(sg->length, nbytes);
			gd->ptr = dma_map_page(dev->core_dev->device,
				sg_page(sg), sg->offset, len, DMA_TO_DEVICE);
			gd->ctl_len.len = len;
			gd->ctl_len.done = 0;
			gd->ctl_len.ready = 1;
			if (len >= nbytes)
				break;

			nbytes -= sg->length;
			gd_idx = get_next_gd(gd_idx);
			gd = crypto4xx_get_gdp(dev, &gd_dma, gd_idx);
			sg = sg_next(sg);
		}
	} else {
		pd->src = (u32)dma_map_page(dev->core_dev->device, sg_page(src),
				src->offset, min(nbytes, src->length),
				DMA_TO_DEVICE);
		/*
		 * Disable gather in sa command
		 */
		sa->sa_command_0.bf.gather = 0;
		/*
		 * Indicate gather array is not used
		 */
		pd_uinfo->first_gd = 0xffffffff;
	}
	if (!num_sd) {
		/*
		 * we know the application gives us dst as a whole piece of
		 * memory, no need to use the scatter ring.
		 */
		pd_uinfo->first_sd = 0xffffffff;
		sa->sa_command_0.bf.scatter = 0;
		pd->dest = (u32)dma_map_page(dev->core_dev->device,
				sg_page(dst), dst->offset,
				min(datalen, dst->length),
				DMA_TO_DEVICE);
	} else {
		u32 sd_idx = fst_sd;
		dma_addr_t sd_dma;
		struct ce_sd *sd = NULL;

		nbytes = datalen;
		sa->sa_command_0.bf.scatter = 1;
		pd_uinfo->first_sd = fst_sd;
		sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
		pd->dest = sd_dma;
		/* setup scatter descriptor */
		sd->ctl.done = 0;
		sd->ctl.rdy = 1;
		/* sd->ptr should be setup by sd_init routine */
		if (nbytes >= PPC4XX_SD_BUFFER_SIZE)
			nbytes -= PPC4XX_SD_BUFFER_SIZE;
		else
			nbytes = 0;
		while (nbytes) {
			sd_idx = get_next_sd(sd_idx);
			sd = crypto4xx_get_sdp(dev, &sd_dma, sd_idx);
			/* setup scatter descriptor */
			sd->ctl.done = 0;
			sd->ctl.rdy = 1;
			if (nbytes >= PPC4XX_SD_BUFFER_SIZE) {
				nbytes -= PPC4XX_SD_BUFFER_SIZE;
			} else {
				/*
				 * SD entry can hold PPC4XX_SD_BUFFER_SIZE,
				 * which is more than nbytes, so done.
				 */
				nbytes = 0;
			}
		}
	}

	pd->pd_ctl.w = PD_CTL_HOST_READY |
		((crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AHASH) ||
		 (crypto_tfm_alg_type(req->tfm) == CRYPTO_ALG_TYPE_AEAD) ?
			PD_CTL_HASH_FINAL : 0);
	pd->pd_ctl_len.w = 0x00400000 | (assoclen + datalen);
	pd_uinfo->state = PD_ENTRY_INUSE | (is_busy ? PD_ENTRY_BUSY : 0);

	wmb();
	/* write any value to push engine to read a pd */
	writel(0, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	writel(1, dev->ce_base + CRYPTO4XX_INT_DESCR_RD);
	return is_busy ? -EBUSY : -EINPROGRESS;
}
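
/*
 * The return values above follow the asynchronous CryptoAPI convention:
 * -EINPROGRESS means the request was queued to the hardware, while -EBUSY
 * (together with PD_ENTRY_BUSY) tells a CRYPTO_TFM_REQ_MAY_BACKLOG caller
 * that the request was accepted but the queue is congested and further
 * submissions should be throttled.
 */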
/*
 * Algorithm Registration Functions
 */
static void crypto4xx_ctx_init(struct crypto4xx_alg *amcc_alg,
			       struct crypto4xx_ctx *ctx)
{
	ctx->dev = amcc_alg->dev;
	ctx->sa_in = NULL;
	ctx->sa_out = NULL;
	ctx->sa_len = 0;
}
static int crypto4xx_sk_init(struct crypto_skcipher *sk)
{
	struct skcipher_alg *alg = crypto_skcipher_alg(sk);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	if (alg->base.cra_flags & CRYPTO_ALG_NEED_FALLBACK) {
		ctx->sw_cipher.cipher =
			crypto_alloc_sync_skcipher(alg->base.cra_name, 0,
						   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->sw_cipher.cipher))
			return PTR_ERR(ctx->sw_cipher.cipher);
	}

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.cipher);
	crypto4xx_ctx_init(amcc_alg, ctx);
	return 0;
}
static void crypto4xx_common_exit(struct crypto4xx_ctx *ctx)
{
	crypto4xx_free_sa(ctx);
}

static void crypto4xx_sk_exit(struct crypto_skcipher *sk)
{
	struct crypto4xx_ctx *ctx = crypto_skcipher_ctx(sk);

	crypto4xx_common_exit(ctx);
	if (ctx->sw_cipher.cipher)
		crypto_free_sync_skcipher(ctx->sw_cipher.cipher);
}
static int crypto4xx_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);
	struct crypto4xx_alg *amcc_alg;

	ctx->sw_cipher.aead = crypto_alloc_aead(alg->base.cra_name, 0,
						CRYPTO_ALG_NEED_FALLBACK |
						CRYPTO_ALG_ASYNC);
	if (IS_ERR(ctx->sw_cipher.aead))
		return PTR_ERR(ctx->sw_cipher.aead);

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.aead);
	crypto4xx_ctx_init(amcc_alg, ctx);
	crypto_aead_set_reqsize(tfm, max(sizeof(struct aead_request) + 32 +
				crypto_aead_reqsize(ctx->sw_cipher.aead),
				sizeof(struct crypto4xx_aead_reqctx)));
	return 0;
}

static void crypto4xx_aead_exit(struct crypto_aead *tfm)
{
	struct crypto4xx_ctx *ctx = crypto_aead_ctx(tfm);

	crypto4xx_common_exit(ctx);
	crypto_free_aead(ctx->sw_cipher.aead);
}
static int crypto4xx_register_alg(struct crypto4xx_device *sec_dev,
				  struct crypto4xx_alg_common *crypto_alg,
				  int array_size)
{
	struct crypto4xx_alg *alg;
	int i;
	int rc = 0;

	for (i = 0; i < array_size; i++) {
		alg = kzalloc(sizeof(struct crypto4xx_alg), GFP_KERNEL);
		if (!alg)
			return -ENOMEM;

		alg->alg = crypto_alg[i];
		alg->dev = sec_dev;

		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AEAD:
			rc = crypto_register_aead(&alg->alg.u.aead);
			break;
		case CRYPTO_ALG_TYPE_AHASH:
			rc = crypto_register_ahash(&alg->alg.u.hash);
			break;
		case CRYPTO_ALG_TYPE_RNG:
			rc = crypto_register_rng(&alg->alg.u.rng);
			break;
		default:
			rc = crypto_register_skcipher(&alg->alg.u.cipher);
			break;
		}

		if (rc)
			kfree(alg);
		else
			list_add_tail(&alg->entry, &sec_dev->alg_list);
	}

	return 0;
}

static void crypto4xx_unregister_alg(struct crypto4xx_device *sec_dev)
{
	struct crypto4xx_alg *alg, *tmp;

	list_for_each_entry_safe(alg, tmp, &sec_dev->alg_list, entry) {
		list_del(&alg->entry);
		switch (alg->alg.type) {
		case CRYPTO_ALG_TYPE_AHASH:
			crypto_unregister_ahash(&alg->alg.u.hash);
			break;
		case CRYPTO_ALG_TYPE_AEAD:
			crypto_unregister_aead(&alg->alg.u.aead);
			break;
		case CRYPTO_ALG_TYPE_RNG:
			crypto_unregister_rng(&alg->alg.u.rng);
			break;
		default:
			crypto_unregister_skcipher(&alg->alg.u.cipher);
		}
		kfree(alg);
	}
}
static void crypto4xx_bh_tasklet_cb(unsigned long data)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);
	struct pd_uinfo *pd_uinfo;
	struct ce_pd *pd;
	u32 tail = core_dev->dev->pdr_tail;
	u32 head = core_dev->dev->pdr_head;

	do {
		pd_uinfo = &core_dev->dev->pdr_uinfo[tail];
		pd = &core_dev->dev->pdr[tail];
		if ((pd_uinfo->state & PD_ENTRY_INUSE) &&
		     ((READ_ONCE(pd->pd_ctl.w) &
		       (PD_CTL_PE_DONE | PD_CTL_HOST_READY)) ==
		       PD_CTL_PE_DONE)) {
			crypto4xx_pd_done(core_dev->dev, tail);
			tail = crypto4xx_put_pd_to_pdr(core_dev->dev, tail);
		} else {
			/* if tail not done, break */
			break;
		}
	} while (head != tail);
}
static inline irqreturn_t crypto4xx_interrupt_handler(int irq, void *data,
						      u32 clr_val)
{
	struct device *dev = (struct device *)data;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	writel(clr_val, core_dev->dev->ce_base + CRYPTO4XX_INT_CLR);
	tasklet_schedule(&core_dev->tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t crypto4xx_ce_interrupt_handler(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR);
}

static irqreturn_t crypto4xx_ce_interrupt_handler_revb(int irq, void *data)
{
	return crypto4xx_interrupt_handler(irq, data, PPC4XX_INTERRUPT_CLR |
					   PPC4XX_TMO_ERR_INT);
}
static int ppc4xx_prng_data_read(struct crypto4xx_device *dev,
				 u8 *data, unsigned int max)
{
	unsigned int i, curr = 0;
	u32 val[2];

	do {
		/* trigger PRN generation */
		writel(PPC4XX_PRNG_CTRL_AUTO_EN,
		       dev->ce_base + CRYPTO4XX_PRNG_CTRL);

		for (i = 0; i < 1024; i++) {
			/* usually 19 iterations are enough */
			if ((readl(dev->ce_base + CRYPTO4XX_PRNG_STAT) &
			     CRYPTO4XX_PRNG_STAT_BUSY))
				continue;

			val[0] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_0);
			val[1] = readl_be(dev->ce_base + CRYPTO4XX_PRNG_RES_1);
			break;
		}
		if (i == 1024)
			return -ETIMEDOUT;

		if ((max - curr) >= 8) {
			memcpy(data, &val, 8);
			data += 8;
			curr += 8;
		} else {
			/* copy only remaining bytes */
			memcpy(data, &val, max - curr);
			curr = max;
		}
	} while (curr < max);

	return curr;
}
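
/*
 * Each successful poll of the PRNG yields two 32-bit result registers
 * (CRYPTO4XX_PRNG_RES_0/1), i.e. 8 bytes per pass of the loop above, which
 * is why the copy-out logic works in 8-byte chunks until fewer than 8 bytes
 * remain.
 */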
static int crypto4xx_prng_generate(struct crypto_rng *tfm,
				   const u8 *src, unsigned int slen,
				   u8 *dstn, unsigned int dlen)
{
	struct rng_alg *alg = crypto_rng_alg(tfm);
	struct crypto4xx_alg *amcc_alg;
	struct crypto4xx_device *dev;
	int ret;

	amcc_alg = container_of(alg, struct crypto4xx_alg, alg.u.rng);
	dev = amcc_alg->dev;

	mutex_lock(&dev->core_dev->rng_lock);
	ret = ppc4xx_prng_data_read(dev, dstn, dlen);
	mutex_unlock(&dev->core_dev->rng_lock);
	return ret;
}

static int crypto4xx_prng_seed(struct crypto_rng *tfm, const u8 *seed,
			       unsigned int slen)
{
	return 0;
}
/*
 * Supported Crypto Algorithms
 */
static struct crypto4xx_alg_common crypto4xx_alg[] = {
	/* Crypto AES modes */
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cbc(aes)",
			.cra_driver_name = "cbc-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cbc,
		.encrypt = crypto4xx_encrypt_iv_block,
		.decrypt = crypto4xx_decrypt_iv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "cfb(aes)",
			.cra_driver_name = "cfb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_cfb,
		.encrypt = crypto4xx_encrypt_iv_stream,
		.decrypt = crypto4xx_decrypt_iv_stream,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ctr(aes)",
			.cra_driver_name = "ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ctr,
		.encrypt = crypto4xx_encrypt_ctr,
		.decrypt = crypto4xx_decrypt_ctr,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "rfc3686(ctr(aes))",
			.cra_driver_name = "rfc3686-ctr-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE + CTR_RFC3686_NONCE_SIZE,
		.ivsize = CTR_RFC3686_IV_SIZE,
		.setkey = crypto4xx_setkey_rfc3686,
		.encrypt = crypto4xx_rfc3686_encrypt,
		.decrypt = crypto4xx_rfc3686_decrypt,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ecb(aes)",
			.cra_driver_name = "ecb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_blocksize = AES_BLOCK_SIZE,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = crypto4xx_setkey_aes_ecb,
		.encrypt = crypto4xx_encrypt_noiv_block,
		.decrypt = crypto4xx_decrypt_noiv_block,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },
	{ .type = CRYPTO_ALG_TYPE_SKCIPHER, .u.cipher = {
		.base = {
			.cra_name = "ofb(aes)",
			.cra_driver_name = "ofb-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_IV_SIZE,
		.setkey = crypto4xx_setkey_aes_ofb,
		.encrypt = crypto4xx_encrypt_iv_stream,
		.decrypt = crypto4xx_decrypt_iv_stream,
		.init = crypto4xx_sk_init,
		.exit = crypto4xx_sk_exit,
	} },

	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_ccm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_ccm,
		.decrypt = crypto4xx_decrypt_aes_ccm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = AES_BLOCK_SIZE,
		.base = {
			.cra_name = "ccm(aes)",
			.cra_driver_name = "ccm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_AEAD, .u.aead = {
		.setkey = crypto4xx_setkey_aes_gcm,
		.setauthsize = crypto4xx_setauthsize_aead,
		.encrypt = crypto4xx_encrypt_aes_gcm,
		.decrypt = crypto4xx_decrypt_aes_gcm,
		.init = crypto4xx_aead_init,
		.exit = crypto4xx_aead_exit,
		.ivsize = GCM_AES_IV_SIZE,
		.base = {
			.cra_name = "gcm(aes)",
			.cra_driver_name = "gcm-aes-ppc4xx",
			.cra_priority = CRYPTO4XX_CRYPTO_PRIORITY,
			.cra_flags = CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK |
				CRYPTO_ALG_KERN_DRIVER_ONLY,
			.cra_ctxsize = sizeof(struct crypto4xx_ctx),
			.cra_module = THIS_MODULE,
		},
	} },
	{ .type = CRYPTO_ALG_TYPE_RNG, .u.rng = {
		.base = {
			.cra_name = "stdrng",
			.cra_driver_name = "crypto4xx_rng",
			.cra_priority = 300,
			.cra_module = THIS_MODULE,
		},
		.generate = crypto4xx_prng_generate,
		.seed = crypto4xx_prng_seed,
	} },
};
/*
 * Module Initialization Routine
 */
static int crypto4xx_probe(struct platform_device *ofdev)
{
	int rc;
	struct resource res;
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev;
	u32 pvr;
	bool is_revb = true;

	rc = of_address_to_resource(ofdev->dev.of_node, 0, &res);
	if (rc)
		return -ENODEV;

	if (of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-crypto")) {
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) | PPC460EX_CE_RESET);
		mtdcri(SDR0, PPC460EX_SDR0_SRST,
		       mfdcri(SDR0, PPC460EX_SDR0_SRST) & ~PPC460EX_CE_RESET);
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc405ex-crypto")) {
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) | PPC405EX_CE_RESET);
		mtdcri(SDR0, PPC405EX_SDR0_SRST,
		       mfdcri(SDR0, PPC405EX_SDR0_SRST) & ~PPC405EX_CE_RESET);
		is_revb = false;
	} else if (of_find_compatible_node(NULL, NULL,
			"amcc,ppc460sx-crypto")) {
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) | PPC460SX_CE_RESET);
		mtdcri(SDR0, PPC460SX_SDR0_SRST,
		       mfdcri(SDR0, PPC460SX_SDR0_SRST) & ~PPC460SX_CE_RESET);
	} else {
		printk(KERN_ERR "Crypto Function Not supported!\n");
		return -EINVAL;
	}

	core_dev = kzalloc(sizeof(struct crypto4xx_core_device), GFP_KERNEL);
	if (!core_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, core_dev);
	core_dev->ofdev = ofdev;
	core_dev->dev = kzalloc(sizeof(struct crypto4xx_device), GFP_KERNEL);
	rc = -ENOMEM;
	if (!core_dev->dev)
		goto err_alloc_dev;

	/*
	 * Older versions of 460EX/GT have a hardware bug.
	 * Hence they do not support H/W based security intr coalescing.
	 */
	pvr = mfspr(SPRN_PVR);
	if (is_revb && ((pvr >> 4) == 0x130218A)) {
		u32 min = PVR_MIN(pvr);

		if (min < 4) {
			dev_info(dev, "RevA detected - disable interrupt coalescing\n");
			is_revb = false;
		}
	}

	core_dev->dev->core_dev = core_dev;
	core_dev->dev->is_revb = is_revb;
	core_dev->device = dev;
	mutex_init(&core_dev->rng_lock);
	spin_lock_init(&core_dev->lock);
	INIT_LIST_HEAD(&core_dev->dev->alg_list);
	ratelimit_default_init(&core_dev->dev->aead_ratelimit);
	rc = crypto4xx_build_sdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;
	rc = crypto4xx_build_pdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	rc = crypto4xx_build_gdr(core_dev->dev);
	if (rc)
		goto err_build_sdr;

	/* Init tasklet for bottom half processing */
	tasklet_init(&core_dev->tasklet, crypto4xx_bh_tasklet_cb,
		     (unsigned long) dev);

	core_dev->dev->ce_base = of_iomap(ofdev->dev.of_node, 0);
	if (!core_dev->dev->ce_base) {
		dev_err(dev, "failed to of_iomap\n");
		rc = -ENOMEM;
		goto err_iomap;
	}

	/* Register for Crypto isr, Crypto Engine IRQ */
	core_dev->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
	rc = request_irq(core_dev->irq, is_revb ?
			 crypto4xx_ce_interrupt_handler_revb :
			 crypto4xx_ce_interrupt_handler, 0,
			 KBUILD_MODNAME, dev);
	if (rc)
		goto err_request_irq;

	/* need to setup pdr, rdr, gdr and sdr before this */
	crypto4xx_hw_init(core_dev->dev);

	/* Register security algorithms with Linux CryptoAPI */
	rc = crypto4xx_register_alg(core_dev->dev, crypto4xx_alg,
				    ARRAY_SIZE(crypto4xx_alg));
	if (rc)
		goto err_start_dev;

	ppc4xx_trng_probe(core_dev);
	return 0;

err_start_dev:
	free_irq(core_dev->irq, dev);
err_request_irq:
	irq_dispose_mapping(core_dev->irq);
	iounmap(core_dev->dev->ce_base);
err_iomap:
	tasklet_kill(&core_dev->tasklet);
err_build_sdr:
	crypto4xx_destroy_sdr(core_dev->dev);
	crypto4xx_destroy_gdr(core_dev->dev);
	crypto4xx_destroy_pdr(core_dev->dev);
	kfree(core_dev->dev);
err_alloc_dev:
	kfree(core_dev);

	return rc;
}
static int crypto4xx_remove(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct crypto4xx_core_device *core_dev = dev_get_drvdata(dev);

	ppc4xx_trng_remove(core_dev);

	free_irq(core_dev->irq, dev);
	irq_dispose_mapping(core_dev->irq);

	tasklet_kill(&core_dev->tasklet);
	/* Un-register with Linux CryptoAPI */
	crypto4xx_unregister_alg(core_dev->dev);
	mutex_destroy(&core_dev->rng_lock);
	/* Free all allocated memory */
	crypto4xx_stop_all(core_dev);

	return 0;
}
static const struct of_device_id crypto4xx_match[] = {
	{ .compatible = "amcc,ppc4xx-crypto", },
	{ },
};
MODULE_DEVICE_TABLE(of, crypto4xx_match);
static struct platform_driver crypto4xx_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = crypto4xx_match,
	},
	.probe = crypto4xx_probe,
	.remove = crypto4xx_remove,
};

module_platform_driver(crypto4xx_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("James Hsiao <jhsiao@amcc.com>");
MODULE_DESCRIPTION("Driver for AMCC PPC4xx crypto accelerator");