/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>
enum ccp_memtype {
	CCP_MEMTYPE_SYSTEM = 0,
	CCP_MEMTYPE_KSB,
};

struct ccp_dma_info {
	dma_addr_t address;
	unsigned int offset;
	unsigned int length;
	enum dma_data_direction dir;
};

struct ccp_dm_workarea {
	struct device *dev;
	struct dma_pool *dma_pool;
	unsigned int length;

	u8 *address;
	struct ccp_dma_info dma;
};

struct ccp_sg_workarea {
	struct scatterlist *sg;
	unsigned int nents;
	unsigned int length;

	struct scatterlist *dma_sg;
	struct device *dma_dev;
	unsigned int dma_count;
	enum dma_data_direction dma_dir;

	unsigned int sg_used;

	u64 bytes_left;
};

struct ccp_data {
	struct ccp_sg_workarea sg_wa;
	struct ccp_dm_workarea dm_wa;
};

struct ccp_mem {
	enum ccp_memtype type;
	union {
		struct ccp_dma_info dma;
		u32 ksb;
	} u;
};

struct ccp_aes_op {
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;
	enum ccp_aes_action action;
};

struct ccp_xts_aes_op {
	enum ccp_aes_action action;
	enum ccp_xts_aes_unit_size unit_size;
};

struct ccp_sha_op {
	enum ccp_sha_type type;
	u64 msg_bits;
};

struct ccp_rsa_op {
	u32 mod_size;
	u32 input_len;
};

struct ccp_passthru_op {
	enum ccp_passthru_bitwise bit_mod;
	enum ccp_passthru_byteswap byte_swap;
};

struct ccp_ecc_op {
	enum ccp_ecc_function function;
};

struct ccp_op {
	struct ccp_cmd_queue *cmd_q;

	u32 jobid;
	u32 ioc;
	u32 soc;
	u32 ksb_key;
	u32 ksb_ctx;
	u32 init;
	u32 eom;

	struct ccp_mem src;
	struct ccp_mem dst;

	union {
		struct ccp_aes_op aes;
		struct ccp_xts_aes_op xts;
		struct ccp_sha_op sha;
		struct ccp_rsa_op rsa;
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
};
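/* Overview (derived from the code below): struct ccp_op is the per-request
 * descriptor that the ccp_perform_*() helpers pack into the six CMD_REQ
 * register words, while struct ccp_data pairs a caller scatterlist (sg_wa)
 * with a DMA bounce buffer (dm_wa) used when the data is not block-aligned.
 */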
/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};
/* The CCP cannot perform zero-length sha operations so the caller
 * is required to buffer data for the final operation. However, a
 * sha operation for a message with a total length of zero is valid
 * so known values are required to supply the result.
 */
static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
	0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
	0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
	0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
	0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
};

static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};
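/* These tables are the well-known SHA-1/SHA-224/SHA-256 digests of the
 * empty message (e.g. SHA-256("") = e3b0c442...b855), zero-padded out to
 * CCP_SHA_CTXSIZE so they can be copied straight into the caller's
 * context buffer.
 */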
static u32 ccp_addr_lo(struct ccp_dma_info *info)
{
	return lower_32_bits(info->address + info->offset);
}

static u32 ccp_addr_hi(struct ccp_dma_info *info)
{
	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
}
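/* Worked example (values are illustrative only): for a DMA address of
 * 0x12_3456_7890 with offset 0, ccp_addr_lo() returns 0x34567890 and
 * ccp_addr_hi() returns 0x0012 -- only the low 16 bits of the upper half
 * are kept, i.e. a 48-bit address space.
 */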
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start */
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);
	}

	return ret;
}
static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	cr[4] = lower_32_bits(op->u.sha.msg_bits);
	cr[5] = upper_32_bits(op->u.sha.msg_bits);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
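/* For reference: op->u.sha.msg_bits is the total message length in bits,
 * split across REQ5/REQ6 (cr[4]/cr[5]); e.g. a 3-byte message is reported
 * as msg_bits = 24 on the final block so the engine can apply the SHA
 * length padding.
 */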
static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
		cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
		cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM | REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
{
	int start;

	for (;;) {
		mutex_lock(&ccp->ksb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->ksb,
							ccp->ksb_count,
							ccp->ksb_start,
							count, 0);
		if (start <= ccp->ksb_count) {
			bitmap_set(ccp->ksb, start, count);

			mutex_unlock(&ccp->ksb_mutex);
			break;
		}

		mutex_unlock(&ccp->ksb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
			return 0;
	}

	return KSB_START + start;
}
static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
			 unsigned int count)
{
	mutex_lock(&ccp->ksb_mutex);

	bitmap_clear(ccp->ksb, start - KSB_START, count);

	mutex_unlock(&ccp->ksb_mutex);

	wake_up_interruptible_all(&ccp->ksb_queue);
}
static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}
static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);
}
static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	wa->nents = sg_nents(sg);
	wa->length = sg->length;
	wa->bytes_left = len;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}
static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}
static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		dma_pool_free(wa->dma_pool, wa->address,
			      wa->dma.address);
	} else {
		dma_unmap_single(wa->dev, wa->dma.address, wa->length,
				 wa->dma.dir);
		kfree(wa->address);
	}
}
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;
		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}
static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}
static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}
static void ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				    struct scatterlist *sg,
				    unsigned int len, unsigned int se_len,
				    bool sign_extend)
{
	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	BUG_ON(se_len > sizeof(buffer));

	sg_offset = len;
	dm_offset = 0;
	nbytes = len;
	while (nbytes) {
		ksb_len = min_t(unsigned int, nbytes, se_len);
		sg_offset -= ksb_len;

		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
		for (i = 0; i < ksb_len; i++)
			wa->address[dm_offset + i] = buffer[ksb_len - i - 1];

		dm_offset += ksb_len;
		nbytes -= ksb_len;

		if ((ksb_len != se_len) && sign_extend) {
			/* Must sign-extend to nearest sign-extend length */
			if (wa->address[dm_offset - 1] & 0x80)
				memset(wa->address + dm_offset, 0xff,
				       se_len - ksb_len);
		}
	}
}
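/* Worked example (illustrative): reversing the 3-byte big-endian operand
 * { 0x83, 0x02, 0x01 } with se_len = 4 and sign_extend = true yields
 * { 0x01, 0x02, 0x83, 0xff } in the work area -- least significant byte
 * first, with the final byte sign-extended to 0xff because the most
 * significant input byte (0x83) has its top bit set.
 */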
static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    struct scatterlist *sg,
				    unsigned int len)
{
	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	sg_offset = 0;
	dm_offset = len;
	nbytes = len;
	while (nbytes) {
		ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
		dm_offset -= ksb_len;

		for (i = 0; i < ksb_len; i++)
			buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);

		sg_offset += ksb_len;
		nbytes -= ksb_len;
	}
}
static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}
static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	/* Perform the copy operation
	 * nbytes will always be <= UINT_MAX because dm_wa->length is
	 * an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}
static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
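/* In other words: ccp_fill_queue_buf() copies caller data *into* the DMA
 * bounce buffer (clearing it first) before an operation, while
 * ccp_empty_queue_buf() copies an operation's output back *out* of the
 * bounce buffer into the caller's scatterlist.
 */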
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}
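/* Example of the rounding above (illustrative): with block_size = 16 and
 * 40 bytes left in the current source sg entry, op_len & ~(block_size - 1)
 * submits 32 bytes this pass and leaves 8 bytes to be picked up (or
 * bounce-buffered) on the next pass.
 */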
static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}
static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
				struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
				u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;

	if (from) {
		op.src.type = CCP_MEMTYPE_KSB;
		op.src.u.ksb = ksb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_KSB;
		op.dst.u.ksb = ksb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return ccp_perform_passthru(&op);
}
static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
			   struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
			   u32 byte_swap)
{
	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
}

static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
			     struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
			     u32 byte_swap)
{
	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
}
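/* Typical flow in the command handlers below: a key or context is staged in
 * a ccp_dm_workarea, pushed into its KSB entry with ccp_copy_to_ksb()
 * (usually with a 256-bit byte swap to get little endian), the engine runs,
 * and the updated context is pulled back out with ccp_copy_from_ksb().
 */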
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
						op.ksb_ctx,
						CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
					      CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = ccp_perform_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				      CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = ccp_perform_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
					CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;

	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = ccp_perform_xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	int ret;

	if (sha->ctx_len != CCP_SHA_CTXSIZE)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!sha->src_len) {
		const u8 *sha_zero;

		/* Not final, just return */
		if (!sha->final)
			return 0;

		/* CCP can't do a zero length sha operation so the caller
		 * must buffer the data.
		 */
		if (!sha->first)
			return -EINVAL;

		/* A sha operation for a message with a total length of zero,
		 * return known result.
		 */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			sha_zero = ccp_sha1_zero;
			break;
		case CCP_SHA_TYPE_224:
			sha_zero = ccp_sha224_zero;
			break;
		case CCP_SHA_TYPE_256:
			sha_zero = ccp_sha256_zero;
			break;
		default:
			return -EINVAL;
		}

		scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
					 sha->ctx_len, 1);

		return 0;
	}

	BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* The SHA context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	if (sha->first) {
		const __be32 *init;

		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			init = ccp_sha1_init;
			break;
		case CCP_SHA_TYPE_224:
			init = ccp_sha224_init;
			break;
		case CCP_SHA_TYPE_256:
			init = ccp_sha256_init;
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
		memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
	} else
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);

	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP SHA engine */
	ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
			    CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
		if (sha->final && !src.sg_wa.bytes_left)
			op.eom = 1;

		ret = ccp_perform_sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u64 block_size, digest_size;
		u8 *hmac_buf;

		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			block_size = SHA1_BLOCK_SIZE;
			digest_size = SHA1_DIGEST_SIZE;
			break;
		case CCP_SHA_TYPE_224:
			block_size = SHA224_BLOCK_SIZE;
			digest_size = SHA224_DIGEST_SIZE;
			break;
		case CCP_SHA_TYPE_256:
			block_size = SHA256_BLOCK_SIZE;
			digest_size = SHA256_DIGEST_SIZE;
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		memcpy(hmac_buf + block_size, ctx.address, digest_size);

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int ksb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256-bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

	ksb_count = o_len / CCP_KSB_BYTES;
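	/* Sizing example (illustrative): a 2048-bit key gives
	 * o_len = ((2048 + 255) / 256) * 32 = 256 bytes of output,
	 * i_len = 512 bytes for the concatenated modulus + message, and
	 * ksb_count = 8 with the 32-byte KSB entries noted above.
	 */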
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
	if (!op.ksb_key)
		return -EIO;

	/* The RSA exponent may span multiple (32-byte) KSB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_ksb;

	ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len, CCP_KSB_BYTES,
				false);
	ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len, CCP_KSB_BYTES,
				false);
	src.address += o_len;	/* Adjust the address for the copy operation */
	ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len, CCP_KSB_BYTES,
				false);
	src.address -= o_len;	/* Reset the address to original value */

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = ccp_perform_rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_ksb:
	ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);

	return ret;
}
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.ksb_key = cmd_q->ksb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_KSB_COUNT *
					   CCP_KSB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
				      CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place)
		dst = src;
	else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 * Because the CCP engine works on a single source and destination
	 * dma address at a time, each entry in the source scatterlist
	 * (after the dma_map_sg call) must be less than or equal to the
	 * (remaining) length in the destination scatterlist entry and the
	 * length must be a multiple of CCP_PASSTHRU_BLOCKSIZE
	 */
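	/* For example, a 64-byte source entry copied into a destination entry
	 * with only 32 bytes remaining would violate this constraint, which
	 * is why the loop below treats it as an error rather than splitting
	 * the copy.
	 */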
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = ccp_perform_passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}
static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	u8 *save;
	int ret;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				CCP_ECC_OPERAND_SIZE, true);
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
				ecc->u.mm.operand_1_len,
				CCP_ECC_OPERAND_SIZE, true);
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
					ecc->u.mm.operand_2_len,
					CCP_ECC_OPERAND_SIZE, true);
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = ccp_perform_ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	u8 *save;
	int ret;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	}

	if (!ecc->u.pm.domain_a ||
	    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
		if (!ecc->u.pm.scalar ||
		    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				CCP_ECC_OPERAND_SIZE, true);
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
				ecc->u.pm.point_1.x_len,
				CCP_ECC_OPERAND_SIZE, true);
	src.address += CCP_ECC_OPERAND_SIZE;
	ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
				ecc->u.pm.point_1.y_len,
				CCP_ECC_OPERAND_SIZE, true);
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*(src.address) = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
					ecc->u.pm.point_2.x_len,
					CCP_ECC_OPERAND_SIZE, true);
		src.address += CCP_ECC_OPERAND_SIZE;
		ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
					ecc->u.pm.point_2.y_len,
					CCP_ECC_OPERAND_SIZE, true);
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*(src.address) = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
					ecc->u.pm.domain_a_len,
					CCP_ECC_OPERAND_SIZE, true);
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
						ecc->u.pm.scalar_len,
						CCP_ECC_OPERAND_SIZE, true);
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = ccp_perform_ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}
static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_PASSTHRU:
		ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;
	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);