/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

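/*
 * Allocate a contiguous run of 'count' slots from the device's Key
 * Storage Block (KSB) bitmap, sleeping on ccp->sb_queue until enough
 * slots have been freed.  Returns the first slot number (offset by
 * KSB_START) or 0 if the wait was interrupted.
 */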
static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
	int start;
	struct ccp_device *ccp = cmd_q->ccp;

	for (;;) {
		mutex_lock(&ccp->sb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->sb,
							ccp->sb_count,
							ccp->sb_start,
							count, 0);
		if (start <= ccp->sb_count) {
			bitmap_set(ccp->sb, start, count);

			mutex_unlock(&ccp->sb_mutex);
			break;
		}

		ccp->sb_avail = 0;

		mutex_unlock(&ccp->sb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
			return 0;
	}

	return KSB_START + start;
}

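/* Return previously allocated KSB slots and wake any waiting allocators. */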
static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,
			 unsigned int count)
{
	struct ccp_device *ccp = cmd_q->ccp;

	mutex_lock(&ccp->sb_mutex);

	bitmap_clear(ccp->sb, start - KSB_START, count);

	ccp->sb_avail = 1;

	mutex_unlock(&ccp->sb_mutex);

	wake_up_interruptible_all(&ccp->sb_queue);
}

static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
	return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));
}

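/*
 * Submit a single operation to the hardware: write the caller-built
 * operand words to CMD_REQ1..CMD_REQx, then write the control word to
 * CMD_REQ0 to start the job.  When an interrupt on completion has been
 * requested, wait for the bottom half to signal completion and report
 * any error recorded for the queue.
 */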
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cmd_q->free_slots--;

	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start */
	wmb();
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;
			if (cmd_q->cmd_error)
				ccp_log_error(cmd_q->ccp,
					      cmd_q->cmd_error);

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

		cmd_q->int_rcvd = 0;
	}

	return ret;
}

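/*
 * Each ccp_perform_*() helper below builds the six REQ1-REQ6 register
 * values for its engine (operation type, source/destination addresses,
 * memory types and KSB slots) and passes them to ccp_do_cmd().
 */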
static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

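/*
 * Only the 128-bit key XTS-AES engine (CCP_ENGINE_XTS_AES_128) is
 * programmed on version 3 hardware.
 */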
static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	if (op->eom) {
		cr[0] |= REQ1_EOM;
		cr[4] = lower_32_bits(op->u.sha.msg_bits);
		cr[5] = upper_32_bits(op->u.sha.msg_bits);
	} else {
		cr[4] = 0;
		cr[5] = 0;
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->sb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->sb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->sb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.sb * CCP_SB_BYTES;
		cr[3] = (CCP_MEMTYPE_SB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.sb * CCP_SB_BYTES;
		cr[5] = (CCP_MEMTYPE_SB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
		cr[0] |= REQ1_EOM;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

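/*
 * Interrupt handling: the hard IRQ handler masks all queue interrupts
 * and either schedules the tasklet or runs the bottom half directly.
 * The bottom half saves per-queue status, records the first error,
 * acknowledges the interrupt, wakes the waiter in ccp_do_cmd() and
 * then re-enables the queue interrupts.
 */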
static void ccp_disable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_enable_queue_interrupts(struct ccp_device *ccp)
{
	iowrite32(ccp->qim, ccp->io_regs + IRQ_MASK_REG);
}

static void ccp_irq_bh(unsigned long data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}
	ccp_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;

	ccp_disable_queue_interrupts(ccp);
	if (ccp->use_tasklet)
		tasklet_schedule(&ccp->irq_tasklet);
	else
		ccp_irq_bh((unsigned long)ccp);

	return IRQ_HANDLED;
}

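/*
 * Bring the device up: discover the available command queues from
 * Q_MASK_REG, set up each queue's DMA pool, KSB slots and registers,
 * request the IRQ, start one kthread per queue, and register the RNG
 * and DMA engine services.  Error paths unwind in reverse order.
 */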
static int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, i;
	int ret;

	/* Find available queues */
	ccp->qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
			 ccp->name, i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->sb_key = KSB_START + ccp->sb_start++;
		cmd_q->sb_ctx = KSB_START + ccp->sb_start++;
		ccp->sb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = ccp_get_free_slots(cmd_q);

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		ccp->qim |= cmd_q->int_ok | cmd_q->int_err;

#ifdef CONFIG_ARM64
		/* For arm64 set the recommended queue cache settings */
		iowrite32(ccp->axcache, ccp->io_regs + CMD_Q_CACHE_BASE +
			  (CMD_Q_CACHE_INC * i));
#endif

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = sp_request_ccp_irq(ccp->sp, ccp_irq_handler, ccp->name, ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the ISR tasklet? */
	if (ccp->use_tasklet)
		tasklet_init(&ccp->irq_tasklet, ccp_irq_bh,
			     (unsigned long)ccp);

	dev_dbg(dev, "Starting threads...\n");
	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "%s-q%u", ccp->name, cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	dev_dbg(dev, "Enabling interrupts...\n");
	/* Enable interrupts */
	ccp_enable_queue_interrupts(ccp);

	dev_dbg(dev, "Registering device...\n");
	ccp_add_device(ccp);

	ret = ccp_register_rng(ccp);
	if (ret)
		goto e_kthread;

	/* Register the DMA engine support */
	ret = ccp_dmaengine_register(ccp);
	if (ret)
		goto e_hwrng;

	return 0;

e_hwrng:
	ccp_unregister_rng(ccp);

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}

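/*
 * Tear the device down in roughly the reverse order of ccp_init():
 * unregister the DMA engine and RNG, quiesce and acknowledge
 * interrupts, stop the queue kthreads, free the IRQ and DMA pools,
 * then complete any queued or backlogged commands with -ENODEV.
 */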
static void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int i;

	/* Unregister the DMA engine */
	ccp_dmaengine_unregister(ccp);

	/* Unregister the RNG */
	ccp_unregister_rng(ccp);

	/* Remove this device from the list of available units */
	ccp_del_device(ccp);

	/* Disable and clear interrupts */
	ccp_disable_queue_interrupts(ccp);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(ccp->qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	sp_free_ccp_irq(ccp->sp, ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queue */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}

static const struct ccp_actions ccp3_actions = {
	.aes = ccp_perform_aes,
	.xts_aes = ccp_perform_xts_aes,
	.des3 = NULL,
	.sha = ccp_perform_sha,
	.rsa = ccp_perform_rsa,
	.passthru = ccp_perform_passthru,
	.ecc = ccp_perform_ecc,
	.sballoc = ccp_alloc_ksb,
	.sbfree = ccp_free_ksb,
	.init = ccp_init,
	.destroy = ccp_destroy,
	.get_free_slots = ccp_get_free_slots,
	.irqhandler = ccp_irq_handler,
};

const struct ccp_vdata ccpv3_platform = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0,
	.rsamax = CCP_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv3 = {
	.version = CCP_VERSION(3, 0),
	.setup = NULL,
	.perform = &ccp3_actions,
	.offset = 0x20000,
	.rsamax = CCP_RSA_MAX_WIDTH,
};