/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
	return ccp_dev;
}

static inline void ccp_add_device(struct ccp_device *ccp)
{
	ccp_dev = ccp;
}

static inline void ccp_del_device(struct ccp_device *ccp)
{
	ccp_dev = NULL;
}
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
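/*
 * Example usage (an illustrative sketch only, not part of this driver):
 * "struct my_req", "my_ccp_done" and "req" below are hypothetical caller
 * constructs.  A caller typically supplies a callback that ignores the
 * -EINPROGRESS "advanced out of the backlog" notification, records the
 * final result, and completes a struct completion.  Completion callbacks
 * are invoked from tasklet (softirq) context, so they must not sleep.
 *
 *	static void my_ccp_done(void *data, int err)
 *	{
 *		struct my_req *req = data;
 *
 *		if (err == -EINPROGRESS)	// just left the backlog
 *			return;
 *		req->err = err;			// final operation result
 *		complete(&req->done);
 *	}
 *
 *	...
 *	req->cmd.flags = CCP_CMD_MAY_BACKLOG;
 *	req->cmd.callback = my_ccp_done;
 *	req->cmd.data = req;
 *	ret = ccp_enqueue_cmd(&req->cmd);
 *	if (ret == -EINPROGRESS || ret == -EBUSY)
 *		wait_for_completion(&req->done);	// queued or backlogged
 *	else
 *		return ret;				// e.g. -ENODEV, -EINVAL
 */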
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}
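/*
 * Command flow summary: ccp_enqueue_cmd() adds work to ccp->cmd (or to
 * ccp->backlog when the queue is already at MAX_CMD_QLEN), each per-queue
 * kthread pulls the next command via ccp_dequeue_cmd() above, and any
 * backlogged command found there is promoted back onto ccp->cmd from a
 * workqueue by ccp_do_cmd_backlog().
 */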
static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}
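/*
 * Note: ccp_do_cmd_complete() runs from the tasklet scheduled by
 * ccp_cmd_queue_thread() below, so the caller's callback is invoked in
 * softirq context and must not sleep.  The queue thread waits on
 * tdata.completion before fetching the next command.
 */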
static int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
	if (ccp == NULL) {
		dev_err(dev, "unable to allocate device struct\n");
		return NULL;
	}
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->ksb_mutex);
	ccp->ksb_count = KSB_COUNT;
	ccp->ksb_start = 0;

	return ccp;
}
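/*
 * Illustrative sketch (not part of this file): the bus-specific glue is
 * expected to pair ccp_alloc_struct() with ccp_init()/ccp_destroy(),
 * roughly along these lines, where the angle-bracketed pieces are whatever
 * the bus layer provides:
 *
 *	ccp = ccp_alloc_struct(dev);
 *	if (!ccp)
 *		return -ENOMEM;
 *	ccp->io_regs = <mapped MMIO register base>;
 *	ccp->get_irq = <bus irq setup helper>;
 *	ccp->free_irq = <bus irq teardown helper>;
 *	ret = ccp_init(ccp);
 */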
/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct ccp_cmd_queue *cmd_q;
	struct dma_pool *dma_pool;
	char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
	unsigned int qmr, qim, i;
	int ret;

	/* Find available queues */
	qim = 0;
	qmr = ioread32(ccp->io_regs + Q_MASK_REG);
	for (i = 0; i < MAX_HW_QUEUES; i++) {
		if (!(qmr & (1 << i)))
			continue;

		/* Allocate a dma pool for this queue */
		snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
		dma_pool = dma_pool_create(dma_pool_name, dev,
					   CCP_DMAPOOL_MAX_SIZE,
					   CCP_DMAPOOL_ALIGN, 0);
		if (!dma_pool) {
			dev_err(dev, "unable to allocate dma pool\n");
			ret = -ENOMEM;
			goto e_pool;
		}

		cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
		ccp->cmd_q_count++;

		cmd_q->ccp = ccp;
		cmd_q->id = i;
		cmd_q->dma_pool = dma_pool;

		/* Reserve 2 KSB regions for the queue */
		cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
		cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
		ccp->ksb_count -= 2;

		/* Preset some register values and masks that are queue
		 * number dependent
		 */
		cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
				    (CMD_Q_STATUS_INCR * i);
		cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
					(CMD_Q_STATUS_INCR * i);
		cmd_q->int_ok = 1 << (i * 2);
		cmd_q->int_err = 1 << ((i * 2) + 1);

		cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

		init_waitqueue_head(&cmd_q->int_queue);

		/* Build queue interrupt mask (two interrupts per queue) */
		qim |= cmd_q->int_ok | cmd_q->int_err;

		dev_dbg(dev, "queue #%u available\n", i);
	}
	if (ccp->cmd_q_count == 0) {
		dev_notice(dev, "no command queues available\n");
		ret = -EIO;
		goto e_pool;
	}
	dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

	/* Disable and clear interrupts until ready */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	/* Request an irq */
	ret = ccp->get_irq(ccp);
	if (ret) {
		dev_err(dev, "unable to allocate an IRQ\n");
		goto e_pool;
	}

	/* Initialize the queues used to wait for KSB space and suspend */
	init_waitqueue_head(&ccp->ksb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	/* Create a kthread for each queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct task_struct *kthread;

		cmd_q = &ccp->cmd_q[i];

		kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
					 "ccp-q%u", cmd_q->id);
		if (IS_ERR(kthread)) {
			dev_err(dev, "error creating queue thread (%ld)\n",
				PTR_ERR(kthread));
			ret = PTR_ERR(kthread);
			goto e_kthread;
		}

		cmd_q->kthread = kthread;
		wake_up_process(kthread);
	}

	/* Register the RNG */
	ccp->hwrng.name = "ccp-rng";
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret) {
		dev_err(dev, "error registering hwrng (%d)\n", ret);
		goto e_kthread;
	}

	/* Make the device struct available before enabling interrupts */
	ccp_add_device(ccp);

	/* Enable interrupts */
	iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

	return 0;

e_kthread:
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	ccp->free_irq(ccp);

e_pool:
	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	return ret;
}
/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
	struct ccp_cmd_queue *cmd_q;
	struct ccp_cmd *cmd;
	unsigned int qim, i;

	/* Remove general access to the device struct */
	ccp_del_device(ccp);

	/* Unregister the RNG */
	hwrng_unregister(&ccp->hwrng);

	/* Stop the queue kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].kthread)
			kthread_stop(ccp->cmd_q[i].kthread);

	/* Build queue interrupt mask (two interrupt masks per queue) */
	qim = 0;
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];
		qim |= cmd_q->int_ok | cmd_q->int_err;
	}

	/* Disable and clear interrupts */
	iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		ioread32(cmd_q->reg_int_status);
		ioread32(cmd_q->reg_status);
	}
	iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

	ccp->free_irq(ccp);

	for (i = 0; i < ccp->cmd_q_count; i++)
		dma_pool_destroy(ccp->cmd_q[i].dma_pool);

	/* Flush the cmd and backlog queues */
	while (!list_empty(&ccp->cmd)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
	while (!list_empty(&ccp->backlog)) {
		/* Invoke the callback directly with an error code */
		cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
		list_del(&cmd->entry);
		cmd->callback(cmd->data, -ENODEV);
	}
}
/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	struct ccp_cmd_queue *cmd_q;
	u32 q_int, status;
	unsigned int i;

	status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

	for (i = 0; i < ccp->cmd_q_count; i++) {
		cmd_q = &ccp->cmd_q[i];

		q_int = status & (cmd_q->int_ok | cmd_q->int_err);
		if (q_int) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}
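/*
 * Illustrative sketch (not part of this file): the bus code behind
 * ccp->get_irq() is expected to register this handler with the struct
 * device as the cookie, roughly:
 *
 *	ret = request_irq(irq, ccp_irq_handler, 0, "ccp", dev);
 *
 * so that dev_get_drvdata() above can recover the ccp_device.
 */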
#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif
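/*
 * Illustrative sketch (not part of this file): a bus-level suspend handler
 * would typically set ccp->suspending under ccp->cmd_lock, wake every queue
 * kthread so it can park itself, and then wait:
 *
 *	wait_event_interruptible(ccp->suspend_queue,
 *				 ccp_queues_suspended(ccp));
 */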
static const struct x86_cpu_id ccp_support[] = {
	{ X86_VENDOR_AMD, 22, },
};

static int __init ccp_mod_init(void)
{
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
	int ret;

	if (!x86_match_cpu(ccp_support))
		return -ENODEV;

	switch (cpuinfo->x86) {
	case 22:
		if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
			return -ENODEV;

		ret = ccp_pci_init();
		if (ret)
			return ret;

		/* Don't leave the driver loaded if init failed */
		if (!ccp_get_device()) {
			ccp_pci_exit();
			return -ENODEV;
		}

		return 0;
	}

	return -ENODEV;
}
static void __exit ccp_mod_exit(void)
{
	struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

	switch (cpuinfo->x86) {
	case 22:
		ccp_pci_exit();
		break;
	}
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);