/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#include <asm/cpu_device_id.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");
static struct ccp_device *ccp_dev;
static inline struct ccp_device *ccp_get_device(void)
{
        return ccp_dev;
}
static inline void ccp_add_device(struct ccp_device *ccp)
{
        ccp_dev = ccp;
}
static inline void ccp_del_device(struct ccp_device *ccp)
{
        ccp_dev = NULL;
}
/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will
 * result in a return code of -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
        struct ccp_device *ccp = ccp_get_device();
        unsigned long flags;
        unsigned int i;
        int ret;

        if (!ccp)
                return -ENODEV;

        /* Caller must supply a callback routine */
        if (!cmd->callback)
                return -EINVAL;

        cmd->ccp = ccp;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        i = ccp->cmd_q_count;

        if (ccp->cmd_count >= MAX_CMD_QLEN) {
                ret = -EBUSY;
                if (cmd->flags & CCP_CMD_MAY_BACKLOG)
                        list_add_tail(&cmd->entry, &ccp->backlog);
        } else {
                ret = -EINPROGRESS;
                ccp->cmd_count++;
                list_add_tail(&cmd->entry, &ccp->cmd);

                /* Find an idle queue */
                if (!ccp->suspending) {
                        for (i = 0; i < ccp->cmd_q_count; i++) {
                                if (ccp->cmd_q[i].active)
                                        continue;

                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);

        return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
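/*
 * Illustrative only: a minimal sketch of how a caller might submit work
 * through ccp_enqueue_cmd(), assuming the ccp_cmd fields declared in
 * <linux/ccp.h> (engine, flags, callback, data). The callback, completion
 * variable and error helper are hypothetical names, not part of this driver.
 *
 *      static void my_cmd_done(void *data, int err)
 *      {
 *              if (err == -EINPROGRESS)
 *                      return;                 // cmd just left the backlog
 *              // err now holds the result of the operation
 *              complete(data);
 *      }
 *
 *      cmd.engine = CCP_ENGINE_AES;
 *      cmd.flags = CCP_CMD_MAY_BACKLOG;
 *      cmd.callback = my_cmd_done;
 *      cmd.data = &my_completion;
 *      ret = ccp_enqueue_cmd(&cmd);
 *      if (ret != -EINPROGRESS && ret != -EBUSY)
 *              handle_submit_error(ret);       // not queued; no callback coming
 */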
static void ccp_do_cmd_backlog(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
        struct ccp_device *ccp = cmd->ccp;
        unsigned long flags;
        unsigned int i;

        cmd->callback(cmd->data, -EINPROGRESS);

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        ccp->cmd_count++;
        list_add_tail(&cmd->entry, &ccp->cmd);

        /* Find an idle queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                if (ccp->cmd_q[i].active)
                        continue;

                break;
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        /* If we found an idle queue, wake it up */
        if (i < ccp->cmd_q_count)
                wake_up_process(ccp->cmd_q[i].kthread);
}
static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
        struct ccp_device *ccp = cmd_q->ccp;
        struct ccp_cmd *cmd = NULL;
        struct ccp_cmd *backlog = NULL;
        unsigned long flags;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        cmd_q->active = 0;

        if (ccp->suspending) {
                cmd_q->suspended = 1;

                spin_unlock_irqrestore(&ccp->cmd_lock, flags);
                wake_up_interruptible(&ccp->suspend_queue);

                return NULL;
        }

        if (ccp->cmd_count) {
                cmd_q->active = 1;

                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);

                ccp->cmd_count--;
        }

        if (!list_empty(&ccp->backlog)) {
                backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
                                           entry);
                list_del(&backlog->entry);
        }

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        if (backlog) {
                INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
                schedule_work(&backlog->work);
        }

        return cmd;
}
static void ccp_do_cmd_complete(struct work_struct *work)
{
        struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);

        cmd->callback(cmd->data, cmd->ret);
}
static int ccp_cmd_queue_thread(void *data)
{
        struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
        struct ccp_cmd *cmd;

        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();

                set_current_state(TASK_INTERRUPTIBLE);

                cmd = ccp_dequeue_cmd(cmd_q);
                if (!cmd)
                        continue;

                __set_current_state(TASK_RUNNING);

                /* Execute the command */
                cmd->ret = ccp_run_cmd(cmd_q, cmd);

                /* Schedule the completion callback */
                INIT_WORK(&cmd->work, ccp_do_cmd_complete);
                schedule_work(&cmd->work);

                set_current_state(TASK_INTERRUPTIBLE);
        }

        __set_current_state(TASK_RUNNING);

        return 0;
}
static int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
        struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
        u32 trng_value;
        int len = min_t(int, sizeof(trng_value), max);

        /* Locking is provided by the caller so we can update device
         * hwrng-related fields safely
         */
        trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
        if (!trng_value) {
                /* Zero is returned if no data is available or if a
                 * bad-entropy error is present. Assume an error if
                 * we exceed TRNG_RETRIES reads of zero.
                 */
                if (ccp->hwrng_retries++ > TRNG_RETRIES)
                        return -EIO;

                return 0;
        }

        /* Reset the counter and save the rng value */
        ccp->hwrng_retries = 0;
        memcpy(data, &trng_value, len);

        return len;
}
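/*
 * For context, a hedged sketch of how the hwrng core is expected to treat
 * the values returned above (caller-side logic, not part of this driver):
 * a positive value is the number of bytes copied into "data", zero means
 * no entropy was available yet, and a negative value is a hard error. The
 * consumer helpers named below are hypothetical.
 *
 *      len = rng->read(rng, buf, sizeof(buf), wait);
 *      if (len < 0)
 *              return len;             // hard error, drop the source
 *      if (len == 0)
 *              goto retry;             // no entropy available yet
 *      add_entropy(buf, len);          // hypothetical consumer
 */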
/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
        struct ccp_device *ccp;

        ccp = kzalloc(sizeof(*ccp), GFP_KERNEL);
        if (!ccp) {
                dev_err(dev, "unable to allocate device struct\n");
                return NULL;
        }
        ccp->dev = dev;

        INIT_LIST_HEAD(&ccp->cmd);
        INIT_LIST_HEAD(&ccp->backlog);

        spin_lock_init(&ccp->cmd_lock);
        mutex_init(&ccp->req_mutex);
        mutex_init(&ccp->ksb_mutex);
        ccp->ksb_count = KSB_COUNT;
        ccp->ksb_start = 0;

        return ccp;
}
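/*
 * A minimal sketch (assumptions, not code from this file) of how a bus-level
 * front end might pair ccp_alloc_struct() with ccp_init(). Only fields this
 * file dereferences are shown (dev, io_regs, get_irq, free_irq); how the
 * front end maps the MMIO region and wires up its IRQ helpers is its own
 * business, and the helper names below are hypothetical.
 *
 *      ccp = ccp_alloc_struct(dev);
 *      if (!ccp)
 *              return -ENOMEM;
 *
 *      ccp->io_regs = my_mapped_mmio_base;
 *      ccp->get_irq = my_get_irq;
 *      ccp->free_irq = my_free_irq;
 *      dev_set_drvdata(dev, ccp);      // lets ccp_irq_handler() find the device
 *
 *      ret = ccp_init(ccp);
 *      if (ret)
 *              kfree(ccp);
 */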
/**
 * ccp_init - initialize the CCP device
 *
 * @ccp: ccp_device struct
 */
int ccp_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, qim, i;
        int ret;

        /* Find available queues */
        qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "ccp_q%d", i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;

                /* Reserve 2 KSB regions for the queue */
                cmd_q->ksb_key = KSB_START + ccp->ksb_start++;
                cmd_q->ksb_ctx = KSB_START + ccp->ksb_start++;
                ccp->ksb_count -= 2;

                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_status = ccp->io_regs + CMD_Q_STATUS_BASE +
                                    (CMD_Q_STATUS_INCR * i);
                cmd_q->reg_int_status = ccp->io_regs + CMD_Q_INT_STATUS_BASE +
                                        (CMD_Q_STATUS_INCR * i);
                cmd_q->int_ok = 1 << (i * 2);
                cmd_q->int_err = 1 << ((i * 2) + 1);

                cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

                init_waitqueue_head(&cmd_q->int_queue);

                /* Build queue interrupt mask (two interrupts per queue) */
                qim |= cmd_q->int_ok | cmd_q->int_err;

                dev_dbg(dev, "queue #%u available\n", i);
        }
        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }
        dev_notice(dev, "%u command queues available\n", ccp->cmd_q_count);

        /* Disable and clear interrupts until ready */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        /* Request an irq */
        ret = ccp->get_irq(ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }

        /* Initialize the queues used to wait for KSB space and suspend */
        init_waitqueue_head(&ccp->ksb_queue);
        init_waitqueue_head(&ccp->suspend_queue);

        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "ccp-q%u", cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        /* Register the RNG */
        ccp->hwrng.name = "ccp-rng";
        ccp->hwrng.read = ccp_trng_read;
        ret = hwrng_register(&ccp->hwrng);
        if (ret) {
                dev_err(dev, "error registering hwrng (%d)\n", ret);
                goto e_kthread;
        }

        /* Make the device struct available before enabling interrupts */
        ccp_add_device(ccp);

        /* Enable interrupts */
        iowrite32(qim, ccp->io_regs + IRQ_MASK_REG);

        return 0;

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        ccp->free_irq(ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}
/**
 * ccp_destroy - tear down the CCP device
 *
 * @ccp: ccp_device struct
 */
void ccp_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int qim, i;

        /* Remove general access to the device struct */
        ccp_del_device(ccp);

        /* Unregister the RNG */
        hwrng_unregister(&ccp->hwrng);

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        /* Build queue interrupt mask (two interrupt masks per queue) */
        qim = 0;
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                qim |= cmd_q->int_ok | cmd_q->int_err;
        }

        /* Disable and clear interrupts */
        iowrite32(0x00, ccp->io_regs + IRQ_MASK_REG);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }
        iowrite32(qim, ccp->io_regs + IRQ_STATUS_REG);

        ccp->free_irq(ccp);

        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}
/**
 * ccp_irq_handler - handle interrupts generated by the CCP device
 *
 * @irq: the irq associated with the interrupt
 * @data: the data value supplied when the irq was created
 */
irqreturn_t ccp_irq_handler(int irq, void *data)
{
        struct device *dev = data;
        struct ccp_device *ccp = dev_get_drvdata(dev);
        struct ccp_cmd_queue *cmd_q;
        u32 q_int, status;
        unsigned int i;

        status = ioread32(ccp->io_regs + IRQ_STATUS_REG);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                q_int = status & (cmd_q->int_ok | cmd_q->int_err);
                if (q_int) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((q_int & cmd_q->int_err) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(q_int, ccp->io_regs + IRQ_STATUS_REG);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }

        return IRQ_HANDLED;
}
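/*
 * A hedged sketch of how a front end's ->get_irq() helper might hook this
 * handler up. The handler expects "data" to be the struct device whose
 * drvdata points at the ccp_device (see dev_get_drvdata() above); where the
 * irq number comes from is the front end's concern.
 *
 *      ret = request_irq(irq, ccp_irq_handler, 0, "ccp", dev);
 *      if (ret)
 *              return ret;     // propagate the failure through ->get_irq()
 */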
bool ccp_queues_suspended(struct ccp_device *ccp)
{
        unsigned int suspended = 0;
        unsigned long flags;
        unsigned int i;

        spin_lock_irqsave(&ccp->cmd_lock, flags);

        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].suspended)
                        suspended++;

        spin_unlock_irqrestore(&ccp->cmd_lock, flags);

        return ccp->cmd_q_count == suspended;
}
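/*
 * Usage note (a sketch under assumptions, not code from this file): a suspend
 * path would typically set ccp->suspending under cmd_lock, wake each queue
 * kthread so it parks itself via ccp_dequeue_cmd() above, and then wait on
 * the suspend_queue that ccp_dequeue_cmd() signals:
 *
 *      wait_event_interruptible(ccp->suspend_queue,
 *                               ccp_queues_suspended(ccp));
 */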
static const struct x86_cpu_id ccp_support[] = {
        { X86_VENDOR_AMD, 22, },
        { },
};
static int __init ccp_mod_init(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;
        int ret;

        if (!x86_match_cpu(ccp_support))
                return -ENODEV;

        switch (cpuinfo->x86) {
        case 22:
                if ((cpuinfo->x86_model < 48) || (cpuinfo->x86_model > 63))
                        return -ENODEV;

                ret = ccp_pci_init();
                if (ret)
                        return ret;

                /* Don't leave the driver loaded if init failed */
                if (!ccp_get_device()) {
                        ccp_pci_exit();
                        return -ENODEV;
                }

                return 0;
        }

        return -ENODEV;
}
static void __exit ccp_mod_exit(void)
{
        struct cpuinfo_x86 *cpuinfo = &boot_cpu_data;

        switch (cpuinfo->x86) {
        case 22:
                ccp_pci_exit();
                break;
        }
}
module_init(ccp_mod_init);
module_exit(ccp_mod_exit);