drivers/crypto/ccp/ccp-pci.c

/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

#define IO_BAR				2
#define MSIX_VECTORS			2

struct ccp_msix {
	u32 vector;
	char name[16];
};

struct ccp_pci {
	int msix_count;
	struct ccp_msix msix[MSIX_VECTORS];
};
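
/*
 * Request one MSI-X vector per command queue, up to MSIX_VECTORS.
 * pci_enable_msix() returns a positive count when fewer vectors are
 * available than requested, so the call is retried with that smaller
 * count until it succeeds or fails outright.
 */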
static int ccp_get_msix_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	struct msix_entry msix_entry[MSIX_VECTORS];
	unsigned int name_len = sizeof(ccp_pci->msix[0].name) - 1;
	int v, ret;

	for (v = 0; v < ARRAY_SIZE(msix_entry); v++)
		msix_entry[v].entry = v;

	while ((ret = pci_enable_msix(pdev, msix_entry, v)) > 0)
		v = ret;
	if (ret)
		return ret;

	ccp_pci->msix_count = v;
	for (v = 0; v < ccp_pci->msix_count; v++) {
		/* Set the interrupt names and request the irqs */
		snprintf(ccp_pci->msix[v].name, name_len, "ccp-%u", v);
		ccp_pci->msix[v].vector = msix_entry[v].vector;
		ret = request_irq(ccp_pci->msix[v].vector, ccp_irq_handler,
				  0, ccp_pci->msix[v].name, dev);
		if (ret) {
			dev_notice(dev, "unable to allocate MSI-X IRQ (%d)\n",
				   ret);
			goto e_irq;
		}
	}

	return 0;

e_irq:
	while (v--)
		free_irq(ccp_pci->msix[v].vector, dev);

	pci_disable_msix(pdev);

	ccp_pci->msix_count = 0;

	return ret;
}
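
/* Fall back to a single shared MSI interrupt when MSI-X is unavailable. */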
static int ccp_get_msi_irq(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	int ret;

	ret = pci_enable_msi(pdev);
	if (ret)
		return ret;

	ret = request_irq(pdev->irq, ccp_irq_handler, 0, "ccp", dev);
	if (ret) {
		dev_notice(dev, "unable to allocate MSI IRQ (%d)\n", ret);
		goto e_msi;
	}

	return 0;

e_msi:
	pci_disable_msi(pdev);

	return ret;
}
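
/* Interrupt setup entry point: prefer MSI-X, fall back to plain MSI. */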
static int ccp_get_irqs(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	int ret;

	ret = ccp_get_msix_irqs(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI-X vectors, try MSI */
	dev_notice(dev, "could not enable MSI-X (%d), trying MSI\n", ret);
	ret = ccp_get_msi_irq(ccp);
	if (!ret)
		return 0;

	/* Couldn't get MSI interrupt */
	dev_notice(dev, "could not enable MSI (%d)\n", ret);

	return ret;
}
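
/*
 * Release whichever interrupt mode was set up: every MSI-X vector if
 * msix_count is non-zero, otherwise the single MSI interrupt.
 */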
static void ccp_free_irqs(struct ccp_device *ccp)
{
	struct ccp_pci *ccp_pci = ccp->dev_specific;
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);

	if (ccp_pci->msix_count) {
		while (ccp_pci->msix_count--)
			free_irq(ccp_pci->msix[ccp_pci->msix_count].vector,
				 dev);
		pci_disable_msix(pdev);
	} else {
		free_irq(pdev->irq, dev);
		pci_disable_msi(pdev);
	}
}
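
/*
 * Locate a memory BAR large enough to hold the register block
 * (IO_OFFSET + 0x800 bytes).  BAR 2 is the expected location; the
 * other standard BARs are scanned as a fallback.
 */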
static int ccp_find_mmio_area(struct ccp_device *ccp)
{
	struct device *dev = ccp->dev;
	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
	resource_size_t io_len;
	unsigned long io_flags;
	int bar;

	io_flags = pci_resource_flags(pdev, IO_BAR);
	io_len = pci_resource_len(pdev, IO_BAR);
	if ((io_flags & IORESOURCE_MEM) && (io_len >= (IO_OFFSET + 0x800)))
		return IO_BAR;

	for (bar = 0; bar < PCI_STD_RESOURCE_END; bar++) {
		io_flags = pci_resource_flags(pdev, bar);
		io_len = pci_resource_len(pdev, bar);
		if ((io_flags & IORESOURCE_MEM) &&
		    (io_len >= (IO_OFFSET + 0x800)))
			return bar;
	}

	return -EIO;
}
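
/*
 * Probe sequence: allocate the device structures, claim and enable the
 * PCI device, map the MMIO register area, configure the DMA masks and
 * hand off to ccp_init() for the device-independent initialization.
 */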
static int ccp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ccp_device *ccp;
	struct ccp_pci *ccp_pci;
	struct device *dev = &pdev->dev;
	unsigned int bar;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(dev);
	if (!ccp)
		goto e_err;

	ccp_pci = kzalloc(sizeof(*ccp_pci), GFP_KERNEL);
	if (!ccp_pci) {
		ret = -ENOMEM;
		goto e_free1;
	}
	ccp->dev_specific = ccp_pci;
	ccp->get_irq = ccp_get_irqs;
	ccp->free_irq = ccp_free_irqs;

	ret = pci_request_regions(pdev, "ccp");
	if (ret) {
		dev_err(dev, "pci_request_regions failed (%d)\n", ret);
		goto e_free2;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device failed (%d)\n", ret);
		goto e_regions;
	}

	pci_set_master(pdev);

	ret = ccp_find_mmio_area(ccp);
	if (ret < 0)
		goto e_device;
	bar = ret;

	ret = -EIO;
	ccp->io_map = pci_iomap(pdev, bar, 0);
	if (ccp->io_map == NULL) {
		dev_err(dev, "pci_iomap failed\n");
		goto e_device;
	}
	ccp->io_regs = ccp->io_map + IO_OFFSET;
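
	/* Prefer a 48-bit DMA mask; fall back to a 32-bit mask if the
	 * platform cannot support the wider one.
	 */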
	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret == 0) {
		ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(48));
		if (ret) {
			dev_err(dev,
				"dma_set_coherent_mask failed (%d)\n",
				ret);
			goto e_bar0;
		}
	} else {
		ret = dma_set_mask(dev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(dev, "dma_set_mask failed (%d)\n", ret);
			goto e_bar0;
		}
	}

	dev_set_drvdata(dev, ccp);

	ret = ccp_init(ccp);
	if (ret)
		goto e_bar0;

	dev_notice(dev, "enabled\n");

	return 0;
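
/* Error paths unwind the setup steps in reverse order of probe. */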
e_bar0:
	pci_iounmap(pdev, ccp->io_map);

e_device:
	pci_disable_device(pdev);

e_regions:
	pci_release_regions(pdev);

e_free2:
	kfree(ccp_pci);

e_free1:
	kfree(ccp);

e_err:
	dev_notice(dev, "initialization failed\n");
	return ret;
}
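
/*
 * Tear down on device removal: ccp_destroy() performs the
 * device-independent teardown before the PCI resources are released.
 */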
static void ccp_pci_remove(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);

	if (!ccp)
		return;

	ccp_destroy(ccp);

	pci_iounmap(pdev, ccp->io_map);

	pci_disable_device(pdev);

	pci_release_regions(pdev);

	kfree(ccp);

	dev_notice(dev, "disabled\n");
}
#ifdef CONFIG_PM
static int ccp_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

static int ccp_pci_resume(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

static DEFINE_PCI_DEVICE_TABLE(ccp_pci_table) = {
	{ PCI_VDEVICE(AMD, 0x1537), },
	/* Last entry must be zero */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ccp_pci_table);

static struct pci_driver ccp_pci_driver = {
	.name = "AMD Cryptographic Coprocessor",
	.id_table = ccp_pci_table,
	.probe = ccp_pci_probe,
	.remove = ccp_pci_remove,
#ifdef CONFIG_PM
	.suspend = ccp_pci_suspend,
	.resume = ccp_pci_resume,
#endif
};

int ccp_pci_init(void)
{
	return pci_register_driver(&ccp_pci_driver);
}

void ccp_pci_exit(void)
{
	pci_unregister_driver(&ccp_pci_driver);
}