Merge branch 'akpm' (patches from Andrew)
[linux/fpc-iii.git] / drivers / crypto / qat / qat_c62x / adf_drv.c
blob2bc06c89d2fe7c6eb6df702a193a49c285dc1b85
1 /*
2 This file is provided under a dual BSD/GPLv2 license. When using or
3 redistributing this file, you may do so under either license.
5 GPL LICENSE SUMMARY
6 Copyright(c) 2014 Intel Corporation.
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of version 2 of the GNU General Public License as
9 published by the Free Software Foundation.
11 This program is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 General Public License for more details.
16 Contact Information:
17 qat-linux@intel.com
19 BSD LICENSE
20 Copyright(c) 2014 Intel Corporation.
21 Redistribution and use in source and binary forms, with or without
22 modification, are permitted provided that the following conditions
23 are met:
25 * Redistributions of source code must retain the above copyright
26 notice, this list of conditions and the following disclaimer.
27 * Redistributions in binary form must reproduce the above copyright
28 notice, this list of conditions and the following disclaimer in
29 the documentation and/or other materials provided with the
30 distribution.
31 * Neither the name of Intel Corporation nor the names of its
32 contributors may be used to endorse or promote products derived
33 from this software without specific prior written permission.
35 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
36 "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
37 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
38 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
39 OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
40 SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
41 LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
42 DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
43 THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
44 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
45 OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #include <linux/kernel.h>
48 #include <linux/module.h>
49 #include <linux/pci.h>
50 #include <linux/init.h>
51 #include <linux/types.h>
52 #include <linux/fs.h>
53 #include <linux/slab.h>
54 #include <linux/errno.h>
55 #include <linux/device.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/platform_device.h>
58 #include <linux/workqueue.h>
59 #include <linux/io.h>
60 #include <adf_accel_devices.h>
61 #include <adf_common_drv.h>
62 #include <adf_cfg.h>
63 #include "adf_c62x_hw_data.h"
65 #define ADF_SYSTEM_DEVICE(device_id) \
66 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
68 static const struct pci_device_id adf_pci_tbl[] = {
69 ADF_SYSTEM_DEVICE(ADF_C62X_PCI_DEVICE_ID),
70 {0,}
72 MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
74 static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
75 static void adf_remove(struct pci_dev *dev);
77 static struct pci_driver adf_driver = {
78 .id_table = adf_pci_tbl,
79 .name = ADF_C62X_DEVICE_NAME,
80 .probe = adf_probe,
81 .remove = adf_remove,
82 .sriov_configure = adf_sriov_configure,
85 static void adf_cleanup_pci_dev(struct adf_accel_dev *accel_dev)
87 pci_release_regions(accel_dev->accel_pci_dev.pci_dev);
88 pci_disable_device(accel_dev->accel_pci_dev.pci_dev);
91 static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
93 struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
94 int i;
96 for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
97 struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
99 if (bar->virt_addr)
100 pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
103 if (accel_dev->hw_device) {
104 switch (accel_pci_dev->pci_dev->device) {
105 case ADF_C62X_PCI_DEVICE_ID:
106 adf_clean_hw_data_c62x(accel_dev->hw_device);
107 break;
108 default:
109 break;
111 kfree(accel_dev->hw_device);
112 accel_dev->hw_device = NULL;
114 adf_cfg_dev_remove(accel_dev);
115 debugfs_remove(accel_dev->debugfs_dir);
116 adf_devmgr_rm_dev(accel_dev, NULL);
119 static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
121 struct adf_accel_dev *accel_dev;
122 struct adf_accel_pci *accel_pci_dev;
123 struct adf_hw_device_data *hw_data;
124 char name[ADF_DEVICE_NAME_LENGTH];
125 unsigned int i, bar_nr;
126 unsigned long bar_mask;
127 int ret;
129 switch (ent->device) {
130 case ADF_C62X_PCI_DEVICE_ID:
131 break;
132 default:
133 dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
134 return -ENODEV;
137 if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
138 /* If the accelerator is connected to a node with no memory
139 * there is no point in using the accelerator since the remote
140 * memory transaction will be very slow. */
141 dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
142 return -EINVAL;
145 accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
146 dev_to_node(&pdev->dev));
147 if (!accel_dev)
148 return -ENOMEM;
150 INIT_LIST_HEAD(&accel_dev->crypto_list);
151 accel_pci_dev = &accel_dev->accel_pci_dev;
152 accel_pci_dev->pci_dev = pdev;
154 /* Add accel device to accel table.
155 * This should be called before adf_cleanup_accel is called */
156 if (adf_devmgr_add_dev(accel_dev, NULL)) {
157 dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
158 kfree(accel_dev);
159 return -EFAULT;
162 accel_dev->owner = THIS_MODULE;
163 /* Allocate and configure device configuration structure */
164 hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
165 dev_to_node(&pdev->dev));
166 if (!hw_data) {
167 ret = -ENOMEM;
168 goto out_err;
171 accel_dev->hw_device = hw_data;
172 adf_init_hw_data_c62x(accel_dev->hw_device);
173 pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
174 pci_read_config_dword(pdev, ADF_DEVICE_FUSECTL_OFFSET,
175 &hw_data->fuses);
177 /* Get Accelerators and Accelerators Engines masks */
178 hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
179 hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
180 accel_pci_dev->sku = hw_data->get_sku(hw_data);
181 /* If the device has no acceleration engines then ignore it. */
182 if (!hw_data->accel_mask || !hw_data->ae_mask ||
183 ((~hw_data->ae_mask) & 0x01)) {
184 dev_err(&pdev->dev, "No acceleration units found");
185 ret = -EFAULT;
186 goto out_err;
189 /* Create dev top level debugfs entry */
190 snprintf(name, sizeof(name), "%s%s_%02x:%02d.%d",
191 ADF_DEVICE_NAME_PREFIX, hw_data->dev_class->name,
192 pdev->bus->number, PCI_SLOT(pdev->devfn),
193 PCI_FUNC(pdev->devfn));
195 accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
197 /* Create device configuration table */
198 ret = adf_cfg_dev_add(accel_dev);
199 if (ret)
200 goto out_err;
202 /* enable PCI device */
203 if (pci_enable_device(pdev)) {
204 ret = -EFAULT;
205 goto out_err;
208 /* set dma identifier */
209 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
210 if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
211 dev_err(&pdev->dev, "No usable DMA configuration\n");
212 ret = -EFAULT;
213 goto out_err_disable;
214 } else {
215 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
218 } else {
219 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
222 if (pci_request_regions(pdev, ADF_C62X_DEVICE_NAME)) {
223 ret = -EFAULT;
224 goto out_err_disable;
227 /* Read accelerator capabilities mask */
228 pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET,
229 &hw_data->accel_capabilities_mask);
231 /* Find and map all the device's BARS */
232 i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
233 bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
234 for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
235 struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
237 bar->base_addr = pci_resource_start(pdev, bar_nr);
238 if (!bar->base_addr)
239 break;
240 bar->size = pci_resource_len(pdev, bar_nr);
241 bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
242 if (!bar->virt_addr) {
243 dev_err(&pdev->dev, "Failed to map BAR %d\n", bar_nr);
244 ret = -EFAULT;
245 goto out_err_free_reg;
248 pci_set_master(pdev);
250 if (adf_enable_aer(accel_dev, &adf_driver)) {
251 dev_err(&pdev->dev, "Failed to enable aer\n");
252 ret = -EFAULT;
253 goto out_err_free_reg;
256 if (pci_save_state(pdev)) {
257 dev_err(&pdev->dev, "Failed to save pci state\n");
258 ret = -ENOMEM;
259 goto out_err_free_reg;
262 ret = qat_crypto_dev_config(accel_dev);
263 if (ret)
264 goto out_err_free_reg;
266 ret = adf_dev_init(accel_dev);
267 if (ret)
268 goto out_err_dev_shutdown;
270 ret = adf_dev_start(accel_dev);
271 if (ret)
272 goto out_err_dev_stop;
274 return ret;
276 out_err_dev_stop:
277 adf_dev_stop(accel_dev);
278 out_err_dev_shutdown:
279 adf_dev_shutdown(accel_dev);
280 out_err_free_reg:
281 pci_release_regions(accel_pci_dev->pci_dev);
282 out_err_disable:
283 pci_disable_device(accel_pci_dev->pci_dev);
284 out_err:
285 adf_cleanup_accel(accel_dev);
286 kfree(accel_dev);
287 return ret;
/* Unbind callback: stop and shut down the device, disable AER, then
 * free all software state and PCI resources acquired in probe.
 */
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	/* Guard clause: nothing to do if the pdev is not in the table. */
	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}

	adf_dev_stop(accel_dev);
	adf_dev_shutdown(accel_dev);
	adf_disable_aer(accel_dev);
	adf_cleanup_accel(accel_dev);
	adf_cleanup_pci_dev(accel_dev);
	kfree(accel_dev);
}
306 static int __init adfdrv_init(void)
308 request_module("intel_qat");
310 if (pci_register_driver(&adf_driver)) {
311 pr_err("QAT: Driver initialization failed\n");
312 return -EFAULT;
314 return 0;
317 static void __exit adfdrv_release(void)
319 pci_unregister_driver(&adf_driver);
module_init(adfdrv_init);
module_exit(adfdrv_release);

/* Module metadata: dual BSD/GPL per the license header above; the
 * MODULE_FIRMWARE lines declare the FW/MMP images the device needs so
 * userspace tooling can bundle them.
 */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE(ADF_C62X_FW);
MODULE_FIRMWARE(ADF_C62X_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_VERSION(ADF_DRV_VERSION);