/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <adf_accel_devices.h>
#include <adf_common_drv.h>
#include <adf_cfg.h>
#include <adf_transport_access_macros.h>
#include "adf_dh895xcc_hw_data.h"
static const char adf_driver_name[] = ADF_DH895XCC_DEVICE_NAME;

#define ADF_SYSTEM_DEVICE(device_id) \
        {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}

static const struct pci_device_id adf_pci_tbl[] = {
        ADF_SYSTEM_DEVICE(ADF_DH895XCC_PCI_DEVICE_ID),
        {0,}
};
MODULE_DEVICE_TABLE(pci, adf_pci_tbl);
static int adf_probe(struct pci_dev *dev, const struct pci_device_id *ent);
static void adf_remove(struct pci_dev *dev);

static struct pci_driver adf_driver = {
        .id_table = adf_pci_tbl,
        .name = adf_driver_name,
        .probe = adf_probe,
        .remove = adf_remove
};

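/*
 * adf_cleanup_accel() - undo everything adf_probe() set up, in reverse
 * order: shut the device down, unmap its BARs, free the hw_device data,
 * drop the config table, debugfs entry and device-manager entry, then
 * release the PCI regions and disable the PCI device.
 */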
static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
{
        struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
        int i;

        adf_dev_shutdown(accel_dev);

        for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

                if (bar->virt_addr)
                        pci_iounmap(accel_pci_dev->pci_dev, bar->virt_addr);
        }

        if (accel_dev->hw_device) {
                switch (accel_dev->hw_device->pci_dev_id) {
                case ADF_DH895XCC_PCI_DEVICE_ID:
                        adf_clean_hw_data_dh895xcc(accel_dev->hw_device);
                        break;
                default:
                        break;
                }
                kfree(accel_dev->hw_device);
        }
        adf_cfg_dev_remove(accel_dev);
        debugfs_remove(accel_dev->debugfs_dir);
        adf_devmgr_rm_dev(accel_dev);
        pci_release_regions(accel_pci_dev->pci_dev);
        pci_disable_device(accel_pci_dev->pci_dev);
        kfree(accel_dev);
}

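/*
 * adf_dev_configure() - build the in-kernel configuration used when the
 * device is later initialised and started: one crypto (cy) instance per
 * online CPU, capped at the number of available ring banks, each with its
 * bank number, core affinity, ring sizes and ring numbers, plus the
 * interrupt coalescing timer for "Accelerator0".  Marks the device as
 * configured on success.
 */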
static int adf_dev_configure(struct adf_accel_dev *accel_dev)
{
        int cpus = num_online_cpus();
        int banks = GET_MAX_BANKS(accel_dev);
        int instances = min(cpus, banks);
        char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
        int i;
        unsigned long val;

        if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
                goto err;
        if (adf_cfg_section_add(accel_dev, "Accelerator0"))
                goto err;

        for (i = 0; i < instances; i++) {
                val = i;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
                         i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                /* Ring sizes and TX/RX ring numbers per service below are
                 * assumed DH895xCC defaults. */
                val = 128;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = 512;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = 0;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = 2;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = 4;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_TX, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = 8;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = 10;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = 12;
                snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_RND_RX, i);
                if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                                key, (void *)&val, ADF_DEC))
                        goto err;

                val = ADF_COALESCING_DEF_TIME;
                snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
                if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
                                                key, (void *)&val, ADF_DEC))
                        goto err;
        }

        val = i;
        if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
                                        ADF_NUM_CY, (void *)&val, ADF_DEC))
                goto err;

        set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
        return 0;
err:
        dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
        return -EINVAL;
}

static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct adf_accel_dev *accel_dev;
        struct adf_accel_pci *accel_pci_dev;
        struct adf_hw_device_data *hw_data;
        char name[ADF_DEVICE_NAME_LENGTH];
        unsigned int i, bar_nr;
        int ret;

        switch (ent->device) {
        case ADF_DH895XCC_PCI_DEVICE_ID:
                break;
        default:
                dev_err(&pdev->dev, "Invalid device 0x%x.\n", ent->device);
                return -ENODEV;
        }

        if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
                /* If the accelerator is connected to a node with no memory
                 * there is no point in using the accelerator since the remote
                 * memory transaction will be very slow. */
                dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
                return -EINVAL;
        }

        accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
                                 dev_to_node(&pdev->dev));
        if (!accel_dev)
                return -ENOMEM;

        INIT_LIST_HEAD(&accel_dev->crypto_list);

        /* Add accel device to accel table.
         * This should be called before adf_cleanup_accel is called */
        if (adf_devmgr_add_dev(accel_dev)) {
                dev_err(&pdev->dev, "Failed to add new accelerator device.\n");
                kfree(accel_dev);
                return -EFAULT;
        }

        accel_dev->owner = THIS_MODULE;
        /* Allocate and configure device configuration structure */
        hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
                               dev_to_node(&pdev->dev));
        if (!hw_data) {
                ret = -ENOMEM;
                goto out_err;
        }

        accel_dev->hw_device = hw_data;
        switch (ent->device) {
        case ADF_DH895XCC_PCI_DEVICE_ID:
                adf_init_hw_data_dh895xcc(accel_dev->hw_device);
                break;
        default:
                ret = -ENODEV;
                goto out_err;
        }
        accel_pci_dev = &accel_dev->accel_pci_dev;
        pci_read_config_byte(pdev, PCI_REVISION_ID, &accel_pci_dev->revid);
        pci_read_config_dword(pdev, ADF_DH895XCC_FUSECTL_OFFSET,
                              &hw_data->fuses);

        /* Get Accelerators and Accelerators Engines masks */
        hw_data->accel_mask = hw_data->get_accel_mask(hw_data->fuses);
        hw_data->ae_mask = hw_data->get_ae_mask(hw_data->fuses);
        accel_pci_dev->sku = hw_data->get_sku(hw_data);
        accel_pci_dev->pci_dev = pdev;
        /* If the device has no acceleration engines then ignore it. */
        if (!hw_data->accel_mask || !hw_data->ae_mask ||
            ((~hw_data->ae_mask) & 0x01)) {
                dev_err(&pdev->dev, "No acceleration units found\n");
                ret = -EFAULT;
                goto out_err;
        }

        /* Create dev top level debugfs entry */
        snprintf(name, sizeof(name), "%s%s_dev%d", ADF_DEVICE_NAME_PREFIX,
                 hw_data->dev_class->name, hw_data->instance_id);
        accel_dev->debugfs_dir = debugfs_create_dir(name, NULL);
        if (!accel_dev->debugfs_dir) {
                dev_err(&pdev->dev, "Could not create debugfs dir\n");
                ret = -EINVAL;
                goto out_err;
        }

        /* Create device configuration table */
        ret = adf_cfg_dev_add(accel_dev);
        if (ret)
                goto out_err;

        /* enable PCI device */
        if (pci_enable_device(pdev)) {
                ret = -EFAULT;
                goto out_err;
        }

        /* set dma identifier */
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
                if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
                        dev_err(&pdev->dev, "No usable DMA configuration\n");
                        ret = -EFAULT;
                        goto out_err;
                } else {
                        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
                }
        } else {
                pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        }

        if (pci_request_regions(pdev, adf_driver_name)) {
                ret = -EFAULT;
                goto out_err;
        }

        /* Read accelerator capabilities mask */
        pci_read_config_dword(pdev, ADF_DH895XCC_LEGFUSE_OFFSET,
                              &hw_data->accel_capabilities_mask);

        /* Find and map all the device's BARS */
        for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
                struct adf_bar *bar = &accel_pci_dev->pci_bars[i];

                bar_nr = i * 2;
                bar->base_addr = pci_resource_start(pdev, bar_nr);
                if (!bar->base_addr)
                        break;
                bar->size = pci_resource_len(pdev, bar_nr);
                bar->virt_addr = pci_iomap(accel_pci_dev->pci_dev, bar_nr, 0);
                if (!bar->virt_addr) {
                        dev_err(&pdev->dev, "Failed to map BAR %d\n", i);
                        ret = -EFAULT;
                        goto out_err;
                }
        }
        pci_set_master(pdev);

        if (adf_enable_aer(accel_dev, &adf_driver)) {
                dev_err(&pdev->dev, "Failed to enable aer\n");
                ret = -EFAULT;
                goto out_err;
        }

        if (pci_save_state(pdev)) {
                dev_err(&pdev->dev, "Failed to save pci state\n");
                ret = -ENOMEM;
                goto out_err;
        }

        ret = adf_dev_configure(accel_dev);
        if (ret)
                goto out_err;

        ret = adf_dev_init(accel_dev);
        if (ret)
                goto out_err;

        ret = adf_dev_start(accel_dev);
        if (ret) {
                adf_dev_stop(accel_dev);
                goto out_err;
        }

        return 0;
out_err:
        adf_cleanup_accel(accel_dev);
        return ret;
}

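/*
 * adf_remove() - PCI remove entry point: stop the device, disable AER and
 * release all resources taken in adf_probe().
 */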
static void adf_remove(struct pci_dev *pdev)
{
        struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

        if (!accel_dev) {
                pr_err("QAT: Driver removal failed\n");
                return;
        }
        if (adf_dev_stop(accel_dev))
                dev_err(&GET_DEV(accel_dev), "Failed to stop QAT accel dev\n");
        adf_disable_aer(accel_dev);
        adf_cleanup_accel(accel_dev);
}

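/*
 * Module init/exit: request the core intel_qat module, register via
 * qat_admin_register() and then register the PCI driver; unregister both
 * in reverse order on exit.
 */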
static int __init adfdrv_init(void)
{
        request_module("intel_qat");
        if (qat_admin_register())
                return -EFAULT;

        if (pci_register_driver(&adf_driver)) {
                pr_err("QAT: Driver initialization failed\n");
                qat_admin_unregister();
                return -EFAULT;
        }
        return 0;
}

static void __exit adfdrv_release(void)
{
        pci_unregister_driver(&adf_driver);
        qat_admin_unregister();
}

module_init(adfdrv_init);
module_exit(adfdrv_release);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_FIRMWARE("qat_895xcc.bin");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");