treewide: remove redundant IS_ERR() before error code check
drivers/crypto/qat/qat_common/adf_ctl_drv.c
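The commit subject refers to a treewide cleanup that drops an IS_ERR() test when it immediately precedes a comparison of PTR_ERR() against a specific error code: PTR_ERR(p) can only equal a particular -E value when p is already an error pointer, so the extra check adds nothing. The IS_ERR() calls in this file guard class_create() and device_create() results and are not instances of that pattern; the sketch below only illustrates the construct named in the subject, using a hypothetical error-pointer variable `clk`.

	/* Illustrative only; `clk` stands for any ERR_PTR-returning lookup result. */

	/* Before: the IS_ERR() test is redundant. */
	if (IS_ERR(clk) && PTR_ERR(clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;

	/* After: PTR_ERR(clk) == -EPROBE_DEFER already implies IS_ERR(clk). */
	if (PTR_ERR(clk) == -EPROBE_DEFER)
		return -EPROBE_DEFER;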
/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/bitops.h>
#include <linux/pci.h>
#include <linux/cdev.h>
#include <linux/uaccess.h>
#include <linux/crypto.h>

#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_common.h"
#include "adf_cfg_user.h"

#define DEVICE_NAME "qat_adf_ctl"
static DEFINE_MUTEX(adf_ctl_lock);
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg);

static const struct file_operations adf_ctl_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = adf_ctl_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

struct adf_ctl_drv_info {
	unsigned int major;
	struct cdev drv_cdev;
	struct class *drv_class;
};

static struct adf_ctl_drv_info adf_ctl_drv;
static void adf_chr_drv_destroy(void)
{
	device_destroy(adf_ctl_drv.drv_class, MKDEV(adf_ctl_drv.major, 0));
	cdev_del(&adf_ctl_drv.drv_cdev);
	class_destroy(adf_ctl_drv.drv_class);
	unregister_chrdev_region(MKDEV(adf_ctl_drv.major, 0), 1);
}
static int adf_chr_drv_create(void)
{
	dev_t dev_id;
	struct device *drv_device;

	if (alloc_chrdev_region(&dev_id, 0, 1, DEVICE_NAME)) {
		pr_err("QAT: unable to allocate chrdev region\n");
		return -EFAULT;
	}

	adf_ctl_drv.drv_class = class_create(THIS_MODULE, DEVICE_NAME);
	if (IS_ERR(adf_ctl_drv.drv_class)) {
		pr_err("QAT: class_create failed for adf_ctl\n");
		goto err_chrdev_unreg;
	}
	adf_ctl_drv.major = MAJOR(dev_id);
	cdev_init(&adf_ctl_drv.drv_cdev, &adf_ctl_ops);
	if (cdev_add(&adf_ctl_drv.drv_cdev, dev_id, 1)) {
		pr_err("QAT: cdev add failed\n");
		goto err_class_destr;
	}

	drv_device = device_create(adf_ctl_drv.drv_class, NULL,
				   MKDEV(adf_ctl_drv.major, 0),
				   NULL, DEVICE_NAME);
	if (IS_ERR(drv_device)) {
		pr_err("QAT: failed to create device\n");
		goto err_cdev_del;
	}
	return 0;
err_cdev_del:
	cdev_del(&adf_ctl_drv.drv_cdev);
err_class_destr:
	class_destroy(adf_ctl_drv.drv_class);
err_chrdev_unreg:
	unregister_chrdev_region(dev_id, 1);
	return -EFAULT;
}
static int adf_ctl_alloc_resources(struct adf_user_cfg_ctl_data **ctl_data,
				   unsigned long arg)
{
	struct adf_user_cfg_ctl_data *cfg_data;

	cfg_data = kzalloc(sizeof(*cfg_data), GFP_KERNEL);
	if (!cfg_data)
		return -ENOMEM;

	/* Initialize device id to NO DEVICE as 0 is a valid device id */
	cfg_data->device_id = ADF_CFG_NO_DEVICE;

	if (copy_from_user(cfg_data, (void __user *)arg, sizeof(*cfg_data))) {
		pr_err("QAT: failed to copy from user cfg_data.\n");
		kfree(cfg_data);
		return -EIO;
	}

	*ctl_data = cfg_data;
	return 0;
}
static int adf_add_key_value_data(struct adf_accel_dev *accel_dev,
				  const char *section,
				  const struct adf_user_cfg_key_val *key_val)
{
	if (key_val->type == ADF_HEX) {
		long *ptr = (long *)key_val->val;
		long val = *ptr;

		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, (void *)val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add hex keyvalue.\n");
			return -EFAULT;
		}
	} else {
		if (adf_cfg_add_key_value_param(accel_dev, section,
						key_val->key, key_val->val,
						key_val->type)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add keyvalue.\n");
			return -EFAULT;
		}
	}
	return 0;
}
static int adf_copy_key_value_data(struct adf_accel_dev *accel_dev,
				   struct adf_user_cfg_ctl_data *ctl_data)
{
	struct adf_user_cfg_key_val key_val;
	struct adf_user_cfg_key_val *params_head;
	struct adf_user_cfg_section section, *section_head;

	section_head = ctl_data->config_section;

	while (section_head) {
		if (copy_from_user(&section, (void __user *)section_head,
				   sizeof(*section_head))) {
			dev_err(&GET_DEV(accel_dev),
				"failed to copy section info\n");
			goto out_err;
		}

		if (adf_cfg_section_add(accel_dev, section.name)) {
			dev_err(&GET_DEV(accel_dev),
				"failed to add section.\n");
			goto out_err;
		}

		params_head = section.params;

		while (params_head) {
			if (copy_from_user(&key_val, (void __user *)params_head,
					   sizeof(key_val))) {
				dev_err(&GET_DEV(accel_dev),
					"Failed to copy keyvalue.\n");
				goto out_err;
			}
			if (adf_add_key_value_data(accel_dev, section.name,
						   &key_val)) {
				goto out_err;
			}
			params_head = key_val.next;
		}
		section_head = section.next;
	}
	return 0;
out_err:
	adf_cfg_del_all(accel_dev);
	return -EFAULT;
}
static int adf_ctl_ioctl_dev_config(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_dev_started(accel_dev)) {
		ret = -EFAULT;
		goto out;
	}

	if (adf_copy_key_value_data(accel_dev, ctl_data)) {
		ret = -EFAULT;
		goto out;
	}
	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
out:
	kfree(ctl_data);
	return ret;
}
static int adf_ctl_is_device_in_use(int id)
{
	struct adf_accel_dev *dev;

	list_for_each_entry(dev, adf_devmgr_get_head(), list) {
		if (id == dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (adf_devmgr_in_reset(dev) || adf_dev_in_use(dev)) {
				dev_info(&GET_DEV(dev),
					 "device qat_dev%d is busy\n",
					 dev->accel_id);
				return -EBUSY;
			}
		}
	}
	return 0;
}
static void adf_ctl_stop_devices(uint32_t id)
{
	struct adf_accel_dev *accel_dev;

	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			/* First stop all VFs */
			if (!accel_dev->is_vf)
				continue;

			adf_dev_stop(accel_dev);
			adf_dev_shutdown(accel_dev);
		}
	}

	list_for_each_entry(accel_dev, adf_devmgr_get_head(), list) {
		if (id == accel_dev->accel_id || id == ADF_CFG_ALL_DEVICES) {
			if (!adf_dev_started(accel_dev))
				continue;

			adf_dev_stop(accel_dev);
			adf_dev_shutdown(accel_dev);
		}
	}
}
static int adf_ctl_ioctl_dev_stop(struct file *fp, unsigned int cmd,
				  unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	if (adf_devmgr_verify_id(ctl_data->device_id)) {
		pr_err("QAT: Device %d not found\n", ctl_data->device_id);
		ret = -ENODEV;
		goto out;
	}

	ret = adf_ctl_is_device_in_use(ctl_data->device_id);
	if (ret)
		goto out;

	if (ctl_data->device_id == ADF_CFG_ALL_DEVICES)
		pr_info("QAT: Stopping all acceleration devices.\n");
	else
		pr_info("QAT: Stopping acceleration device qat_dev%d.\n",
			ctl_data->device_id);

	adf_ctl_stop_devices(ctl_data->device_id);

out:
	kfree(ctl_data);
	return ret;
}
static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
				   unsigned long arg)
{
	int ret;
	struct adf_user_cfg_ctl_data *ctl_data;
	struct adf_accel_dev *accel_dev;

	ret = adf_ctl_alloc_resources(&ctl_data, arg);
	if (ret)
		return ret;

	ret = -ENODEV;
	accel_dev = adf_devmgr_get_dev_by_id(ctl_data->device_id);
	if (!accel_dev)
		goto out;

	if (!adf_dev_started(accel_dev)) {
		dev_info(&GET_DEV(accel_dev),
			 "Starting acceleration device qat_dev%d.\n",
			 ctl_data->device_id);
		ret = adf_dev_init(accel_dev);
		if (!ret)
			ret = adf_dev_start(accel_dev);
	} else {
		dev_info(&GET_DEV(accel_dev),
			 "Acceleration device qat_dev%d already started.\n",
			 ctl_data->device_id);
	}
	if (ret) {
		dev_err(&GET_DEV(accel_dev), "Failed to start qat_dev%d\n",
			ctl_data->device_id);
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}
out:
	kfree(ctl_data);
	return ret;
}
static int adf_ctl_ioctl_get_num_devices(struct file *fp, unsigned int cmd,
					 unsigned long arg)
{
	uint32_t num_devices = 0;

	adf_devmgr_get_num_dev(&num_devices);
	if (copy_to_user((void __user *)arg, &num_devices, sizeof(num_devices)))
		return -EFAULT;

	return 0;
}
static int adf_ctl_ioctl_get_status(struct file *fp, unsigned int cmd,
				    unsigned long arg)
{
	struct adf_hw_device_data *hw_data;
	struct adf_dev_status_info dev_info;
	struct adf_accel_dev *accel_dev;

	if (copy_from_user(&dev_info, (void __user *)arg,
			   sizeof(struct adf_dev_status_info))) {
		pr_err("QAT: failed to copy from user.\n");
		return -EFAULT;
	}

	accel_dev = adf_devmgr_get_dev_by_id(dev_info.accel_id);
	if (!accel_dev)
		return -ENODEV;

	hw_data = accel_dev->hw_device;
	dev_info.state = adf_dev_started(accel_dev) ? DEV_UP : DEV_DOWN;
	dev_info.num_ae = hw_data->get_num_aes(hw_data);
	dev_info.num_accel = hw_data->get_num_accels(hw_data);
	dev_info.num_logical_accel = hw_data->num_logical_accel;
	dev_info.banks_per_accel = hw_data->num_banks
					/ hw_data->num_logical_accel;
	strlcpy(dev_info.name, hw_data->dev_class->name, sizeof(dev_info.name));
	dev_info.instance_id = hw_data->instance_id;
	dev_info.type = hw_data->dev_class->type;
	dev_info.bus = accel_to_pci_dev(accel_dev)->bus->number;
	dev_info.dev = PCI_SLOT(accel_to_pci_dev(accel_dev)->devfn);
	dev_info.fun = PCI_FUNC(accel_to_pci_dev(accel_dev)->devfn);

	if (copy_to_user((void __user *)arg, &dev_info,
			 sizeof(struct adf_dev_status_info))) {
		dev_err(&GET_DEV(accel_dev), "failed to copy status.\n");
		return -EFAULT;
	}
	return 0;
}
static long adf_ctl_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
{
	int ret;

	if (mutex_lock_interruptible(&adf_ctl_lock))
		return -EFAULT;

	switch (cmd) {
	case IOCTL_CONFIG_SYS_RESOURCE_PARAMETERS:
		ret = adf_ctl_ioctl_dev_config(fp, cmd, arg);
		break;

	case IOCTL_STOP_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_stop(fp, cmd, arg);
		break;

	case IOCTL_START_ACCEL_DEV:
		ret = adf_ctl_ioctl_dev_start(fp, cmd, arg);
		break;

	case IOCTL_GET_NUM_DEVICES:
		ret = adf_ctl_ioctl_get_num_devices(fp, cmd, arg);
		break;

	case IOCTL_STATUS_ACCEL_DEV:
		ret = adf_ctl_ioctl_get_status(fp, cmd, arg);
		break;
	default:
		pr_err("QAT: Invalid ioctl\n");
		ret = -EFAULT;
		break;
	}
	mutex_unlock(&adf_ctl_lock);
	return ret;
}
static int __init adf_register_ctl_device_driver(void)
{
	mutex_init(&adf_ctl_lock);

	if (adf_chr_drv_create())
		goto err_chr_dev;

	if (adf_init_aer())
		goto err_aer;

	if (adf_init_pf_wq())
		goto err_pf_wq;

	if (adf_init_vf_wq())
		goto err_vf_wq;

	if (qat_crypto_register())
		goto err_crypto_register;

	return 0;

err_crypto_register:
	adf_exit_vf_wq();
err_vf_wq:
	adf_exit_pf_wq();
err_pf_wq:
	adf_exit_aer();
err_aer:
	adf_chr_drv_destroy();
err_chr_dev:
	mutex_destroy(&adf_ctl_lock);
	return -EFAULT;
}
static void __exit adf_unregister_ctl_device_driver(void)
{
	adf_chr_drv_destroy();
	adf_exit_aer();
	adf_exit_vf_wq();
	adf_exit_pf_wq();
	qat_crypto_unregister();
	adf_clean_vf_map(false);
	mutex_destroy(&adf_ctl_lock);
}
module_init(adf_register_ctl_device_driver);
module_exit(adf_unregister_ctl_device_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Intel");
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology");
MODULE_ALIAS_CRYPTO("intel_qat");
MODULE_VERSION(ADF_DRV_VERSION);