/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;
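
/*
 * Drop a reference on a crypto instance and on the acceleration device
 * that owns it.
 */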
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}
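
/*
 * Release any outstanding instance references, remove the transport rings
 * and free every crypto instance on the device's crypto_list.
 */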
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}
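
/*
 * Pick the least-referenced started device on the requested NUMA node
 * (devices without node affinity also qualify), falling back to any
 * started device if the node has none.  Within that device, return the
 * crypto instance with the lowest refctr, taking a reference on both
 * the instance and the device.
 */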
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev),
				"Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

/**
 * qat_crypto_dev_config() - create dev config required to create crypto inst.
 *
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates device configuration required to create crypto instances
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	int cpus = num_online_cpus();
	int banks = GET_MAX_BANKS(accel_dev);
	int instances = min(cpus, banks);
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int i;
	unsigned long val;

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		goto err;
	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
		goto err;
	for (i = 0; i < instances; i++) {
		/* The instance index doubles as ring bank and core affinity */
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		/* Fixed ring numbers within the bank for each service */
		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						key, (void *)&val, ADF_DEC))
			goto err;
	}

	/* i now holds the number of instances configured above */
	val = i;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
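
/*
 * Build one qat_crypto_instance per configured "Cy<n>" entry: look up the
 * instance's bank and ring sizes from the device configuration, then create
 * the sym/asym tx rings (no callback) and the matching rx rings, serviced by
 * qat_alg_callback and qat_alg_asym_callback respectively.
 */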
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	int i;
	unsigned long bank;
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	int msg_size;
	struct qat_crypto_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	strlcpy(key, ADF_NUM_CY, sizeof(key));
	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
		return -EFAULT;

	if (kstrtoul(val, 0, &num_inst))
		return -EFAULT;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst)
			goto err;

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &bank))
			goto err;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_sym))
			goto err;

		/* The configured sizes are halved before use as ring depths */
		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_asym))
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, NULL, 0, &inst->sym_tx))
			goto err;

		/* Asym requests are half the default request size */
		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->pke_tx))
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->sym_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_asym_callback, 0,
				    &inst->pke_rx))
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return -ENOMEM;
}
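
/*
 * Service lifecycle hooks, dispatched from qat_crypto_event_handler()
 * as the device is initialised and shut down.
 */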
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}
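
/*
 * Register/unregister this service with the ADF framework so the event
 * handler above is invoked for every acceleration device event.
 */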
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}