/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"
#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;
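
/* Release a reference taken with qat_crypto_get_instance_node(): drop the
 * instance refcount and the reference held on the owning accel device.
 */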
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}
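
/* Tear down all crypto instances on a device: drop any outstanding
 * references, remove the transport rings and free the instance memory.
 * Rings may be NULL when an instance was only partially created.
 */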
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}
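
/* Pick the least referenced crypto instance, preferring devices that live
 * on the requested NUMA node; fall back to any started device that has
 * crypto instances. Takes a reference on both the device and the instance,
 * which the caller releases with qat_crypto_put_instance().
 */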
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}
	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}
	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev),
				"Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}
/**
 * qat_crypto_dev_config() - create dev config required to create crypto inst.
 *
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates device configuration required to create crypto instances.
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	int cpus = num_online_cpus();
	int banks = GET_MAX_BANKS(accel_dev);
	int instances = min(cpus, banks);
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	unsigned long val;
	int i;

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		goto err;
	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
		goto err;
	for (i = 0; i < instances; i++) {
		/* Bank number and core affinity default to the instance id */
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		/* Ring sizes and ring numbers below are restored from the
		 * stock driver configuration defaults.
		 */
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						key, (void *)&val, ADF_DEC))
			goto err;
	}

	val = i;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);
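
/* Parse the kernel section of the device configuration and create one
 * qat_crypto_instance per configured "Cy<n>" entry, including its sym and
 * asym (PKE) transport rings. On any failure, everything created so far
 * is freed via qat_crypto_free_instances().
 */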
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	int i;
	unsigned long bank;
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	int msg_size;
	struct qat_crypto_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	strlcpy(key, ADF_NUM_CY, sizeof(key));
	if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
		return -EFAULT;

	if (kstrtoul(val, 0, &num_inst))
		return -EFAULT;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst)
			goto err;

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &bank))
			goto err;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_sym))
			goto err;

		/* Ring depth is half the configured concurrent request count */
		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_asym))
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, NULL, 0, &inst->sym_tx))
			goto err;

		/* Asym (PKE) requests are half the size of sym requests */
		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->pke_tx))
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->sym_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_asym_callback, 0,
				    &inst->pke_rx))
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return -ENOMEM;
}
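
/* Service hooks: init creates the crypto instances when the device comes
 * up, shutdown tears them down again.
 */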
static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}
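
/* Dispatch ADF lifecycle events to the init/shutdown handlers above;
 * all other events are ignored.
 */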
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	default:
		ret = 0;
	}
	return ret;
}
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}