drivers/crypto/qat/qat_common/qat_algs.c
/*
  This file is provided under a dual BSD/GPLv2 license. When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  Contact Information:
  qat-linux@intel.com

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
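/*
 * Flat scatter-gather descriptors handed to the firmware: each qat_alg_buf
 * describes one DMA-mapped segment, and qat_alg_buf_list is the table that
 * qat_alg_sgl_to_bufl() builds from a kernel scatterlist.
 */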
struct qat_alg_buf {
	uint32_t len;
	uint32_t resrvd;
	uint64_t addr;
} __packed;

struct qat_alg_buf_list {
	uint64_t resrvd;
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_skcipher *tfm;
};
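/* Size of the hardware hash state block for the selected auth algorithm */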
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
	return -EFAULT;
}
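/*
 * Precompute the HMAC ipad/opad partial hashes for the session and store
 * the exported digest states into the hardware content descriptor.
 */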
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	if (offset < 0)
		return -EFAULT;

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}
static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_UPDATE_STATE);
}
static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
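/*
 * Fill the common LA request header. AEAD sessions carry the IV inline as
 * 16-byte data with no state update; skciphers pass an IV pointer and let
 * the firmware update the cipher state.
 */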
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    int aead)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	if (aead)
		qat_alg_init_hdr_no_iv_updt(header);
	else
		qat_alg_init_hdr_iv_updt(header);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
}
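/* Build the encrypt (cipher-then-hash) content descriptor and request template */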
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
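/* Build the decrypt (hash-then-cipher) content descriptor and request template */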
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
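/* Common content descriptor and request template setup for skcipher sessions */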
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
				      struct icp_qat_fw_la_bulk_req *req,
				      struct icp_qat_hw_cipher_algo_blk *cd,
				      const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header, 0);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const uint8_t *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const uint8_t *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}
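/* Map the AES key length (doubled for XTS) onto the hardware algorithm id */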
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
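/* Split the authenc() key and program both the encrypt and decrypt sessions */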
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}
static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
					  const uint8_t *key,
					  unsigned int keylen,
					  int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		return -EINVAL;

	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}
static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = get_current_node();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}
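/* Unmap and free the firmware buffer lists built by qat_alg_sgl_to_bufl() */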
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
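/*
 * DMA-map the source (and, for out of place requests, destination)
 * scatterlists and describe them in firmware buffer lists.
 */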
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = struct_size(buflout, bufers, n + 1);
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
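/* Completion handlers invoked from qat_alg_callback() on firmware responses */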
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}
static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				      struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct skcipher_request *sreq = qat_req->skcipher_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);
	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
			  qat_req->iv_paddr);

	sreq->base.complete(&sreq->base, res);
}
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
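/*
 * AEAD request submission: copy the session template into the request
 * message and post it to the symmetric TX ring; on decrypt the cipher
 * length excludes the trailing auth tag.
 */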
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
				  const u8 *key, unsigned int keylen,
				  int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}
static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = get_current_node();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}
static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}
static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_XTS_MODE);
}
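/*
 * Queue a cipher-only request. The IV is copied into a DMA-coherent buffer
 * that lives until the response callback runs.
 */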
static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret, ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
					 &qat_req->iv_paddr, GFP_ATOMIC);
	if (!qat_req->iv)
		return -ENOMEM;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret)) {
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return ret;
	}

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;
	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_encrypt(req);
}
static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret, ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
					 &qat_req->iv_paddr, GFP_ATOMIC);
	if (!qat_req->iv)
		return -ENOMEM;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret)) {
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return ret;
	}

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;
	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
	memcpy(qat_req->iv, req->iv, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return -EBUSY;
	}
	return -EINPROGRESS;
}

static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}

static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	ctx->tfm = tfm;
	return 0;
}

static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };
static struct skcipher_alg qat_skciphers[] = { {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "qat_aes_cbc",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_cbc_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "qat_aes_ctr",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_ctr_setkey,
	.decrypt = qat_alg_skcipher_decrypt,
	.encrypt = qat_alg_skcipher_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "qat_aes_xts",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_xts_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
} };
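/* Register the algorithms once, when the first accelerator device comes up */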
int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(qat_skciphers,
					ARRAY_SIZE(qat_skciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
	goto unlock;
}
void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));

unlock:
	mutex_unlock(&algs_lock);
}