/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  GPL LICENSE SUMMARY
  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  BSD LICENSE
  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
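/*
 * Cipher configuration words placed in the hardware content descriptor.
 * The encrypt variant uses the key exactly as supplied (no conversion);
 * the decrypt variant asks the hardware to convert the key into the
 * form it needs for the AES decryption key schedule.
 */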
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;
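/*
 * Flat scatter-gather descriptor handed to the firmware: a small header
 * followed by a flexible array of per-segment entries.  num_mapped_bufs
 * records how many entries were actually DMA-mapped so the unmap path
 * can skip any leading unmapped slots of the output list.
 */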
struct qat_alg_buf_list {
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
};
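/*
 * Size of the inner hash state (state1) kept in the content descriptor
 * for each supported auth algorithm; used below to locate the outer
 * (opad) state that follows it.
 */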
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
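/*
 * Precompute the HMAC inner and outer partial hashes for a session.
 * The auth key is padded (or digested, if longer than one block) into
 * ipad/opad, one block of each is run through the software shash, and
 * the exported intermediate state is written big-endian into the
 * content descriptor: the inner state at state1, the outer state at
 * state1 + round_up(state size, 8).
 */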
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	if (offset < 0)
		return -EFAULT;

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}
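/*
 * Two flavours of the common request header: the AEAD path passes the
 * IV inline as 16-byte data with no state update, while the ablkcipher
 * path passes a 64-bit IV pointer and asks the hardware to update that
 * state, so the completion callback can copy the final IV back to the
 * request.
 */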
static void qat_alg_init_hdr_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_64BIT_PTR);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_UPDATE_STATE);
}

static void qat_alg_init_hdr_no_iv_updt(struct icp_qat_fw_comn_req_hdr *header)
{
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header,
				    int aead)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	if (aead)
		qat_alg_init_hdr_no_iv_updt(header);
	else
		qat_alg_init_hdr_iv_updt(header);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
}
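/*
 * Build the encrypt session: the content descriptor lays out the cipher
 * config and key followed by the auth setup, and the request template
 * chains the CIPHER slice into the AUTH slice so data is encrypted
 * first and then authenticated.
 */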
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
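/*
 * Build the decrypt session: hash first, then cipher.  The auth block
 * comes first in the content descriptor, the AUTH slice chains into the
 * CIPHER slice, and the hardware compares the digest itself
 * (ICP_QAT_FW_LA_CMP_AUTH_RES) instead of returning it.
 */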
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header, 1);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
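/*
 * Common setup shared by the ablkcipher encrypt and decrypt templates:
 * copy the key into the content descriptor and point the request at a
 * single CIPHER slice followed by the DRAM write-back.
 */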
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header, 0);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}

static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	/* CTR mode uses the forward (encrypt) transform for decryption */
	if (mode != ICP_QAT_HW_CIPHER_CTR_MODE)
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	else
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
}
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
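/*
 * Split the authenc() key blob into cipher and auth keys, validate the
 * AES key length, then build both the encrypt and decrypt firmware
 * session templates.  The temporary key material is wiped on all paths.
 */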
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}
static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const u8 *key,
					    unsigned int keylen,
					    int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const uint8_t *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}
static int qat_alg_aead_newkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = get_current_node();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}
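/*
 * Undo qat_alg_sgl_to_bufl(): unmap every data segment and the flat
 * buffer-list descriptors themselves, handling the separate output list
 * only when the request was out of place.
 */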
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
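/*
 * Translate the request's source (and, if different, destination)
 * scatterlists into the flat qat_alg_buf_list format: allocate the
 * descriptor, DMA-map every non-empty segment, then map the descriptor
 * itself so the firmware can fetch it.  On any mapping failure all
 * previously mapped entries are unwound.
 */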
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = struct_size(buflout, bufers, n + 1);
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}
static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	memcpy(areq->info, qat_req->iv, AES_BLOCK_SIZE);
	dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
			  qat_req->iv_paddr);

	areq->base.complete(&areq->base, res);
}
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
			(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
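/*
 * AEAD request paths: map the scatterlists, copy the prebuilt firmware
 * request template, fill in the per-request cipher/auth lengths and IV,
 * and post the message to the instance's sym_tx ring, retrying a
 * handful of times on -EAGAIN before giving up.
 */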
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	uint8_t *iv = areq->iv;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_ablkcipher_rekey(struct qat_alg_ablkcipher_ctx *ctx,
				    const u8 *key, unsigned int keylen,
				    int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
}
static int qat_alg_ablkcipher_newkey(struct qat_alg_ablkcipher_ctx *ctx,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = get_current_node();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_ablkcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const u8 *key, unsigned int keylen,
				     int mode)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_ablkcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_ablkcipher_newkey(ctx, key, keylen, mode);
}
static int qat_alg_ablkcipher_cbc_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_ablkcipher_ctr_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CTR_MODE);
}

static int qat_alg_ablkcipher_xts_setkey(struct crypto_ablkcipher *tfm,
					 const u8 *key, unsigned int keylen)
{
	return qat_alg_ablkcipher_setkey(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_XTS_MODE);
}
*req
)
1050 struct crypto_ablkcipher
*atfm
= crypto_ablkcipher_reqtfm(req
);
1051 struct crypto_tfm
*tfm
= crypto_ablkcipher_tfm(atfm
);
1052 struct qat_alg_ablkcipher_ctx
*ctx
= crypto_tfm_ctx(tfm
);
1053 struct qat_crypto_request
*qat_req
= ablkcipher_request_ctx(req
);
1054 struct icp_qat_fw_la_cipher_req_params
*cipher_param
;
1055 struct icp_qat_fw_la_bulk_req
*msg
;
1056 struct device
*dev
= &GET_DEV(ctx
->inst
->accel_dev
);
1059 if (req
->nbytes
== 0)
1062 qat_req
->iv
= dma_alloc_coherent(dev
, AES_BLOCK_SIZE
,
1063 &qat_req
->iv_paddr
, GFP_ATOMIC
);
1067 ret
= qat_alg_sgl_to_bufl(ctx
->inst
, req
->src
, req
->dst
, qat_req
);
1068 if (unlikely(ret
)) {
1069 dma_free_coherent(dev
, AES_BLOCK_SIZE
, qat_req
->iv
,
1074 msg
= &qat_req
->req
;
1075 *msg
= ctx
->enc_fw_req
;
1076 qat_req
->ablkcipher_ctx
= ctx
;
1077 qat_req
->ablkcipher_req
= req
;
1078 qat_req
->cb
= qat_ablkcipher_alg_callback
;
1079 qat_req
->req
.comn_mid
.opaque_data
= (uint64_t)(__force
long)qat_req
;
1080 qat_req
->req
.comn_mid
.src_data_addr
= qat_req
->buf
.blp
;
1081 qat_req
->req
.comn_mid
.dest_data_addr
= qat_req
->buf
.bloutp
;
1082 cipher_param
= (void *)&qat_req
->req
.serv_specif_rqpars
;
1083 cipher_param
->cipher_length
= req
->nbytes
;
1084 cipher_param
->cipher_offset
= 0;
1085 cipher_param
->u
.s
.cipher_IV_ptr
= qat_req
->iv_paddr
;
1086 memcpy(qat_req
->iv
, req
->info
, AES_BLOCK_SIZE
);
1088 ret
= adf_send_message(ctx
->inst
->sym_tx
, (uint32_t *)msg
);
1089 } while (ret
== -EAGAIN
&& ctr
++ < 10);
1091 if (ret
== -EAGAIN
) {
1092 qat_alg_free_bufl(ctx
->inst
, qat_req
);
1093 dma_free_coherent(dev
, AES_BLOCK_SIZE
, qat_req
->iv
,
1097 return -EINPROGRESS
;
static int qat_alg_ablkcipher_blk_encrypt(struct ablkcipher_request *req)
{
	if (req->nbytes % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_ablkcipher_encrypt(req);
}
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);
	int ret, ctr = 0;

	if (req->nbytes == 0)
		return 0;

	qat_req->iv = dma_alloc_coherent(dev, AES_BLOCK_SIZE,
					 &qat_req->iv_paddr, GFP_ATOMIC);
	if (!qat_req->iv)
		return -ENOMEM;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret)) {
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return ret;
	}

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	cipher_param->u.s.cipher_IV_ptr = qat_req->iv_paddr;
	memcpy(qat_req->iv, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		dma_free_coherent(dev, AES_BLOCK_SIZE, qat_req->iv,
				  qat_req->iv_paddr);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_ablkcipher_blk_decrypt(struct ablkcipher_request *req)
{
	if (req->nbytes % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_ablkcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	tfm->crt_ablkcipher.reqsize = sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}
static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
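/*
 * Algorithm tables registered with the crypto API: three authenc()
 * AEADs (AES-CBC with HMAC-SHA1/256/512) and three plain AES modes
 * (CBC, CTR, XTS) exposed through the ablkcipher interface.
 */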
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };
static struct crypto_alg qat_algs[] = { {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_cbc_setkey,
			.decrypt = qat_alg_ablkcipher_blk_decrypt,
			.encrypt = qat_alg_ablkcipher_blk_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "ctr(aes)",
	.cra_driver_name = "qat_aes_ctr",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_ctr_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
}, {
	.cra_name = "xts(aes)",
	.cra_driver_name = "qat_aes_xts",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_xts_setkey,
			.decrypt = qat_alg_ablkcipher_blk_decrypt,
			.encrypt = qat_alg_ablkcipher_blk_encrypt,
			.min_keysize = 2 * AES_MIN_KEY_SIZE,
			.max_keysize = 2 * AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
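/*
 * Registration is reference-counted per accelerator device under
 * algs_lock: only the first device to come up registers the algorithms
 * and only the last one to go away unregisters them.
 */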
int qat_algs_register(void)
{
	int ret = 0, i;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
		qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

	ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	if (ret)
		goto unlock;

	for (i = 0; i < ARRAY_SIZE(qat_aeads); i++)
		qat_aeads[i].base.cra_flags = CRYPTO_ALG_ASYNC;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	goto unlock;
}
void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));

unlock:
	mutex_unlock(&algs_lock);
}