/*
  This file is provided under a dual BSD/GPLv2 license.  When using or
  redistributing this file, you may do so under either license.

  Copyright(c) 2014 Intel Corporation.
  This program is free software; you can redistribute it and/or modify
  it under the terms of version 2 of the GNU General Public License as
  published by the Free Software Foundation.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  Copyright(c) 2014 Intel Corporation.
  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions
  are met:

    * Redistributions of source code must retain the above copyright
      notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in
      the documentation and/or other materials provided with the
      distribution.
    * Neither the name of Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived
      from this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/aes.h>
#include <crypto/sha.h>
#include <crypto/hash.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/rng.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf_list {
	uint32_t num_bufs;
	uint32_t num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	uint8_t salt[AES_BLOCK_SIZE];
	spinlock_t lock;	/* protects qat_alg_aead_ctx struct */
};
struct qat_alg_ablkcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_tfm *tfm;
	spinlock_t lock;	/* protects qat_alg_ablkcipher_ctx struct */
};
static int get_current_node(void)
{
	return cpu_data(current_thread_info()->cpu).phys_proc_id;
}
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
	return -EFAULT;
}
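/*
 * Precompute the partial HMAC state for the session: XOR the auth key
 * with the ipad/opad constants, hash one block of each, and export the
 * intermediate digest state into the hardware auth setup block (state1)
 * in big-endian form.
 */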
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const uint8_t *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	struct sha1_state sha1;
	struct sha256_state sha256;
	struct sha512_state sha512;
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	char ipad[block_size];
	char opad[block_size];
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ipad, 0, block_size);
	memset(opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ipad);
		if (ret)
			return ret;

		memcpy(opad, ipad, digest_size);
	} else {
		memcpy(ipad, auth_key, auth_keylen);
		memcpy(opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ipad + i;
		char *opad_ptr = opad + i;
		*ipad_ptr ^= 0x36;
		*opad_ptr ^= 0x5C;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha1.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(*(sha256.state + i));
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(*(sha512.state + i));
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ipad, block_size);
	memzero_explicit(opad, block_size);
	return 0;
}
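/* Populate the LA request header fields common to all QAT crypto requests. */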
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
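/*
 * Build the encrypt content descriptor (cipher block followed by auth
 * block) and the cipher-then-hash firmware request template for an
 * authenc(hmac(shaX),cbc(aes)) session.
 */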
static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		return -EFAULT;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
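/*
 * Build the decrypt content descriptor (auth block followed by cipher
 * block) and the hash-then-cipher firmware request template; the auth
 * result is compared by the hardware rather than returned to the host.
 */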
static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
					 int alg,
					 struct crypto_authenc_keys *keys)
{
	struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
	unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		return -EFAULT;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			 round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
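/*
 * Fill the fields of an ablkcipher request template that are common to
 * the encrypt and decrypt directions: key, request header and cipher CD
 * control words.
 */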
static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
					struct icp_qat_fw_la_bulk_req *req,
					struct icp_qat_hw_cipher_algo_blk *cd,
					const uint8_t *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;

	memcpy(cd->aes.key, key, keylen);
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
}
static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
					int alg, const uint8_t *key,
					unsigned int keylen)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
}
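/* Map an AES key length onto the corresponding QAT hardware cipher algo id. */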
static int qat_alg_validate_key(int key_len, int *alg)
{
	switch (key_len) {
	case AES_KEYSIZE_128:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
		break;
	case AES_KEYSIZE_192:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
		break;
	case AES_KEYSIZE_256:
		*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
				      const uint8_t *key, unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
		return -EFAULT;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
		goto error;

	if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
		goto error;

	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
error:
	return -EFAULT;
}
static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
					    const uint8_t *key,
					    unsigned int keylen)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg))
		goto bad_key;

	qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
	qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
	return 0;
bad_key:
	crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
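/*
 * setkey for the AEAD algorithms: on first use allocate DMA-coherent
 * content descriptors from an instance on the local node; on rekey just
 * clear and rebuild the existing descriptors and request templates.
 */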
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_aead_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
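/* Unmap and free the flat buffer lists built for a request. */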
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
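/*
 * Flatten the assoc, iv and data scatterlists into the flat buffer lists
 * the firmware expects, DMA-mapping every segment. For out-of-place
 * operations a second list is built for the destination, reusing the
 * assoc and iv mappings.
 */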
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *assoc, int assoclen,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout, uint8_t *iv,
			       uint8_t ivlen,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, bufs = 0, sg_nctr = 0;
	int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err;

	for_each_sg(assoc, sg, assoc_n, i) {
		if (!sg->length)
			continue;

		if (!(assoclen > 0))
			break;

		bufl->bufers[bufs].addr =
			dma_map_single(dev, sg_virt(sg),
				       min_t(int, assoclen, sg->length),
				       DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = min_t(int, assoclen, sg->length);
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
		assoclen -= sg->length;
	}

	if (ivlen) {
		bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
							 DMA_BIDIRECTIONAL);
		bufl->bufers[bufs].len = ivlen;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
			goto err;
		bufs++;
	}

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr + bufs;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr + bufs;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = sizeof(struct qat_alg_buf_list) +
			((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err;
		bufers = buflout->bufers;
		/* For out of place operation dma map only data and
		 * reuse assoc mapping and iv */
		for (i = 0; i < bufs; i++) {
			bufers[i].len = bufl->bufers[i].len;
			bufers[i].addr = bufl->bufers[i].addr;
		}
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr + bufs;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr + bufs;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;
err:
	dev_err(dev, "Failed to map buf for dma\n");
	for (i = 0; i < n + bufs; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);
	if (sgl != sglout && buflout) {
		n = sg_nents(sglout);
		for (i = bufs; i < n + bufs; i++)
			if (!dma_mapping_error(dev, buflout->bufers[i].addr))
				dma_unmap_single(dev, buflout->bufers[i].addr,
						 buflout->bufers[i].len,
						 DMA_BIDIRECTIONAL);
		if (!dma_mapping_error(dev, bloutp))
			dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
		kfree(buflout);
	}
	return -ENOMEM;
}
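/*
 * Completion callbacks, run from the response ring handler: release the
 * DMA mappings and complete the original crypto request with the status
 * reported by the firmware.
 */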
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}
static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
					struct qat_crypto_request *qat_req)
{
	struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct ablkcipher_request *areq = qat_req->ablkcipher_req;
	uint8_t stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;
	areq->base.complete(&areq->base, res);
}
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
				(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
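/*
 * AEAD decrypt: map the request buffers, copy the per-request cipher and
 * auth parameters into a copy of the session's request template and post
 * it to the symmetric tx ring, retrying briefly if the ring is full.
 */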
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_crt(aead_tfm)->authsize;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
				  areq->src, areq->dst, areq->iv,
				  AES_BLOCK_SIZE, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = areq->cryptlen - digst_size;
	cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen +
				cipher_param->cipher_length + AES_BLOCK_SIZE;
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
				     int enc_iv)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->assoclen,
				  areq->src, areq->dst, iv, AES_BLOCK_SIZE,
				  qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((uint8_t *)cipher_param + sizeof(*cipher_param));

	if (enc_iv) {
		cipher_param->cipher_length = areq->cryptlen + AES_BLOCK_SIZE;
		cipher_param->cipher_offset = areq->assoclen;
	} else {
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
		cipher_param->cipher_length = areq->cryptlen;
		cipher_param->cipher_offset = areq->assoclen + AES_BLOCK_SIZE;
	}
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen + AES_BLOCK_SIZE;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_enc(struct aead_request *areq)
{
	return qat_alg_aead_enc_internal(areq, areq->iv, 0);
}
static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	__be64 seq;

	memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
	seq = cpu_to_be64(req->seq);
	memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
	       &seq, sizeof(uint64_t));
	return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
}
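/*
 * setkey for cbc(aes): allocate (or reuse) the DMA-coherent cipher
 * content descriptors and build the encrypt/decrypt request templates.
 */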
static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
				     const uint8_t *key,
				     unsigned int keylen)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct device *dev;

	spin_lock(&ctx->lock);
	if (ctx->enc_cd) {
		/* rekeying */
		dev = &GET_DEV(ctx->inst->accel_dev);
		memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
		memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
		memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
		memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
	} else {
		/* new key */
		int node = get_current_node();
		struct qat_crypto_instance *inst =
				qat_crypto_get_instance_node(node);
		if (!inst) {
			spin_unlock(&ctx->lock);
			return -EINVAL;
		}

		dev = &GET_DEV(inst->accel_dev);
		ctx->inst = inst;
		ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
						  &ctx->enc_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->enc_cd) {
			spin_unlock(&ctx->lock);
			return -ENOMEM;
		}
		ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
						  &ctx->dec_cd_paddr,
						  GFP_ATOMIC);
		if (!ctx->dec_cd) {
			spin_unlock(&ctx->lock);
			goto out_free_enc;
		}
	}
	spin_unlock(&ctx->lock);
	if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
	return -ENOMEM;
}
static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
				  NULL, 0, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret, ctr = 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, 0, req->src, req->dst,
				  NULL, 0, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->ablkcipher_ctx = ctx;
	qat_req->ablkcipher_req = req;
	qat_req->cb = qat_ablkcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->nbytes;
	cipher_param->cipher_offset = 0;
	memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
	do {
		ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_init(struct crypto_tfm *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return -EFAULT;
	spin_lock_init(&ctx->lock);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(__crypto_aead_cast(tfm),
				sizeof(struct aead_request) +
				sizeof(struct qat_crypto_request));
	ctx->tfm = tfm;
	return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_aead_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!IS_ERR(ctx->hash_tfm))
		crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

	spin_lock_init(&ctx->lock);
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
					sizeof(struct qat_crypto_request);
	ctx->tfm = tfm;
	return 0;
}
static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
{
	struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
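/*
 * Algorithm descriptors exposed to the crypto API: three authenc AEAD
 * variants (HMAC-SHA1/SHA256/SHA512 over cbc(aes)) and a plain cbc(aes)
 * ablkcipher, all asynchronous and backed by the QAT firmware.
 */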
static struct crypto_alg qat_algs[] = { {
	.cra_name = "authenc(hmac(sha1),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha1",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha1_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha256),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha256",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha256_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "authenc(hmac(sha512),cbc(aes))",
	.cra_driver_name = "qat_aes_cbc_hmac_sha512",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_aead_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_aead_sha512_init,
	.cra_exit = qat_alg_aead_exit,
	.cra_u = {
		.aead = {
			.setkey = qat_alg_aead_setkey,
			.decrypt = qat_alg_aead_dec,
			.encrypt = qat_alg_aead_enc,
			.givencrypt = qat_alg_aead_genivenc,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA512_DIGEST_SIZE,
		},
	},
}, {
	.cra_name = "cbc(aes)",
	.cra_driver_name = "qat_aes_cbc",
	.cra_priority = 4001,
	.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
	.cra_blocksize = AES_BLOCK_SIZE,
	.cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
	.cra_alignmask = 0,
	.cra_type = &crypto_ablkcipher_type,
	.cra_module = THIS_MODULE,
	.cra_init = qat_alg_ablkcipher_init,
	.cra_exit = qat_alg_ablkcipher_exit,
	.cra_u = {
		.ablkcipher = {
			.setkey = qat_alg_ablkcipher_setkey,
			.decrypt = qat_alg_ablkcipher_decrypt,
			.encrypt = qat_alg_ablkcipher_encrypt,
			.min_keysize = AES_MIN_KEY_SIZE,
			.max_keysize = AES_MAX_KEY_SIZE,
			.ivsize = AES_BLOCK_SIZE,
		},
	},
} };
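/*
 * Registration is reference counted across accelerator devices: the
 * algorithms are registered when the first device comes up and
 * unregistered when the last one goes away.
 */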
int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1) {
		int i;

		for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
			qat_algs[i].cra_flags =
				(qat_algs[i].cra_type == &crypto_aead_type) ?
				CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
				CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;

		ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
	}
	mutex_unlock(&algs_lock);
	return ret;
}
int qat_algs_unregister(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
	mutex_unlock(&algs_lock);
	return ret;
}
int qat_algs_init(void)
{
	crypto_get_default_rng();
	return 0;
}

void qat_algs_exit(void)
{
	crypto_put_default_rng();
}