// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/crypto.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/aes.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hash.h>
#include <crypto/hmac.h>
#include <crypto/algapi.h>
#include <crypto/authenc.h>
#include <crypto/scatterwalk.h>
#include <crypto/xts.h>
#include <linux/dma-mapping.h>
#include "adf_accel_devices.h"
#include "adf_transport.h"
#include "adf_common_drv.h"
#include "qat_crypto.h"
#include "icp_qat_hw.h"
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
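
/*
 * Cipher-config helpers: encryption always uses the key as supplied
 * (no convert); CBC decryption asks the hardware to convert the key to
 * its decrypt form; the "no convert" decrypt variant is used when the
 * key reversal is done in software instead (AES-v2 XTS).
 */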
#define QAT_AES_HW_CONFIG_ENC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_ENCRYPT)

#define QAT_AES_HW_CONFIG_DEC(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_KEY_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode) \
	ICP_QAT_HW_CIPHER_CONFIG_BUILD(mode, alg, \
				       ICP_QAT_HW_CIPHER_NO_CONVERT, \
				       ICP_QAT_HW_CIPHER_DECRYPT)

#define HW_CAP_AES_V2(accel_dev) \
	(GET_HW_DATA(accel_dev)->accel_capabilities_mask & \
	 ICP_ACCEL_CAPABILITIES_AES_V2)
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

struct qat_alg_buf {
	u32 len;
	u32 resrvd;
	u64 addr;
} __packed;

struct qat_alg_buf_list {
	u64 resrvd;
	u32 num_bufs;
	u32 num_mapped_bufs;
	struct qat_alg_buf bufers[];
} __packed __aligned(64);
/* Common content descriptor */
struct qat_alg_cd {
	union {
		struct qat_enc { /* Encrypt content desc */
			struct icp_qat_hw_cipher_algo_blk cipher;
			struct icp_qat_hw_auth_algo_blk hash;
		} qat_enc_cd;
		struct qat_dec { /* Decrypt content desc */
			struct icp_qat_hw_auth_algo_blk hash;
			struct icp_qat_hw_cipher_algo_blk cipher;
		} qat_dec_cd;
	};
} __aligned(64);
struct qat_alg_aead_ctx {
	struct qat_alg_cd *enc_cd;
	struct qat_alg_cd *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct crypto_shash *hash_tfm;
	enum icp_qat_hw_auth_algo qat_hash_alg;
	struct qat_crypto_instance *inst;
	union {
		struct sha1_state sha1;
		struct sha256_state sha256;
		struct sha512_state sha512;
	};
	char ipad[SHA512_BLOCK_SIZE]; /* sufficient for SHA-1/SHA-256 as well */
	char opad[SHA512_BLOCK_SIZE];
};
struct qat_alg_skcipher_ctx {
	struct icp_qat_hw_cipher_algo_blk *enc_cd;
	struct icp_qat_hw_cipher_algo_blk *dec_cd;
	dma_addr_t enc_cd_paddr;
	dma_addr_t dec_cd_paddr;
	struct icp_qat_fw_la_bulk_req enc_fw_req;
	struct icp_qat_fw_la_bulk_req dec_fw_req;
	struct qat_crypto_instance *inst;
	struct crypto_skcipher *ftfm;
	struct crypto_cipher *tweak;
	bool fallback;
	int mode;
};
static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
{
	switch (qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		return ICP_QAT_HW_SHA1_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		return ICP_QAT_HW_SHA256_STATE1_SZ;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		return ICP_QAT_HW_SHA512_STATE1_SZ;
	default:
		return -EFAULT;
	}
}
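
/*
 * Precompute the inner/outer HMAC hash states for the firmware: the key
 * (or its digest, if longer than one block) is XORed with the ipad/opad
 * constants, one block is hashed, and the partial state is exported into
 * the content descriptor in big-endian form.
 */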
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
				  struct qat_alg_aead_ctx *ctx,
				  const u8 *auth_key,
				  unsigned int auth_keylen)
{
	SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
	int block_size = crypto_shash_blocksize(ctx->hash_tfm);
	int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
	__be32 *hash_state_out;
	__be64 *hash512_state_out;
	int i, offset;

	memset(ctx->ipad, 0, block_size);
	memset(ctx->opad, 0, block_size);
	shash->tfm = ctx->hash_tfm;

	if (auth_keylen > block_size) {
		int ret = crypto_shash_digest(shash, auth_key,
					      auth_keylen, ctx->ipad);
		if (ret)
			return ret;

		memcpy(ctx->opad, ctx->ipad, digest_size);
	} else {
		memcpy(ctx->ipad, auth_key, auth_keylen);
		memcpy(ctx->opad, auth_key, auth_keylen);
	}

	for (i = 0; i < block_size; i++) {
		char *ipad_ptr = ctx->ipad + i;
		char *opad_ptr = ctx->opad + i;
		*ipad_ptr ^= HMAC_IPAD_VALUE;
		*opad_ptr ^= HMAC_OPAD_VALUE;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->ipad, block_size))
		return -EFAULT;

	hash_state_out = (__be32 *)hash->sha.state1;
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}

	if (crypto_shash_init(shash))
		return -EFAULT;

	if (crypto_shash_update(shash, ctx->opad, block_size))
		return -EFAULT;

	offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8);
	if (offset < 0)
		return -EFAULT;

	hash_state_out = (__be32 *)(hash->sha.state1 + offset);
	hash512_state_out = (__be64 *)hash_state_out;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		if (crypto_shash_export(shash, &ctx->sha1))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha1.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		if (crypto_shash_export(shash, &ctx->sha256))
			return -EFAULT;
		for (i = 0; i < digest_size >> 2; i++, hash_state_out++)
			*hash_state_out = cpu_to_be32(ctx->sha256.state[i]);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		if (crypto_shash_export(shash, &ctx->sha512))
			return -EFAULT;
		for (i = 0; i < digest_size >> 3; i++, hash512_state_out++)
			*hash512_state_out = cpu_to_be64(ctx->sha512.state[i]);
		break;
	default:
		return -EFAULT;
	}
	memzero_explicit(ctx->ipad, block_size);
	memzero_explicit(ctx->opad, block_size);
	return 0;
}
static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
{
	header->hdr_flags =
		ICP_QAT_FW_COMN_HDR_FLAGS_BUILD(ICP_QAT_FW_COMN_REQ_FLAG_SET);
	header->service_type = ICP_QAT_FW_COMN_REQ_CPM_FW_LA;
	header->comn_req_flags =
		ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
					    QAT_COMN_PTR_TYPE_SGL);
	ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
				  ICP_QAT_FW_LA_PARTIAL_NONE);
	ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
					   ICP_QAT_FW_CIPH_IV_16BYTE_DATA);
	ICP_QAT_FW_LA_PROTO_SET(header->serv_specif_flags,
				ICP_QAT_FW_LA_NO_PROTO);
	ICP_QAT_FW_LA_UPDATE_STATE_SET(header->serv_specif_flags,
				       ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
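
/*
 * Build the encrypt content descriptor and request template: cipher
 * config and key first, hash setup after it, with the firmware slice
 * chain set to CIPHER -> AUTH -> DRAM write-back.
 */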
static int qat_alg_aead_init_enc_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd;
	struct icp_qat_hw_cipher_algo_blk *cipher = &enc_ctx->cipher;
	struct icp_qat_hw_auth_algo_blk *hash =
		(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
		sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg, digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = ((char *)hash - (char *)cipher) >> 3;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}
	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
	return 0;
}
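
/*
 * Decrypt content descriptor: hash setup first, cipher config after the
 * (rounded) hash state, slice chain AUTH -> CIPHER. The firmware compares
 * the digest itself (ICP_QAT_FW_LA_CMP_AUTH_RES) instead of returning it.
 */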
static int qat_alg_aead_init_dec_session(struct crypto_aead *aead_tfm,
					 int alg,
					 struct crypto_authenc_keys *keys,
					 int mode)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(aead_tfm);
	unsigned int digestsize = crypto_aead_authsize(aead_tfm);
	struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd;
	struct icp_qat_hw_auth_algo_blk *hash = &dec_ctx->hash;
	struct icp_qat_hw_cipher_algo_blk *cipher =
		(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
		sizeof(struct icp_qat_hw_auth_setup) +
		roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
	struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
	void *ptr = &req_tmpl->cd_ctrl;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cipher_cd_ctrl = ptr;
	struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
	struct icp_qat_fw_la_auth_req_params *auth_param =
		(struct icp_qat_fw_la_auth_req_params *)
		((char *)&req_tmpl->serv_specif_rqpars +
		sizeof(struct icp_qat_fw_la_cipher_req_params));

	/* CD setup */
	cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg, mode);
	memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
	hash->sha.inner_setup.auth_config.config =
		ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
					     ctx->qat_hash_alg,
					     digestsize);
	hash->sha.inner_setup.auth_counter.counter =
		cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm));

	if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen))
		return -EFAULT;

	/* Request setup */
	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
	ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
					   ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
	ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_NO_RET_AUTH_RES);
	ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
				   ICP_QAT_FW_LA_CMP_AUTH_RES);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
	cd_pars->u.s.content_desc_params_sz = sizeof(struct qat_alg_cd) >> 3;

	/* Cipher CD config setup */
	cipher_cd_ctrl->cipher_key_sz = keys->enckeylen >> 3;
	cipher_cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cipher_cd_ctrl->cipher_cfg_offset =
		(sizeof(struct icp_qat_hw_auth_setup) +
		 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3;
	ICP_QAT_FW_COMN_CURR_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cipher_cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);

	/* Auth CD config setup */
	hash_cd_ctrl->hash_cfg_offset = 0;
	hash_cd_ctrl->hash_flags = ICP_QAT_FW_AUTH_HDR_FLAG_NO_NESTED;
	hash_cd_ctrl->inner_res_sz = digestsize;
	hash_cd_ctrl->final_sz = digestsize;

	switch (ctx->qat_hash_alg) {
	case ICP_QAT_HW_AUTH_ALGO_SHA1:
		hash_cd_ctrl->inner_state1_sz =
			round_up(ICP_QAT_HW_SHA1_STATE1_SZ, 8);
		hash_cd_ctrl->inner_state2_sz =
			round_up(ICP_QAT_HW_SHA1_STATE2_SZ, 8);
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA256:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA256_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA256_STATE2_SZ;
		break;
	case ICP_QAT_HW_AUTH_ALGO_SHA512:
		hash_cd_ctrl->inner_state1_sz = ICP_QAT_HW_SHA512_STATE1_SZ;
		hash_cd_ctrl->inner_state2_sz = ICP_QAT_HW_SHA512_STATE2_SZ;
		break;
	default:
		break;
	}

	hash_cd_ctrl->inner_state2_offset = hash_cd_ctrl->hash_cfg_offset +
			((sizeof(struct icp_qat_hw_auth_setup) +
			  round_up(hash_cd_ctrl->inner_state1_sz, 8)) >> 3);
	auth_param->auth_res_sz = digestsize;
	ICP_QAT_FW_COMN_CURR_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_AUTH);
	ICP_QAT_FW_COMN_NEXT_ID_SET(hash_cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	return 0;
}
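
/*
 * Common part of the skcipher content descriptor: plain AES key copy for
 * the legacy cipher slice, or the UCS slice layout on AES-v2 capable
 * devices (XTS keeps both keys in the CD, CTR rounds the key length up
 * to a multiple of 16 bytes).
 */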
static void qat_alg_skcipher_init_com(struct qat_alg_skcipher_ctx *ctx,
				      struct icp_qat_fw_la_bulk_req *req,
				      struct icp_qat_hw_cipher_algo_blk *cd,
				      const u8 *key, unsigned int keylen)
{
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
	struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	int mode = ctx->mode;

	qat_alg_init_common_hdr(header);
	header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
	cd_pars->u.s.content_desc_params_sz =
				sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);

		/* Store both XTS keys in CD, only the first key is sent
		 * to the HW, the second key is used for tweak calculation
		 */
		memcpy(cd->ucs_aes.key, key, keylen);
		keylen = keylen / 2;
	} else if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_CTR_MODE) {
		ICP_QAT_FW_LA_SLICE_TYPE_SET(header->serv_specif_flags,
					     ICP_QAT_FW_LA_USE_UCS_SLICE_TYPE);
		keylen = round_up(keylen, 16);
		memcpy(cd->ucs_aes.key, key, keylen);
	} else {
		memcpy(cd->aes.key, key, keylen);
	}

	/* Cipher CD config setup */
	cd_ctrl->cipher_key_sz = keylen >> 3;
	cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
	cd_ctrl->cipher_cfg_offset = 0;
	ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
	ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
}
static void qat_alg_skcipher_init_enc(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;

	qat_alg_skcipher_init_com(ctx, req, enc_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
	enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg, mode);
}
static void qat_alg_xts_reverse_key(const u8 *key_forward, unsigned int keylen,
				    u8 *key_reverse)
{
	struct crypto_aes_ctx aes_expanded;
	int nrounds;
	u8 *key;

	aes_expandkey(&aes_expanded, key_forward, keylen);
	if (keylen == AES_KEYSIZE_128) {
		nrounds = 10;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
	} else {
		/* AES_KEYSIZE_256 */
		nrounds = 14;
		key = (u8 *)aes_expanded.key_enc + (AES_BLOCK_SIZE * nrounds);
		memcpy(key_reverse, key, AES_BLOCK_SIZE);
		memcpy(key_reverse + AES_BLOCK_SIZE, key - AES_BLOCK_SIZE,
		       AES_BLOCK_SIZE);
	}
}
static void qat_alg_skcipher_init_dec(struct qat_alg_skcipher_ctx *ctx,
				      int alg, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
	struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
	struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);

	qat_alg_skcipher_init_com(ctx, req, dec_cd, key, keylen);
	cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;

	if (aes_v2_capable && mode == ICP_QAT_HW_CIPHER_XTS_MODE) {
		/* Key reversing not supported, set no convert */
		dec_cd->aes.cipher_config.val =
				QAT_AES_HW_CONFIG_DEC_NO_CONV(alg, mode);

		/* In-place key reversal */
		qat_alg_xts_reverse_key(dec_cd->ucs_aes.key, keylen / 2,
					dec_cd->ucs_aes.key);
	} else if (mode != ICP_QAT_HW_CIPHER_CTR_MODE) {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_DEC(alg, mode);
	} else {
		dec_cd->aes.cipher_config.val =
					QAT_AES_HW_CONFIG_ENC(alg, mode);
	}
}
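
/* Map an AES key length onto the QAT algorithm id; XTS keys are double length. */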
static int qat_alg_validate_key(int key_len, int *alg, int mode)
{
	if (mode != ICP_QAT_HW_CIPHER_XTS_MODE) {
		switch (key_len) {
		case AES_KEYSIZE_128:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_192:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
			break;
		case AES_KEYSIZE_256:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	} else {
		switch (key_len) {
		case AES_KEYSIZE_128 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
			break;
		case AES_KEYSIZE_256 << 1:
			*alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
static int qat_alg_aead_init_sessions(struct crypto_aead *tfm, const u8 *key,
				      unsigned int keylen, int mode)
{
	struct crypto_authenc_keys keys;
	int alg;

	if (crypto_authenc_extractkeys(&keys, key, keylen))
		goto bad_key;

	if (qat_alg_validate_key(keys.enckeylen, &alg, mode))
		goto bad_key;

	if (qat_alg_aead_init_enc_session(tfm, alg, &keys, mode))
		goto error;

	if (qat_alg_aead_init_dec_session(tfm, alg, &keys, mode))
		goto error;

	memzero_explicit(&keys, sizeof(keys));
	return 0;
bad_key:
	memzero_explicit(&keys, sizeof(keys));
	return -EINVAL;
error:
	memzero_explicit(&keys, sizeof(keys));
	return -EFAULT;
}
static int qat_alg_skcipher_init_sessions(struct qat_alg_skcipher_ctx *ctx,
					  const u8 *key, unsigned int keylen,
					  int mode)
{
	int alg;

	if (qat_alg_validate_key(keylen, &alg, mode))
		return -EINVAL;

	qat_alg_skcipher_init_enc(ctx, alg, key, keylen, mode);
	qat_alg_skcipher_init_dec(ctx, alg, key, keylen, mode);
	return 0;
}
static int qat_alg_aead_rekey(struct crypto_aead *tfm, const u8 *key,
			      unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_aead_init_sessions(tfm, key, keylen,
					  ICP_QAT_HW_CIPHER_CBC_MODE);
}
static int qat_alg_aead_newkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = NULL;
	int node = get_current_node();
	struct device *dev;
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_inst;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_aead_init_sessions(tfm, key, keylen,
					 ICP_QAT_HW_CIPHER_CBC_MODE);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
	dma_free_coherent(dev, sizeof(struct qat_alg_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_inst:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}
static int qat_alg_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (ctx->enc_cd)
		return qat_alg_aead_rekey(tfm, key, keylen);
	else
		return qat_alg_aead_newkey(tfm, key, keylen);
}
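
/*
 * Scatterlist handling: requests are described to the firmware as flat
 * qat_alg_buf_list arrays; the helpers below unmap/free and build them.
 */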
static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
			      struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	struct qat_alg_buf_list *bl = qat_req->buf.bl;
	struct qat_alg_buf_list *blout = qat_req->buf.blout;
	dma_addr_t blp = qat_req->buf.blp;
	dma_addr_t blpout = qat_req->buf.bloutp;
	size_t sz = qat_req->buf.sz;
	size_t sz_out = qat_req->buf.sz_out;
	int i;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->bufers[i].addr,
				 bl->bufers[i].len, DMA_BIDIRECTIONAL);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bl);
	if (blp != blpout) {
		/* If out of place operation dma unmap only data */
		int bufless = blout->num_bufs - blout->num_mapped_bufs;

		for (i = bufless; i < blout->num_bufs; i++) {
			dma_unmap_single(dev, blout->bufers[i].addr,
					 blout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
		kfree(blout);
	}
}
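
/*
 * Map src/dst scatterlists into firmware buffer lists. The destination is
 * only mapped separately for out-of-place requests; otherwise src and dst
 * share a single list.
 */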
static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
			       struct scatterlist *sgl,
			       struct scatterlist *sglout,
			       struct qat_crypto_request *qat_req)
{
	struct device *dev = &GET_DEV(inst->accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp;
	dma_addr_t bloutp = 0;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, bufers, n + 1);

	if (unlikely(!n))
		return -EINVAL;

	bufl = kzalloc_node(sz, GFP_ATOMIC,
			    dev_to_node(&GET_DEV(inst->accel_dev)));
	if (unlikely(!bufl))
		return -ENOMEM;

	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;

	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
						      sg->length,
						      DMA_BIDIRECTIONAL);
		bufl->bufers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
			goto err_in;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	qat_req->buf.bl = bufl;
	qat_req->buf.blp = blp;
	qat_req->buf.sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *bufers;

		n = sg_nents(sglout);
		sz_out = struct_size(buflout, bufers, n + 1);
		sg_nctr = 0;
		buflout = kzalloc_node(sz_out, GFP_ATOMIC,
				       dev_to_node(&GET_DEV(inst->accel_dev)));
		if (unlikely(!buflout))
			goto err_in;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		bufers = buflout->bufers;
		for_each_sg(sglout, sg, n, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			bufers[y].addr = dma_map_single(dev, sg_virt(sg),
							sg->length,
							DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
				goto err_out;
			bufers[y].len = sg->length;
			sg_nctr++;
		}
		buflout->num_bufs = sg_nctr;
		buflout->num_mapped_bufs = sg_nctr;
		qat_req->buf.blout = buflout;
		qat_req->buf.bloutp = bloutp;
		qat_req->buf.sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		qat_req->buf.bloutp = qat_req->buf.blp;
		qat_req->buf.sz_out = 0;
	}
	return 0;

err_out:
	n = sg_nents(sglout);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, buflout->bufers[i].addr))
			dma_unmap_single(dev, buflout->bufers[i].addr,
					 buflout->bufers[i].len,
					 DMA_BIDIRECTIONAL);
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
	kfree(buflout);

err_in:
	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->bufers[i].addr))
			dma_unmap_single(dev, bufl->bufers[i].addr,
					 bufl->bufers[i].len,
					 DMA_BIDIRECTIONAL);

	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
	kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}
static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				  struct qat_crypto_request *qat_req)
{
	struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct aead_request *areq = qat_req->aead_req;
	u8 stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EBADMSG;
	areq->base.complete(&areq->base, res);
}
static void qat_alg_update_iv_ctr_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u64 iv_lo_prev;
	u64 iv_lo;
	u64 iv_hi;

	memcpy(qat_req->iv, sreq->iv, AES_BLOCK_SIZE);

	iv_lo = be64_to_cpu(qat_req->iv_lo);
	iv_hi = be64_to_cpu(qat_req->iv_hi);

	iv_lo_prev = iv_lo;
	iv_lo += DIV_ROUND_UP(sreq->cryptlen, AES_BLOCK_SIZE);
	if (iv_lo < iv_lo_prev)
		iv_hi++;

	qat_req->iv_lo = cpu_to_be64(iv_lo);
	qat_req->iv_hi = cpu_to_be64(iv_hi);
}
static void qat_alg_update_iv_cbc_mode(struct qat_crypto_request *qat_req)
{
	struct skcipher_request *sreq = qat_req->skcipher_req;
	int offset = sreq->cryptlen - AES_BLOCK_SIZE;
	struct scatterlist *sgl;

	if (qat_req->encryption)
		sgl = sreq->dst;
	else
		sgl = sreq->src;

	scatterwalk_map_and_copy(qat_req->iv, sgl, offset, AES_BLOCK_SIZE, 0);
}
static void qat_alg_update_iv(struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct device *dev = &GET_DEV(ctx->inst->accel_dev);

	switch (ctx->mode) {
	case ICP_QAT_HW_CIPHER_CTR_MODE:
		qat_alg_update_iv_ctr_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_CBC_MODE:
		qat_alg_update_iv_cbc_mode(qat_req);
		break;
	case ICP_QAT_HW_CIPHER_XTS_MODE:
		break;
	default:
		dev_warn(dev, "Unsupported IV update for cipher mode %d\n",
			 ctx->mode);
	}
}
static void qat_skcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
				      struct qat_crypto_request *qat_req)
{
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	struct qat_crypto_instance *inst = ctx->inst;
	struct skcipher_request *sreq = qat_req->skcipher_req;
	u8 stat_filed = qat_resp->comn_resp.comn_status;
	int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);

	qat_alg_free_bufl(inst, qat_req);
	if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		res = -EINVAL;

	if (qat_req->encryption)
		qat_alg_update_iv(qat_req);

	memcpy(sreq->iv, qat_req->iv, AES_BLOCK_SIZE);

	sreq->base.complete(&sreq->base, res);
}
void qat_alg_callback(void *resp)
{
	struct icp_qat_fw_la_resp *qat_resp = resp;
	struct qat_crypto_request *qat_req =
			(void *)(__force long)qat_resp->opaque_data;

	qat_req->cb(qat_resp, qat_req);
}
static int qat_alg_aead_dec(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int digst_size = crypto_aead_authsize(aead_tfm);
	int ret;
	u32 cipher_len;
	u32 ctr = 0;

	cipher_len = areq->cryptlen - digst_size;
	if (cipher_len % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = cipher_len;
	cipher_param->cipher_offset = areq->assoclen;
	memcpy(cipher_param->u.cipher_IV_array, areq->iv, AES_BLOCK_SIZE);
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));
	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_aead_enc(struct aead_request *areq)
{
	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
	struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = aead_request_ctx(areq);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_auth_req_params *auth_param;
	struct icp_qat_fw_la_bulk_req *msg;
	u8 *iv = areq->iv;
	int ret;
	u32 ctr = 0;

	if (areq->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	ret = qat_alg_sgl_to_bufl(ctx->inst, areq->src, areq->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->aead_ctx = ctx;
	qat_req->aead_req = areq;
	qat_req->cb = qat_aead_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param));

	memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
	cipher_param->cipher_length = areq->cryptlen;
	cipher_param->cipher_offset = areq->assoclen;

	auth_param->auth_off = 0;
	auth_param->auth_len = areq->assoclen + areq->cryptlen;

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
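
/*
 * skcipher setkey path: the first key allocates DMA-coherent content
 * descriptors and a crypto instance (newkey); later keys only reset and
 * rebuild the existing descriptors (rekey).
 */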
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx,
				  const u8 *key, unsigned int keylen,
				  int mode)
{
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
	memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));

	return qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
}
static int qat_alg_skcipher_newkey(struct qat_alg_skcipher_ctx *ctx,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_crypto_instance *inst = NULL;
	struct device *dev;
	int node = get_current_node();
	int ret;

	inst = qat_crypto_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	dev = &GET_DEV(inst->accel_dev);
	ctx->inst = inst;
	ctx->enc_cd = dma_alloc_coherent(dev, sizeof(*ctx->enc_cd),
					 &ctx->enc_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->enc_cd) {
		ret = -ENOMEM;
		goto out_free_instance;
	}
	ctx->dec_cd = dma_alloc_coherent(dev, sizeof(*ctx->dec_cd),
					 &ctx->dec_cd_paddr,
					 GFP_ATOMIC);
	if (!ctx->dec_cd) {
		ret = -ENOMEM;
		goto out_free_enc;
	}

	ret = qat_alg_skcipher_init_sessions(ctx, key, keylen, mode);
	if (ret)
		goto out_free_all;

	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
	dma_free_coherent(dev, sizeof(*ctx->dec_cd),
			  ctx->dec_cd, ctx->dec_cd_paddr);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
	dma_free_coherent(dev, sizeof(*ctx->enc_cd),
			  ctx->enc_cd, ctx->enc_cd_paddr);
	ctx->enc_cd = NULL;
out_free_instance:
	ctx->inst = NULL;
	qat_crypto_put_instance(inst);
	return ret;
}
static int qat_alg_skcipher_setkey(struct crypto_skcipher *tfm,
				   const u8 *key, unsigned int keylen,
				   int mode)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->mode = mode;

	if (ctx->enc_cd)
		return qat_alg_skcipher_rekey(ctx, key, keylen, mode);
	else
		return qat_alg_skcipher_newkey(ctx, key, keylen, mode);
}
static int qat_alg_skcipher_cbc_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CBC_MODE);
}

static int qat_alg_skcipher_ctr_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	return qat_alg_skcipher_setkey(tfm, key, keylen,
				       ICP_QAT_HW_CIPHER_CTR_MODE);
}
static int qat_alg_skcipher_xts_setkey(struct crypto_skcipher *tfm,
				       const u8 *key, unsigned int keylen)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int ret;

	ret = xts_verify_key(tfm, key, keylen);
	if (ret)
		return ret;

	if (keylen >> 1 == AES_KEYSIZE_192) {
		ret = crypto_skcipher_setkey(ctx->ftfm, key, keylen);
		if (ret)
			return ret;

		ctx->fallback = true;

		return 0;
	}

	ctx->fallback = false;

	ret = qat_alg_skcipher_setkey(tfm, key, keylen,
				      ICP_QAT_HW_CIPHER_XTS_MODE);
	if (ret)
		return ret;

	if (HW_CAP_AES_V2(ctx->inst->accel_dev))
		ret = crypto_cipher_setkey(ctx->tweak, key + (keylen / 2),
					   keylen / 2);

	return ret;
}
static void qat_alg_set_req_iv(struct qat_crypto_request *qat_req)
{
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct qat_alg_skcipher_ctx *ctx = qat_req->skcipher_ctx;
	bool aes_v2_capable = HW_CAP_AES_V2(ctx->inst->accel_dev);
	u8 *iv = qat_req->skcipher_req->iv;

	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;

	if (aes_v2_capable && ctx->mode == ICP_QAT_HW_CIPHER_XTS_MODE)
		crypto_cipher_encrypt_one(ctx->tweak,
					  (u8 *)cipher_param->u.cipher_IV_array,
					  iv);
	else
		memcpy(cipher_param->u.cipher_IV_array, iv, AES_BLOCK_SIZE);
}
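
/*
 * Build and post the firmware request; adf_send_message() is retried a
 * few times on -EAGAIN and the request completes asynchronously through
 * qat_skcipher_alg_callback().
 */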
static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;
	u32 ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->enc_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = true;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

	qat_alg_set_req_iv(qat_req);

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_encrypt(req);
}
static int qat_alg_skcipher_xts_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_encrypt(nreq);
	}

	return qat_alg_skcipher_encrypt(req);
}
static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(stfm);
	struct qat_alg_skcipher_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_crypto_request *qat_req = skcipher_request_ctx(req);
	struct icp_qat_fw_la_cipher_req_params *cipher_param;
	struct icp_qat_fw_la_bulk_req *msg;
	int ret;
	u32 ctr = 0;

	if (req->cryptlen == 0)
		return 0;

	ret = qat_alg_sgl_to_bufl(ctx->inst, req->src, req->dst, qat_req);
	if (unlikely(ret))
		return ret;

	msg = &qat_req->req;
	*msg = ctx->dec_fw_req;
	qat_req->skcipher_ctx = ctx;
	qat_req->skcipher_req = req;
	qat_req->cb = qat_skcipher_alg_callback;
	qat_req->req.comn_mid.opaque_data = (u64)(__force long)qat_req;
	qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
	qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
	qat_req->encryption = false;
	cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
	cipher_param->cipher_length = req->cryptlen;
	cipher_param->cipher_offset = 0;

	qat_alg_set_req_iv(qat_req);
	qat_alg_update_iv(qat_req);

	do {
		ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg);
	} while (ret == -EAGAIN && ctr++ < 10);

	if (ret == -EAGAIN) {
		qat_alg_free_bufl(ctx->inst, qat_req);
		return -EBUSY;
	}
	return -EINPROGRESS;
}
static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req)
{
	if (req->cryptlen % AES_BLOCK_SIZE != 0)
		return -EINVAL;

	return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_skcipher_xts_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *stfm = crypto_skcipher_reqtfm(req);
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(stfm);
	struct skcipher_request *nreq = skcipher_request_ctx(req);

	if (req->cryptlen < XTS_BLOCK_SIZE)
		return -EINVAL;

	if (ctx->fallback) {
		memcpy(nreq, req, sizeof(*req));
		skcipher_request_set_tfm(nreq, ctx->ftfm);
		return crypto_skcipher_decrypt(nreq);
	}

	return qat_alg_skcipher_decrypt(req);
}
static int qat_alg_aead_init(struct crypto_aead *tfm,
			     enum icp_qat_hw_auth_algo hash,
			     const char *hash_name)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);

	ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
	if (IS_ERR(ctx->hash_tfm))
		return PTR_ERR(ctx->hash_tfm);
	ctx->qat_hash_alg = hash;
	crypto_aead_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}
static int qat_alg_aead_sha1_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}

static int qat_alg_aead_sha256_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}

static int qat_alg_aead_sha512_init(struct crypto_aead *tfm)
{
	return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
static void qat_alg_aead_exit(struct crypto_aead *tfm)
{
	struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	crypto_free_shash(ctx->hash_tfm);

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
		dma_free_coherent(dev, sizeof(struct qat_alg_cd),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static int qat_alg_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
	crypto_skcipher_set_reqsize(tfm, sizeof(struct qat_crypto_request));
	return 0;
}
static int qat_alg_skcipher_init_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	int reqsize;

	ctx->ftfm = crypto_alloc_skcipher("xts(aes)", 0,
					  CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->ftfm))
		return PTR_ERR(ctx->ftfm);

	ctx->tweak = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(ctx->tweak)) {
		crypto_free_skcipher(ctx->ftfm);
		return PTR_ERR(ctx->tweak);
	}

	reqsize = max(sizeof(struct qat_crypto_request),
		      sizeof(struct skcipher_request) +
		      crypto_skcipher_reqsize(ctx->ftfm));
	crypto_skcipher_set_reqsize(tfm, reqsize);

	return 0;
}
static void qat_alg_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct qat_crypto_instance *inst = ctx->inst;
	struct device *dev;

	if (!inst)
		return;

	dev = &GET_DEV(inst->accel_dev);
	if (ctx->enc_cd) {
		memset(ctx->enc_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->enc_cd, ctx->enc_cd_paddr);
	}
	if (ctx->dec_cd) {
		memset(ctx->dec_cd, 0,
		       sizeof(struct icp_qat_hw_cipher_algo_blk));
		dma_free_coherent(dev,
				  sizeof(struct icp_qat_hw_cipher_algo_blk),
				  ctx->dec_cd, ctx->dec_cd_paddr);
	}
	qat_crypto_put_instance(inst);
}
static void qat_alg_skcipher_exit_xts_tfm(struct crypto_skcipher *tfm)
{
	struct qat_alg_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

	if (ctx->ftfm)
		crypto_free_skcipher(ctx->ftfm);

	if (ctx->tweak)
		crypto_free_cipher(ctx->tweak);

	qat_alg_skcipher_exit_tfm(tfm);
}
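
/* Algorithm templates registered with the crypto API. */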
static struct aead_alg qat_aeads[] = { {
	.base = {
		.cra_name = "authenc(hmac(sha1),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha1",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha1_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA1_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha256),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha256",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha256_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA256_DIGEST_SIZE,
}, {
	.base = {
		.cra_name = "authenc(hmac(sha512),cbc(aes))",
		.cra_driver_name = "qat_aes_cbc_hmac_sha512",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_blocksize = AES_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_alg_aead_sha512_init,
	.exit = qat_alg_aead_exit,
	.setkey = qat_alg_aead_setkey,
	.decrypt = qat_alg_aead_dec,
	.encrypt = qat_alg_aead_enc,
	.ivsize = AES_BLOCK_SIZE,
	.maxauthsize = SHA512_DIGEST_SIZE,
} };
static struct skcipher_alg qat_skciphers[] = { {
	.base.cra_name = "cbc(aes)",
	.base.cra_driver_name = "qat_aes_cbc",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_cbc_setkey,
	.decrypt = qat_alg_skcipher_blk_decrypt,
	.encrypt = qat_alg_skcipher_blk_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "ctr(aes)",
	.base.cra_driver_name = "qat_aes_ctr",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = 1,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_tfm,
	.exit = qat_alg_skcipher_exit_tfm,
	.setkey = qat_alg_skcipher_ctr_setkey,
	.decrypt = qat_alg_skcipher_decrypt,
	.encrypt = qat_alg_skcipher_encrypt,
	.min_keysize = AES_MIN_KEY_SIZE,
	.max_keysize = AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
}, {
	.base.cra_name = "xts(aes)",
	.base.cra_driver_name = "qat_aes_xts",
	.base.cra_priority = 4001,
	.base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK |
			  CRYPTO_ALG_ALLOCATES_MEMORY,
	.base.cra_blocksize = AES_BLOCK_SIZE,
	.base.cra_ctxsize = sizeof(struct qat_alg_skcipher_ctx),
	.base.cra_alignmask = 0,
	.base.cra_module = THIS_MODULE,

	.init = qat_alg_skcipher_init_xts_tfm,
	.exit = qat_alg_skcipher_exit_xts_tfm,
	.setkey = qat_alg_skcipher_xts_setkey,
	.decrypt = qat_alg_skcipher_xts_decrypt,
	.encrypt = qat_alg_skcipher_xts_encrypt,
	.min_keysize = 2 * AES_MIN_KEY_SIZE,
	.max_keysize = 2 * AES_MAX_KEY_SIZE,
	.ivsize = AES_BLOCK_SIZE,
} };
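
/*
 * Registration is reference counted across accelerator devices: only the
 * first qat_algs_register() call registers the algorithms and only the
 * last qat_algs_unregister() call removes them.
 */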
int qat_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs != 1)
		goto unlock;

	ret = crypto_register_skciphers(qat_skciphers,
					ARRAY_SIZE(qat_skciphers));
	if (ret)
		goto unlock;

	ret = crypto_register_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	if (ret)
		goto unreg_algs;

unlock:
	mutex_unlock(&algs_lock);
	return ret;

unreg_algs:
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));
	goto unlock;
}
void qat_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs != 0)
		goto unlock;

	crypto_unregister_aeads(qat_aeads, ARRAY_SIZE(qat_aeads));
	crypto_unregister_skciphers(qat_skciphers, ARRAY_SIZE(qat_skciphers));

unlock:
	mutex_unlock(&algs_lock);
}