// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
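/*
 * Register I/O helpers: the crypto engine's control and status
 * registers are memory-mapped at qce->base and accessed as 32-bit
 * words.
 */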
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
	return readl(qce->base + offset);
}
static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
	writel(val, qce->base + offset);
}
static inline void qce_write_array(struct qce_device *qce, u32 offset,
				   const u32 *val, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), val[i]);
}
static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), 0);
}
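/*
 * Build a REG_CONFIG value: request size in beats (derived from the
 * DMA burst size), the pipe pair this device uses, all engine
 * interrupts masked and, when @little is set, little-endian mode for
 * the data path.
 */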
static u32 qce_config_reg(struct qce_device *qce, int little)
{
	u32 beats = (qce->burst_size >> 3) - 1;
	u32 pipe_pair = qce->pipe_pair_id;
	u32 config;

	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
	config &= ~HIGH_SPD_EN_N_SHIFT;

	if (little)
		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

	return config;
}
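/* Copy @len bytes from @src into @dst as big-endian 32-bit words. */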
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
	__be32 *d = dst;
	const u8 *s = src;
	unsigned int n;

	n = len / sizeof(u32);
	for (; n > 0; n--) {
		*d = cpu_to_be32p((const __u32 *)s);
		s += sizeof(__u32);
		d++;
	}
}
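/*
 * Bring the engine to a known state before programming a request:
 * clear REG_STATUS and write a big-endian REG_CONFIG.
 */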
static void qce_setup_config(struct qce_device *qce)
{
	u32 config;

	/* get big endianness */
	config = qce_config_reg(qce, 0);

	/* clear status */
	qce_write(qce, REG_STATUS, 0);
	qce_write(qce, REG_CONFIG, config);
}
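/* Start the programmed operation and request a result dump. */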
static inline void qce_crypto_go(struct qce_device *qce)
{
	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
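/*
 * Compose the REG_AUTH_SEG_CFG value for the given algorithm flags:
 * auth algorithm, key and digest sizes, and hash/HMAC/CCM/CMAC mode.
 */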
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
	else
		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

	if (IS_CCM(flags) || IS_CMAC(flags)) {
		if (key_size == AES_KEYSIZE_128)
			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
		else if (key_size == AES_KEYSIZE_256)
			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
	}

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

	if (IS_SHA1(flags) || IS_SHA256(flags))
		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
		 IS_CBC(flags) || IS_CTR(flags))
		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CCM(flags))
		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CMAC(flags))
		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

	if (IS_CCM(flags))
		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
	    IS_CMAC(flags))
		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

	return cfg;
}
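/*
 * Program the engine registers for an ahash request. For CMAC the auth
 * IV, key and byte-count registers are cleared first; for SHA/HMAC the
 * intermediate digest and byte counts are written back so a hash split
 * across multiple blocks can be resumed.
 */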
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
				u32 totallen, u32 offset)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
	u32 auth_cfg = 0, config;
	unsigned int iv_words;

	/* if not the last, the size has to be on the block boundary */
	if (!rctx->last_blk && req->nbytes % blocksize)
		return -EINVAL;

	qce_setup_config(qce);

	if (IS_CMAC(rctx->flags)) {
		qce_write(qce, REG_AUTH_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
		qce_clear_array(qce, REG_AUTH_IV0, 16);
		qce_clear_array(qce, REG_AUTH_KEY0, 16);
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
	}

	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
		u32 authkey_words = rctx->authklen / sizeof(u32);

		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
				authkey_words);
	}

	if (IS_CMAC(rctx->flags))
		goto go_proc;

	if (rctx->first_blk)
		memcpy(auth, rctx->digest, digestsize);
	else
		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

	if (rctx->first_blk)
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
	else
		qce_write_array(qce, REG_AUTH_BYTECNT0,
				(u32 *)rctx->byte_count, 2);

	auth_cfg = qce_auth_cfg(rctx->flags, 0);

	if (rctx->last_blk)
		auth_cfg |= BIT(AUTH_LAST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

	if (rctx->first_blk)
		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
	qce_write(qce, REG_AUTH_SEG_START, 0);
	qce_write(qce, REG_ENCR_SEG_CFG, 0);
	qce_write(qce, REG_SEG_SIZE, req->nbytes);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
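/*
 * Compose the REG_ENCR_SEG_CFG value for the given algorithm flags:
 * cipher algorithm, key size and block mode.
 */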
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
	u32 cfg = 0;

	if (aes_key_size == AES_KEYSIZE_128)
		cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
	else if (aes_key_size == AES_KEYSIZE_256)
		cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;

	if (IS_AES(flags))
		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	else if (IS_DES(flags) || IS_3DES(flags))
		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

	if (IS_DES(flags))
		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

	if (IS_3DES(flags))
		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

	switch (flags & QCE_MODE_MASK) {
	case QCE_MODE_ECB:
		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CBC:
		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CTR:
		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_XTS:
		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CCM:
		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
		break;
	default:
		return ~0;
	}

	return cfg;
}
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
	u8 swap[QCE_AES_IV_LENGTH];
	u32 i, j;

	if (ivsize > QCE_AES_IV_LENGTH)
		return;

	memset(swap, 0, QCE_AES_IV_LENGTH);

	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
	     i < QCE_AES_IV_LENGTH; i++, j--)
		swap[i] = src[j];

	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
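/*
 * Program the second half of the double-length XTS key into the
 * ENCR_XTS_KEY registers and set the data-unit size, capped at
 * QCE_SECTOR_SIZE.
 */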
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
		       unsigned int enckeylen, unsigned int cryptlen)
{
	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
	unsigned int xtsdusize;

	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
			       enckeylen / 2);
	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

	/* xts du size 512B */
	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}
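/*
 * Program the engine registers for a skcipher request: encryption key
 * (halved for XTS), IV or counter, segment sizes and the final
 * little-endian config, then kick off the operation.
 */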
static int qce_setup_regs_skcipher(struct crypto_async_request *async_req,
				   u32 totallen, u32 offset)
{
	struct skcipher_request *req = skcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = skcipher_request_ctx(req);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_cipher_tmpl(crypto_skcipher_reqtfm(req));
	struct qce_device *qce = tmpl->qce;
	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
	unsigned int enckey_words, enciv_words;
	unsigned int keylen;
	u32 encr_cfg = 0, auth_cfg = 0, config;
	unsigned int ivsize = rctx->ivsize;
	unsigned long flags = rctx->flags;

	qce_setup_config(qce);

	if (IS_XTS(flags))
		keylen = ctx->enc_keylen / 2;
	else
		keylen = ctx->enc_keylen;

	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
	enckey_words = keylen / sizeof(u32);

	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	encr_cfg = qce_encr_cfg(flags, keylen);

	if (IS_DES(flags)) {
		enciv_words = 2;
		enckey_words = 2;
	} else if (IS_3DES(flags)) {
		enciv_words = 2;
		enckey_words = 6;
	} else if (IS_AES(flags)) {
		if (IS_XTS(flags))
			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
				   rctx->cryptlen);
		enciv_words = QCE_AES_IV_LENGTH / sizeof(u32);
	} else {
		return -EINVAL;
	}

	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

	if (!IS_ECB(flags)) {
		if (IS_XTS(flags))
			qce_xts_swapiv(enciv, rctx->iv, ivsize);
		else
			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
	}

	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);

	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

	if (IS_CTR(flags)) {
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	qce_write(qce, REG_SEG_SIZE, totallen);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
#endif
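/*
 * Common entry point for the request paths: dispatch to the register
 * setup routine matching the algorithm type.
 */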
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
	      u32 offset)
{
	switch (type) {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	case CRYPTO_ALG_TYPE_SKCIPHER:
		return qce_setup_regs_skcipher(async_req, totallen, offset);
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	case CRYPTO_ALG_TYPE_AHASH:
		return qce_setup_regs_ahash(async_req, totallen, offset);
#endif
	default:
		return -EINVAL;
	}
}
#define STATUS_ERRORS	\
		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
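/*
 * Read REG_STATUS and report -ENXIO when the engine flags an error or
 * the operation has not completed.
 */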
int qce_check_status(struct qce_device *qce, u32 *status)
{
	int ret = 0;

	*status = qce_read(qce, REG_STATUS);

	/*
	 * Don't use the result dump status here: the operation may not be
	 * complete. Use the status we just read from the device instead. If
	 * result_status from the result dump is ever needed, it has to be
	 * byte-swapped first, since the device is set to little endian.
	 */
	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
		ret = -ENXIO;

	return ret;
}
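/* Decode the core revision from REG_VERSION into major/minor/step. */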
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val;

	val = qce_read(qce, REG_VERSION);
	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}