/*
 * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "cipher.h"
#include "common.h"
#include "core.h"
#include "regs-v5.h"
#include "sha.h"
#define QCE_SECTOR_SIZE		512
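
/* MMIO accessors for the crypto engine's register space */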
static inline u32 qce_read(struct qce_device *qce, u32 offset)
{
	return readl(qce->base + offset);
}
static inline void qce_write(struct qce_device *qce, u32 offset, u32 val)
{
	writel(val, qce->base + offset);
}
static inline void qce_write_array(struct qce_device *qce, u32 offset,
				   const u32 *val, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), val[i]);
}
static inline void
qce_clear_array(struct qce_device *qce, u32 offset, unsigned int len)
{
	int i;

	for (i = 0; i < len; i++)
		qce_write(qce, offset + i * sizeof(u32), 0);
}
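
/*
 * Build the ENCR_SEG_CFG register value: cipher algorithm, key size and
 * block mode are packed into one word.
 */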
static u32 qce_encr_cfg(unsigned long flags, u32 aes_key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags)) {
		if (aes_key_size == AES_KEYSIZE_128)
			cfg |= ENCR_KEY_SZ_AES128 << ENCR_KEY_SZ_SHIFT;
		else if (aes_key_size == AES_KEYSIZE_256)
			cfg |= ENCR_KEY_SZ_AES256 << ENCR_KEY_SZ_SHIFT;
	}

	if (IS_AES(flags))
		cfg |= ENCR_ALG_AES << ENCR_ALG_SHIFT;
	else if (IS_DES(flags) || IS_3DES(flags))
		cfg |= ENCR_ALG_DES << ENCR_ALG_SHIFT;

	if (IS_DES(flags))
		cfg |= ENCR_KEY_SZ_DES << ENCR_KEY_SZ_SHIFT;

	if (IS_3DES(flags))
		cfg |= ENCR_KEY_SZ_3DES << ENCR_KEY_SZ_SHIFT;

	switch (flags & QCE_MODE_MASK) {
	case QCE_MODE_ECB:
		cfg |= ENCR_MODE_ECB << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CBC:
		cfg |= ENCR_MODE_CBC << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CTR:
		cfg |= ENCR_MODE_CTR << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_XTS:
		cfg |= ENCR_MODE_XTS << ENCR_MODE_SHIFT;
		break;
	case QCE_MODE_CCM:
		cfg |= ENCR_MODE_CCM << ENCR_MODE_SHIFT;
		cfg |= LAST_CCM_XFR << LAST_CCM_SHIFT;
		break;
	default:
		return ~0;
	}

	return cfg;
}
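
/*
 * Build the AUTH_SEG_CFG register value: hash/MAC algorithm, key size,
 * digest size, mode and the first/last-block flags.
 */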
static u32 qce_auth_cfg(unsigned long flags, u32 key_size)
{
	u32 cfg = 0;

	if (IS_AES(flags) && (IS_CCM(flags) || IS_CMAC(flags)))
		cfg |= AUTH_ALG_AES << AUTH_ALG_SHIFT;
	else
		cfg |= AUTH_ALG_SHA << AUTH_ALG_SHIFT;

	if (IS_CCM(flags) || IS_CMAC(flags)) {
		if (key_size == AES_KEYSIZE_128)
			cfg |= AUTH_KEY_SZ_AES128 << AUTH_KEY_SIZE_SHIFT;
		else if (key_size == AES_KEYSIZE_256)
			cfg |= AUTH_KEY_SZ_AES256 << AUTH_KEY_SIZE_SHIFT;
	}

	if (IS_SHA1(flags) || IS_SHA1_HMAC(flags))
		cfg |= AUTH_SIZE_SHA1 << AUTH_SIZE_SHIFT;
	else if (IS_SHA256(flags) || IS_SHA256_HMAC(flags))
		cfg |= AUTH_SIZE_SHA256 << AUTH_SIZE_SHIFT;
	else if (IS_CMAC(flags))
		cfg |= AUTH_SIZE_ENUM_16_BYTES << AUTH_SIZE_SHIFT;

	if (IS_SHA1(flags) || IS_SHA256(flags))
		cfg |= AUTH_MODE_HASH << AUTH_MODE_SHIFT;
	else if (IS_SHA1_HMAC(flags) || IS_SHA256_HMAC(flags) ||
		 IS_CBC(flags) || IS_CTR(flags))
		cfg |= AUTH_MODE_HMAC << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CCM(flags))
		cfg |= AUTH_MODE_CCM << AUTH_MODE_SHIFT;
	else if (IS_AES(flags) && IS_CMAC(flags))
		cfg |= AUTH_MODE_CMAC << AUTH_MODE_SHIFT;

	if (IS_SHA(flags) || IS_SHA_HMAC(flags))
		cfg |= AUTH_POS_BEFORE << AUTH_POS_SHIFT;

	if (IS_CCM(flags))
		cfg |= QCE_MAX_NONCE_WORDS << AUTH_NONCE_NUM_WORDS_SHIFT;

	if (IS_CBC(flags) || IS_CTR(flags) || IS_CCM(flags) ||
	    IS_CMAC(flags))
		cfg |= BIT(AUTH_LAST_SHIFT) | BIT(AUTH_FIRST_SHIFT);

	return cfg;
}
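
/*
 * Compose the CONFIG register value: request size in beats, masked
 * interrupts, pipe pair selection and, optionally, little-endian mode.
 */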
static u32 qce_config_reg(struct qce_device *qce, int little)
{
	u32 beats = (qce->burst_size >> 3) - 1;
	u32 pipe_pair = qce->pipe_pair_id;
	u32 config;

	config = (beats << REQ_SIZE_SHIFT) & REQ_SIZE_MASK;
	config |= BIT(MASK_DOUT_INTR_SHIFT) | BIT(MASK_DIN_INTR_SHIFT) |
		  BIT(MASK_OP_DONE_INTR_SHIFT) | BIT(MASK_ERR_INTR_SHIFT);
	config |= (pipe_pair << PIPE_SET_SELECT_SHIFT) & PIPE_SET_SELECT_MASK;
	config &= ~HIGH_SPD_EN_N_SHIFT;

	if (little)
		config |= BIT(LITTLE_ENDIAN_MODE_SHIFT);

	return config;
}
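
/*
 * Convert a byte buffer into an array of big-endian 32-bit words; keys,
 * IVs and digests are handed to the engine in this format.
 */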
void qce_cpu_to_be32p_array(__be32 *dst, const u8 *src, unsigned int len)
{
	__be32 *d = dst;
	const u8 *s = src;
	unsigned int n;

	n = len / sizeof(u32);
	for (; n > 0; n--) {
		*d = cpu_to_be32p((const __u32 *) s);
		s += sizeof(__u32);
		d++;
	}
}
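
/*
 * Byte-reverse the XTS IV into a zeroed 16-byte buffer (right-aligned),
 * then store it as big-endian words.
 */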
static void qce_xts_swapiv(__be32 *dst, const u8 *src, unsigned int ivsize)
{
	u8 swap[QCE_AES_IV_LENGTH];
	u32 i, j;

	if (ivsize > QCE_AES_IV_LENGTH)
		return;

	memset(swap, 0, QCE_AES_IV_LENGTH);

	for (i = (QCE_AES_IV_LENGTH - ivsize), j = ivsize - 1;
	     i < QCE_AES_IV_LENGTH; i++, j--)
		swap[i] = src[j];

	qce_cpu_to_be32p_array(dst, swap, QCE_AES_IV_LENGTH);
}
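
/*
 * Program the second half of the XTS key (the tweak key) and the
 * data-unit size, capped at one 512-byte sector.
 */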
static void qce_xtskey(struct qce_device *qce, const u8 *enckey,
		       unsigned int enckeylen, unsigned int cryptlen)
{
	u32 xtskey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(u32)] = {0};
	unsigned int xtsklen = enckeylen / (2 * sizeof(u32));
	unsigned int xtsdusize;

	qce_cpu_to_be32p_array((__be32 *)xtskey, enckey + enckeylen / 2,
			       enckeylen / 2);
	qce_write_array(qce, REG_ENCR_XTS_KEY0, xtskey, xtsklen);

	/* xts du size 512B */
	xtsdusize = min_t(u32, QCE_SECTOR_SIZE, cryptlen);
	qce_write(qce, REG_ENCR_XTS_DU_SIZE, xtsdusize);
}
static void qce_setup_config(struct qce_device *qce)
{
	u32 config;

	/* get big endianness */
	config = qce_config_reg(qce, 0);

	/* clear status */
	qce_write(qce, REG_STATUS, 0);
	qce_write(qce, REG_CONFIG, config);
}
static inline void qce_crypto_go(struct qce_device *qce)
{
	qce_write(qce, REG_GOPROC, BIT(GO_SHIFT) | BIT(RESULTS_DUMP_SHIFT));
}
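
/*
 * Program the engine for a SHA1/SHA256 hash, HMAC or AES-CMAC request:
 * load the key and intermediate digest state, set the segment registers
 * and kick off processing.
 */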
static int qce_setup_regs_ahash(struct crypto_async_request *async_req,
				u32 totallen, u32 offset)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct crypto_ahash *ahash = __crypto_ahash_cast(async_req->tfm);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned int digestsize = crypto_ahash_digestsize(ahash);
	unsigned int blocksize = crypto_tfm_alg_blocksize(async_req->tfm);
	__be32 auth[SHA256_DIGEST_SIZE / sizeof(__be32)] = {0};
	__be32 mackey[QCE_SHA_HMAC_KEY_SIZE / sizeof(__be32)] = {0};
	u32 auth_cfg = 0, config;
	unsigned int iv_words;

	/* if not the last, the size has to be on the block boundary */
	if (!rctx->last_blk && req->nbytes % blocksize)
		return -EINVAL;

	qce_setup_config(qce);

	if (IS_CMAC(rctx->flags)) {
		qce_write(qce, REG_AUTH_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_CFG, 0);
		qce_write(qce, REG_ENCR_SEG_SIZE, 0);
		qce_clear_array(qce, REG_AUTH_IV0, 16);
		qce_clear_array(qce, REG_AUTH_KEY0, 16);
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);

		auth_cfg = qce_auth_cfg(rctx->flags, rctx->authklen);
	}

	if (IS_SHA_HMAC(rctx->flags) || IS_CMAC(rctx->flags)) {
		u32 authkey_words = rctx->authklen / sizeof(u32);

		qce_cpu_to_be32p_array(mackey, rctx->authkey, rctx->authklen);
		qce_write_array(qce, REG_AUTH_KEY0, (u32 *)mackey,
				authkey_words);
	}

	if (IS_CMAC(rctx->flags))
		goto go_proc;

	if (rctx->first_blk)
		memcpy(auth, rctx->digest, digestsize);
	else
		qce_cpu_to_be32p_array(auth, rctx->digest, digestsize);

	iv_words = (IS_SHA1(rctx->flags) || IS_SHA1_HMAC(rctx->flags)) ? 5 : 8;
	qce_write_array(qce, REG_AUTH_IV0, (u32 *)auth, iv_words);

	if (rctx->first_blk)
		qce_clear_array(qce, REG_AUTH_BYTECNT0, 4);
	else
		qce_write_array(qce, REG_AUTH_BYTECNT0,
				(u32 *)rctx->byte_count, 2);

	auth_cfg = qce_auth_cfg(rctx->flags, 0);

	if (rctx->last_blk)
		auth_cfg |= BIT(AUTH_LAST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_LAST_SHIFT);

	if (rctx->first_blk)
		auth_cfg |= BIT(AUTH_FIRST_SHIFT);
	else
		auth_cfg &= ~BIT(AUTH_FIRST_SHIFT);

go_proc:
	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);
	qce_write(qce, REG_AUTH_SEG_SIZE, req->nbytes);
	qce_write(qce, REG_AUTH_SEG_START, 0);
	qce_write(qce, REG_ENCR_SEG_CFG, 0);
	qce_write(qce, REG_SEG_SIZE, req->nbytes);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
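
/*
 * Program the engine for an ablkcipher request: load the key and IV
 * (with special handling for XTS), set the encrypt/decrypt direction
 * and segment registers, then start processing.
 */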
static int qce_setup_regs_ablkcipher(struct crypto_async_request *async_req,
				     u32 totallen, u32 offset)
{
	struct ablkcipher_request *req = ablkcipher_request_cast(async_req);
	struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req);
	struct qce_cipher_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_cipher_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	__be32 enckey[QCE_MAX_CIPHER_KEY_SIZE / sizeof(__be32)] = {0};
	__be32 enciv[QCE_MAX_IV_SIZE / sizeof(__be32)] = {0};
	unsigned int enckey_words, enciv_words;
	unsigned int keylen;
	u32 encr_cfg = 0, auth_cfg = 0, config;
	unsigned int ivsize = rctx->ivsize;
	unsigned long flags = rctx->flags;

	qce_setup_config(qce);

	if (IS_XTS(flags))
		keylen = ctx->enc_keylen / 2;
	else
		keylen = ctx->enc_keylen;

	qce_cpu_to_be32p_array(enckey, ctx->enc_key, keylen);
	enckey_words = keylen / sizeof(u32);

	qce_write(qce, REG_AUTH_SEG_CFG, auth_cfg);

	encr_cfg = qce_encr_cfg(flags, keylen);

	if (IS_DES(flags)) {
		enciv_words = 2;
		enckey_words = 2;
	} else if (IS_3DES(flags)) {
		enciv_words = 2;
		enckey_words = 6;
	} else if (IS_AES(flags)) {
		if (IS_XTS(flags))
			qce_xtskey(qce, ctx->enc_key, ctx->enc_keylen,
				   rctx->cryptlen);
		enciv_words = QCE_AES_IV_LENGTH / sizeof(u32);
	} else {
		return -EINVAL;
	}

	qce_write_array(qce, REG_ENCR_KEY0, (u32 *)enckey, enckey_words);

	if (!IS_ECB(flags)) {
		if (IS_XTS(flags))
			qce_xts_swapiv(enciv, rctx->iv, ivsize);
		else
			qce_cpu_to_be32p_array(enciv, rctx->iv, ivsize);

		qce_write_array(qce, REG_CNTR0_IV0, (u32 *)enciv, enciv_words);
	}

	if (IS_ENCRYPT(flags))
		encr_cfg |= BIT(ENCODE_SHIFT);

	qce_write(qce, REG_ENCR_SEG_CFG, encr_cfg);
	qce_write(qce, REG_ENCR_SEG_SIZE, rctx->cryptlen);
	qce_write(qce, REG_ENCR_SEG_START, offset & 0xffff);

	if (IS_CTR(flags)) {
		qce_write(qce, REG_CNTR_MASK, ~0);
		qce_write(qce, REG_CNTR_MASK0, ~0);
		qce_write(qce, REG_CNTR_MASK1, ~0);
		qce_write(qce, REG_CNTR_MASK2, ~0);
	}

	qce_write(qce, REG_SEG_SIZE, totallen);

	/* get little endianness */
	config = qce_config_reg(qce, 1);
	qce_write(qce, REG_CONFIG, config);

	qce_crypto_go(qce);

	return 0;
}
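
/* Dispatch a request to the register-setup routine for its algorithm type */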
int qce_start(struct crypto_async_request *async_req, u32 type, u32 totallen,
	      u32 offset)
{
	switch (type) {
	case CRYPTO_ALG_TYPE_ABLKCIPHER:
		return qce_setup_regs_ablkcipher(async_req, totallen, offset);
	case CRYPTO_ALG_TYPE_AHASH:
		return qce_setup_regs_ahash(async_req, totallen, offset);
	default:
		return -EINVAL;
	}
}
#define STATUS_ERRORS	\
		(BIT(SW_ERR_SHIFT) | BIT(AXI_ERR_SHIFT) | BIT(HSD_ERR_SHIFT))
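
/*
 * Read the STATUS register and report failure if an error bit is set
 * or the operation did not complete.
 */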
int qce_check_status(struct qce_device *qce, u32 *status)
{
	int ret = 0;

	*status = qce_read(qce, REG_STATUS);

	/*
	 * Don't use result dump status. The operation may not be complete.
	 * Instead, use the status we just read from the device. If we ever
	 * need result_status from the result dump, it has to be byte
	 * swapped, since we set the device to little endian.
	 */
	if (*status & STATUS_ERRORS || !(*status & BIT(OPERATION_DONE_SHIFT)))
		ret = -ENXIO;

	return ret;
}
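
/* Decode the core revision (major.minor.step) from the VERSION register */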
void qce_get_version(struct qce_device *qce, u32 *major, u32 *minor, u32 *step)
{
	u32 val;

	val = qce_read(qce, REG_VERSION);
	*major = (val & CORE_MAJOR_REV_MASK) >> CORE_MAJOR_REV_SHIFT;
	*minor = (val & CORE_MINOR_REV_MASK) >> CORE_MINOR_REV_SHIFT;
	*step = (val & CORE_STEP_REV_MASK) >> CORE_STEP_REV_SHIFT;
}