// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sm3.h>
#include <crypto/internal/hash.h>

#include "cc_driver.h"
#include "cc_request_mgr.h"
#include "cc_buffer_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define CC_MAX_HASH_SEQ_LEN 12
#define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
#define CC_SM3_HASH_LEN_SIZE 8
struct cc_hash_handle {
	u32 digest_len_sram_addr; /* const value in SRAM */
	u32 larval_digest_sram_addr; /* const value in SRAM */
	struct list_head hash_list;
};

static const u32 cc_digest_len_init[] = {
	0x00000040, 0x00000000, 0x00000000, 0x00000000 };
static const u32 cc_md5_init[] = {
	SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha1_init[] = {
	SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
static const u32 cc_sha224_init[] = {
	SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
	SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
static const u32 cc_sha256_init[] = {
	SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
	SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
static const u32 cc_digest_len_sha512_init[] = {
	0x00000080, 0x00000000, 0x00000000, 0x00000000 };
/*
 * Due to the way the HW works, every double word in the SHA384 and SHA512
 * larval hashes must be stored in hi/lo order
 */
#define hilo(x) upper_32_bits(x), lower_32_bits(x)
static const u32 cc_sha384_init[] = {
	hilo(SHA384_H7), hilo(SHA384_H6), hilo(SHA384_H5), hilo(SHA384_H4),
	hilo(SHA384_H3), hilo(SHA384_H2), hilo(SHA384_H1), hilo(SHA384_H0) };
static const u32 cc_sha512_init[] = {
	hilo(SHA512_H7), hilo(SHA512_H6), hilo(SHA512_H5), hilo(SHA512_H4),
	hilo(SHA512_H3), hilo(SHA512_H2), hilo(SHA512_H1), hilo(SHA512_H0) };

static const u32 cc_sm3_init[] = {
	SM3_IVH, SM3_IVG, SM3_IVF, SM3_IVE,
	SM3_IVD, SM3_IVC, SM3_IVB, SM3_IVA };
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size);

static const void *cc_larval_digest(struct device *dev, u32 mode);
struct cc_hash_alg {
	struct list_head entry;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	struct ahash_alg ahash_alg;
};

struct hash_key_req_ctx {
	u32 keylen;
	dma_addr_t key_dma_addr;
	u8 *key;
};
/* hash per-session context */
struct cc_hash_ctx {
	struct cc_drvdata *drvdata;
	/* holds the origin digest; the digest after "setkey" if HMAC,
	 * the initial digest if HASH.
	 */
	u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
	u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;

	dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
	dma_addr_t digest_buff_dma_addr;
	/* used for HMAC when the key is larger than the mode's block size */
	struct hash_key_req_ctx key_params;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	unsigned int hash_len;
	struct completion setkey_comp;
	bool is_hmac;
};
static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
			unsigned int flow_mode, struct cc_hw_desc desc[],
			bool is_not_last_data, unsigned int *seq_size);
static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
{
	if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
	    mode == DRV_HASH_SHA512) {
		set_bytes_swap(desc, 1);
	} else {
		set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	}
}
static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
			 unsigned int digestsize)
{
	state->digest_result_dma_addr =
		dma_map_single(dev, state->digest_result_buff,
			       digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
		dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
			digestsize);
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
		digestsize, state->digest_result_buff,
		&state->digest_result_dma_addr);

	return 0;
}
static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
			struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	memset(state, 0, sizeof(*state));

	if (is_hmac) {
		if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
		    ctx->hw_mode != DRV_CIPHER_CMAC) {
			dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);

			memcpy(state->digest_buff, ctx->digest_buff,
			       ctx->inter_digestsize);
			if (ctx->hash_mode == DRV_HASH_SHA512 ||
			    ctx->hash_mode == DRV_HASH_SHA384)
				memcpy(state->digest_bytes_len,
				       cc_digest_len_sha512_init,
				       ctx->hash_len);
			else
				memcpy(state->digest_bytes_len,
				       cc_digest_len_init,
				       ctx->hash_len);
		}

		if (ctx->hash_mode != DRV_HASH_NULL) {
			dma_sync_single_for_cpu(dev,
						ctx->opad_tmp_keys_dma_addr,
						ctx->inter_digestsize,
						DMA_BIDIRECTIONAL);
			memcpy(state->opad_digest_buff,
			       ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
		}
	} else { /* hash */
		/* Copy the initial digests if hash flow. */
		const void *larval = cc_larval_digest(dev, ctx->hash_mode);

		memcpy(state->digest_buff, larval, ctx->inter_digestsize);
	}
}
static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
		      struct cc_hash_ctx *ctx)
{
	bool is_hmac = ctx->is_hmac;

	state->digest_buff_dma_addr =
		dma_map_single(dev, state->digest_buff,
			       ctx->inter_digestsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
			ctx->inter_digestsize, state->digest_buff);
		return -EINVAL;
	}
	dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
		ctx->inter_digestsize, state->digest_buff,
		&state->digest_buff_dma_addr);

	if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
		state->digest_bytes_len_dma_addr =
			dma_map_single(dev, state->digest_bytes_len,
				       HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
			dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
				HASH_MAX_LEN_SIZE, state->digest_bytes_len);
			goto unmap_digest_buf;
		}
		dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
			HASH_MAX_LEN_SIZE, state->digest_bytes_len,
			&state->digest_bytes_len_dma_addr);
	}

	if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
		state->opad_digest_dma_addr =
			dma_map_single(dev, state->opad_digest_buff,
				       ctx->inter_digestsize,
				       DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
			dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
				ctx->inter_digestsize,
				state->opad_digest_buff);
			goto unmap_digest_len;
		}
		dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
			ctx->inter_digestsize, state->opad_digest_buff,
			&state->opad_digest_dma_addr);
	}

	return 0;

unmap_digest_len:
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		state->digest_bytes_len_dma_addr = 0;
	}
unmap_digest_buf:
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		state->digest_buff_dma_addr = 0;
	}

	return -EINVAL;
}
static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
			 struct cc_hash_ctx *ctx)
{
	if (state->digest_buff_dma_addr) {
		dma_unmap_single(dev, state->digest_buff_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&state->digest_buff_dma_addr);
		state->digest_buff_dma_addr = 0;
	}
	if (state->digest_bytes_len_dma_addr) {
		dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
				 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
			&state->digest_bytes_len_dma_addr);
		state->digest_bytes_len_dma_addr = 0;
	}
	if (state->opad_digest_dma_addr) {
		dma_unmap_single(dev, state->opad_digest_dma_addr,
				 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
			&state->opad_digest_dma_addr);
		state->opad_digest_dma_addr = 0;
	}
}
static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
			    unsigned int digestsize, u8 *result)
{
	if (state->digest_result_dma_addr) {
		dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "unmap digest result buffer va (%pK) pa (%pad) len %u\n",
			state->digest_result_buff,
			&state->digest_result_dma_addr, digestsize);
		memcpy(result, state->digest_result_buff, digestsize);
	}
	state->digest_result_dma_addr = 0;
}
static void cc_update_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_digest_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}

static void cc_hash_complete(struct device *dev, void *cc_req, int err)
{
	struct ahash_request *req = (struct ahash_request *)cc_req;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	dev_dbg(dev, "req=%pK\n", req);

	if (err != -EINPROGRESS) {
		/* Not a BACKLOG notification */
		cc_unmap_hash_request(dev, state, req->src, false);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}

	ahash_request_complete(req, err);
}
static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
			 int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
		      NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	idx++;

	return idx;
}
static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
		       int idx)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);

	/* store the hash digest result in the context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	cc_set_endianity(ctx->hash_mode, &desc[idx]);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load the hash current length */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_din_sram(&desc[idx],
		     cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Memory Barrier: wait for IPAD/OPAD axi write to complete */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     digestsize, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	return idx;
}
418 static int cc_hash_digest(struct ahash_request
*req
)
420 struct ahash_req_ctx
*state
= ahash_request_ctx_dma(req
);
421 struct crypto_ahash
*tfm
= crypto_ahash_reqtfm(req
);
422 struct cc_hash_ctx
*ctx
= crypto_ahash_ctx_dma(tfm
);
423 u32 digestsize
= crypto_ahash_digestsize(tfm
);
424 struct scatterlist
*src
= req
->src
;
425 unsigned int nbytes
= req
->nbytes
;
426 u8
*result
= req
->result
;
427 struct device
*dev
= drvdata_to_dev(ctx
->drvdata
);
428 bool is_hmac
= ctx
->is_hmac
;
429 struct cc_crypto_req cc_req
= {};
430 struct cc_hw_desc desc
[CC_MAX_HASH_SEQ_LEN
];
431 u32 larval_digest_addr
;
434 gfp_t flags
= cc_gfp_flags(&req
->base
);
436 dev_dbg(dev
, "===== %s-digest (%d) ====\n", is_hmac
? "hmac" : "hash",
439 cc_init_req(dev
, state
, ctx
);
441 if (cc_map_req(dev
, state
, ctx
)) {
442 dev_err(dev
, "map_ahash_source() failed\n");
446 if (cc_map_result(dev
, state
, digestsize
)) {
447 dev_err(dev
, "map_ahash_digest() failed\n");
448 cc_unmap_req(dev
, state
, ctx
);
452 if (cc_map_hash_request_final(ctx
->drvdata
, state
, src
, nbytes
, 1,
454 dev_err(dev
, "map_ahash_request_final() failed\n");
455 cc_unmap_result(dev
, state
, digestsize
, result
);
456 cc_unmap_req(dev
, state
, ctx
);
460 /* Setup request structure */
461 cc_req
.user_cb
= cc_digest_complete
;
462 cc_req
.user_arg
= req
;
464 /* If HMAC then load hash IPAD xor key, if HASH then load initial
467 hw_desc_init(&desc
[idx
]);
468 set_hash_cipher_mode(&desc
[idx
], ctx
->hw_mode
, ctx
->hash_mode
);
470 set_din_type(&desc
[idx
], DMA_DLLI
, state
->digest_buff_dma_addr
,
471 ctx
->inter_digestsize
, NS_BIT
);
473 larval_digest_addr
= cc_larval_digest_addr(ctx
->drvdata
,
475 set_din_sram(&desc
[idx
], larval_digest_addr
,
476 ctx
->inter_digestsize
);
478 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
479 set_setup_mode(&desc
[idx
], SETUP_LOAD_STATE0
);
482 /* Load the hash current length */
483 hw_desc_init(&desc
[idx
]);
484 set_hash_cipher_mode(&desc
[idx
], ctx
->hw_mode
, ctx
->hash_mode
);
487 set_din_type(&desc
[idx
], DMA_DLLI
,
488 state
->digest_bytes_len_dma_addr
,
489 ctx
->hash_len
, NS_BIT
);
491 set_din_const(&desc
[idx
], 0, ctx
->hash_len
);
493 set_cipher_config1(&desc
[idx
], HASH_PADDING_ENABLED
);
495 set_cipher_do(&desc
[idx
], DO_PAD
);
497 set_flow_mode(&desc
[idx
], S_DIN_to_HASH
);
498 set_setup_mode(&desc
[idx
], SETUP_LOAD_KEY0
);
501 cc_set_desc(state
, ctx
, DIN_HASH
, desc
, false, &idx
);
504 /* HW last hash block padding (aka. "DO_PAD") */
505 hw_desc_init(&desc
[idx
]);
506 set_cipher_mode(&desc
[idx
], ctx
->hw_mode
);
507 set_dout_dlli(&desc
[idx
], state
->digest_buff_dma_addr
,
508 ctx
->hash_len
, NS_BIT
, 0);
509 set_flow_mode(&desc
[idx
], S_HASH_to_DOUT
);
510 set_setup_mode(&desc
[idx
], SETUP_WRITE_STATE1
);
511 set_cipher_do(&desc
[idx
], DO_PAD
);
514 idx
= cc_fin_hmac(desc
, req
, idx
);
517 idx
= cc_fin_result(desc
, req
, idx
);
519 rc
= cc_send_request(ctx
->drvdata
, &cc_req
, desc
, idx
, &req
->base
);
520 if (rc
!= -EINPROGRESS
&& rc
!= -EBUSY
) {
521 dev_err(dev
, "send_request() failed (rc=%d)\n", rc
);
522 cc_unmap_hash_request(dev
, state
, src
, true);
523 cc_unmap_result(dev
, state
, digestsize
, result
);
524 cc_unmap_req(dev
, state
, ctx
);
static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
			   struct ahash_req_ctx *state, unsigned int idx)
{
	/* Restore hash digest */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     ctx->inter_digestsize, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Restore hash current length */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
		     ctx->hash_len, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);

	return idx;
}
static int cc_hash_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
		"hmac" : "hash", nbytes);

	if (nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
					block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, " data size %x does not require HW update\n",
				nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		return -EINVAL;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* store current hash length in context */
	hw_desc_init(&desc[idx]);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
static int cc_do_finup(struct ahash_request *req, bool update)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct scatterlist *src = req->src;
	unsigned int nbytes = req->nbytes;
	u8 *result = req->result;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	bool is_hmac = ctx->is_hmac;
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
		update ? "finup" : "final", nbytes);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
				      flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	idx = cc_restore_hash(desc, ctx, state, idx);

	/* Pad the hash */
	hw_desc_init(&desc[idx]);
	set_cipher_do(&desc[idx], DO_PAD);
	set_hash_cipher_mode(&desc[idx], ctx->hw_mode, ctx->hash_mode);
	set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
		      ctx->hash_len, NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	idx++;

	if (is_hmac)
		idx = cc_fin_hmac(desc, req, idx);

	idx = cc_fin_result(desc, req, idx);

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, src, true);
		cc_unmap_result(dev, state, digestsize, result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}

static int cc_hash_finup(struct ahash_request *req)
{
	return cc_do_finup(req, true);
}

static int cc_hash_final(struct ahash_request *req)
{
	return cc_do_finup(req, false);
}
static int cc_hash_init(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	return 0;
}
static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
			  unsigned int keylen)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = NULL;
	int blocksize = 0;
	int digestsize = 0;
	int i, idx = 0, rc = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 larval_addr;
	struct device *dev;

	ctx = crypto_ahash_ctx_dma(ahash);
	dev = drvdata_to_dev(ctx->drvdata);
	dev_dbg(dev, "start keylen: %d", keylen);

	blocksize = crypto_tfm_alg_blocksize(&ahash->base);
	digestsize = crypto_ahash_digestsize(ahash);

	larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);

	/* A zero keylen means plain HASH; any non-zero keylen selects the
	 * HMAC flow.
	 */
	ctx->key_params.keylen = keylen;
	ctx->key_params.key_dma_addr = 0;
	ctx->is_hmac = true;
	ctx->key_params.key = NULL;

	if (keylen) {
		ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
		if (!ctx->key_params.key)
			return -ENOMEM;

		ctx->key_params.key_dma_addr =
			dma_map_single(dev, ctx->key_params.key, keylen,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				ctx->key_params.key, keylen);
			kfree_sensitive(ctx->key_params.key);
			return -ENOMEM;
		}
		dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_sram(&desc[idx], larval_addr,
				     ctx->inter_digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], ctx->hw_mode);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			cc_set_endianity(ctx->hash_mode, &desc[idx]);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx],
				      (ctx->opad_tmp_keys_dma_addr +
				       digestsize),
				      (blocksize - digestsize), NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     ctx->key_params.key_dma_addr, keylen,
				     NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen)) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (ctx->opad_tmp_keys_dma_addr +
					       keylen), (blocksize - keylen),
					      NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, blocksize);
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		goto out;
	}

	/* calc derived HMAC key */
	for (idx = 0, i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
			     blocksize, NS_BIT);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
		 * of the first HASH "update" state)
		 */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		if (i > 0) /* Not first iteration */
			set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		else /* First iteration */
			set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
				      ctx->inter_digestsize, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

out:
	if (ctx->key_params.key_dma_addr) {
		dma_unmap_single(dev, ctx->key_params.key_dma_addr,
				 ctx->key_params.keylen, DMA_TO_DEVICE);
		dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
			&ctx->key_params.key_dma_addr, ctx->key_params.keylen);
	}

	kfree_sensitive(ctx->key_params.key);

	return rc;
}
static int cc_xcbc_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_crypto_req cc_req = {};
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = 0;
	unsigned int idx = 0;
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
	if (!ctx->key_params.key)
		return -ENOMEM;

	ctx->key_params.key_dma_addr =
		dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
		dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
			key, keylen);
		kfree_sensitive(ctx->key_params.key);
		return -ENOMEM;
	}
	dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	ctx->is_hmac = true;
	/* 1. Load the AES key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
		     keylen, NS_BIT);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[idx], keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	set_dout_dlli(&desc[idx],
		      (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		      CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
	idx++;

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);

	dma_unmap_single(dev, ctx->key_params.key_dma_addr,
			 ctx->key_params.keylen, DMA_TO_DEVICE);
	dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
		&ctx->key_params.key_dma_addr, ctx->key_params.keylen);

	kfree_sensitive(ctx->key_params.key);

	return rc;
}
static int cc_cmac_setkey(struct crypto_ahash *ahash,
			  const u8 *key, unsigned int keylen)
{
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "===== setkey (%d) ====\n", keylen);

	ctx->is_hmac = true;

	switch (keylen) {
	case AES_KEYSIZE_128:
	case AES_KEYSIZE_192:
	case AES_KEYSIZE_256:
		break;
	default:
		return -EINVAL;
	}

	ctx->key_params.keylen = keylen;

	/* STAT_PHASE_1: Copy key to ctx */

	dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
				keylen, DMA_TO_DEVICE);

	memcpy(ctx->opad_tmp_keys_buff, key, keylen);
	if (keylen == 24) {
		memset(ctx->opad_tmp_keys_buff + 24, 0,
		       CC_AES_KEY_SIZE_MAX - 24);
	}

	dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
				   keylen, DMA_TO_DEVICE);

	ctx->key_params.keylen = keylen;

	return 0;
}
static void cc_free_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (ctx->digest_buff_dma_addr) {
		dma_unmap_single(dev, ctx->digest_buff_dma_addr,
				 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
			&ctx->digest_buff_dma_addr);
		ctx->digest_buff_dma_addr = 0;
	}
	if (ctx->opad_tmp_keys_dma_addr) {
		dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
				 sizeof(ctx->opad_tmp_keys_buff),
				 DMA_BIDIRECTIONAL);
		dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
			&ctx->opad_tmp_keys_dma_addr);
		ctx->opad_tmp_keys_dma_addr = 0;
	}

	ctx->key_params.keylen = 0;
}
static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	ctx->key_params.keylen = 0;

	ctx->digest_buff_dma_addr =
		dma_map_single(dev, ctx->digest_buff, sizeof(ctx->digest_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
		dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->digest_buff), ctx->digest_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->digest_buff), ctx->digest_buff,
		&ctx->digest_buff_dma_addr);

	ctx->opad_tmp_keys_dma_addr =
		dma_map_single(dev, ctx->opad_tmp_keys_buff,
			       sizeof(ctx->opad_tmp_keys_buff),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
		dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
			sizeof(ctx->opad_tmp_keys_buff),
			ctx->opad_tmp_keys_buff);
		goto fail;
	}
	dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
		sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
		&ctx->opad_tmp_keys_dma_addr);

	ctx->is_hmac = false;

	return 0;

fail:
	cc_free_ctx(ctx);
	return -ENOMEM;
}
static int cc_get_hash_len(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);

	if (ctx->hash_mode == DRV_HASH_SM3)
		return CC_SM3_HASH_LEN_SIZE;
	else
		return cc_get_default_hash_len(ctx->drvdata);
}
static int cc_cra_init(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct hash_alg_common *hash_alg_common =
		container_of(tfm->__crt_alg, struct hash_alg_common, base);
	struct ahash_alg *ahash_alg =
		container_of(hash_alg_common, struct ahash_alg, halg);
	struct cc_hash_alg *cc_alg =
		container_of(ahash_alg, struct cc_hash_alg, ahash_alg);

	crypto_ahash_set_reqsize_dma(__crypto_ahash_cast(tfm),
				     sizeof(struct ahash_req_ctx));

	ctx->hash_mode = cc_alg->hash_mode;
	ctx->hw_mode = cc_alg->hw_mode;
	ctx->inter_digestsize = cc_alg->inter_digestsize;
	ctx->drvdata = cc_alg->drvdata;
	ctx->hash_len = cc_get_hash_len(tfm);
	return cc_alloc_ctx(ctx);
}

static void cc_cra_exit(struct crypto_tfm *tfm)
{
	struct cc_hash_ctx *ctx = crypto_tfm_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "cc_cra_exit");
	cc_free_ctx(ctx);
}
static int cc_mac_update(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int rc;
	u32 idx = 0;
	gfp_t flags = cc_gfp_flags(&req->base);

	if (req->nbytes == 0) {
		/* no real updates required */
		return 0;
	}

	state->xcbc_count++;

	rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
					req->nbytes, block_size, flags);
	if (rc) {
		if (rc == 1) {
			dev_dbg(dev, " data size %x does not require HW update\n",
				req->nbytes);
			/* No hardware updates are required */
			return 0;
		}
		dev_err(dev, "map_ahash_request_update() failed\n");
		return -ENOMEM;
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);

	/* store the hash digest result in context */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
		      ctx->inter_digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	idx++;

	/* Setup request structure */
	cc_req.user_cb = cc_update_complete;
	cc_req.user_arg = req;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
static int cc_mac_final(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_size, key_len;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);
	u32 rem_cnt = *cc_hash_buf_cnt(state);

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_size = CC_AES_128_BIT_KEY_SIZE;
		key_len = CC_AES_128_BIT_KEY_SIZE;
	} else {
		key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
			ctx->key_params.keylen;
		key_len = ctx->key_params.keylen;
	}

	dev_dbg(dev, "===== final xcbc remainder (%d) ====\n", rem_cnt);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 0, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (state->xcbc_count && rem_cnt == 0) {
		/* Load key for ECB decryption */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
		set_din_type(&desc[idx], DMA_DLLI,
			     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
			     key_size, NS_BIT);
		set_key_size_aes(&desc[idx], key_len);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Initiate decryption of block state to previous
		 * block_state-XOR-M[n]
		 */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
			     CC_AES_BLOCK_SIZE, NS_BIT);
		set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
			      CC_AES_BLOCK_SIZE, NS_BIT, 0);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;

		/* Memory Barrier: wait for axi write to complete */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
		cc_setup_xcbc(req, desc, &idx);
	else
		cc_setup_cmac(req, desc, &idx);

	if (state->xcbc_count == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else if (rem_cnt > 0) {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
		set_flow_mode(&desc[idx], DIN_AES_DOUT);
		idx++;
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
static int cc_mac_finup(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	int idx = 0;
	int rc = 0;
	u32 key_len = 0;
	u32 digestsize = crypto_ahash_digestsize(tfm);
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
	if (state->xcbc_count > 0 && req->nbytes == 0) {
		dev_dbg(dev, "No data to update. Call cc_mac_final\n");
		return cc_mac_final(req);
	}

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -EINVAL;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_hash_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      digestsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
static int cc_mac_digest(struct ahash_request *req)
{
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 digestsize = crypto_ahash_digestsize(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
	u32 key_len;
	unsigned int idx = 0;
	int rc;
	gfp_t flags = cc_gfp_flags(&req->base);

	dev_dbg(dev, "===== mac-digest (%d) ====\n", req->nbytes);

	cc_init_req(dev, state, ctx);

	if (cc_map_req(dev, state, ctx)) {
		dev_err(dev, "map_ahash_source() failed\n");
		return -ENOMEM;
	}
	if (cc_map_result(dev, state, digestsize)) {
		dev_err(dev, "map_ahash_digest() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
				      req->nbytes, 1, flags)) {
		dev_err(dev, "map_ahash_request_final() failed\n");
		cc_unmap_req(dev, state, ctx);
		return -ENOMEM;
	}

	/* Setup request structure */
	cc_req.user_cb = cc_digest_complete;
	cc_req.user_arg = req;

	if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
		key_len = CC_AES_128_BIT_KEY_SIZE;
		cc_setup_xcbc(req, desc, &idx);
	} else {
		key_len = ctx->key_params.keylen;
		cc_setup_cmac(req, desc, &idx);
	}

	if (req->nbytes == 0) {
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], ctx->hw_mode);
		set_key_size_aes(&desc[idx], key_len);
		set_cmac_size0_mode(&desc[idx]);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	} else {
		cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
	}

	/* Get final MAC result */
	hw_desc_init(&desc[idx]);
	set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
		      CC_AES_BLOCK_SIZE, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], S_AES_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_cipher_mode(&desc[idx], ctx->hw_mode);
	idx++;

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_hash_request(dev, state, req->src, true);
		cc_unmap_result(dev, state, digestsize, req->result);
		cc_unmap_req(dev, state, ctx);
	}
	return rc;
}
static int cc_hash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	u8 *curr_buff = cc_hash_buf(state);
	u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
	const u32 tmp = CC_EXPORT_MAGIC;

	memcpy(out, &tmp, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, state->digest_buff, ctx->inter_digestsize);
	out += ctx->inter_digestsize;

	memcpy(out, state->digest_bytes_len, ctx->hash_len);
	out += ctx->hash_len;

	memcpy(out, &curr_buff_cnt, sizeof(u32));
	out += sizeof(u32);

	memcpy(out, curr_buff, curr_buff_cnt);

	return 0;
}
static int cc_hash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(ahash);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct ahash_req_ctx *state = ahash_request_ctx_dma(req);
	u32 tmp;

	memcpy(&tmp, in, sizeof(u32));
	if (tmp != CC_EXPORT_MAGIC)
		return -EINVAL;
	in += sizeof(u32);

	cc_init_req(dev, state, ctx);

	memcpy(state->digest_buff, in, ctx->inter_digestsize);
	in += ctx->inter_digestsize;

	memcpy(state->digest_bytes_len, in, ctx->hash_len);
	in += ctx->hash_len;

	/* Sanity check the data as much as possible */
	memcpy(&tmp, in, sizeof(u32));
	if (tmp > CC_MAX_HASH_BLCK_SIZE)
		return -EINVAL;
	in += sizeof(u32);

	state->buf_cnt[0] = tmp;
	memcpy(state->buffers[0], in, tmp);

	return 0;
}
struct cc_hash_template {
	char name[CRYPTO_MAX_ALG_NAME];
	char driver_name[CRYPTO_MAX_ALG_NAME];
	char mac_name[CRYPTO_MAX_ALG_NAME];
	char mac_driver_name[CRYPTO_MAX_ALG_NAME];
	unsigned int blocksize;
	bool is_mac;
	bool synchronize;
	struct ahash_alg template_ahash;
	int hash_mode;
	int hw_mode;
	int inter_digestsize;
	struct cc_drvdata *drvdata;
	u32 min_hw_rev;
	enum cc_std_body std_body;
};

#define CC_STATE_SIZE(_x) \
	((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
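
/*
 * CC_STATE_SIZE() sizes the exported state for the worst case: the digest
 * itself, the maximum length field, a full pending block, and two u32s
 * for the magic value and the pending-byte count (see cc_hash_export()).
 */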
/* hash descriptors */
static struct cc_hash_template driver_hash[] = {
	//Asynchronous hash template
	{
		.name = "sha1",
		.driver_name = "sha1-ccree",
		.mac_name = "hmac(sha1)",
		.mac_driver_name = "hmac-sha1-ccree",
		.blocksize = SHA1_BLOCK_SIZE,
		.is_mac = true,
		.synchronize = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA1_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA1,
		.hw_mode = DRV_HASH_HW_SHA1,
		.inter_digestsize = SHA1_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha256",
		.driver_name = "sha256-ccree",
		.mac_name = "hmac(sha256)",
		.mac_driver_name = "hmac-sha256-ccree",
		.blocksize = SHA256_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA256_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA256,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha224",
		.driver_name = "sha224-ccree",
		.mac_name = "hmac(sha224)",
		.mac_driver_name = "hmac-sha224-ccree",
		.blocksize = SHA224_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA224_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA224,
		.hw_mode = DRV_HASH_HW_SHA256,
		.inter_digestsize = SHA256_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha384",
		.driver_name = "sha384-ccree",
		.mac_name = "hmac(sha384)",
		.mac_driver_name = "hmac-sha384-ccree",
		.blocksize = SHA384_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA384_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA384,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sha512",
		.driver_name = "sha512-ccree",
		.mac_name = "hmac(sha512)",
		.mac_driver_name = "hmac-sha512-ccree",
		.blocksize = SHA512_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SHA512_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SHA512,
		.hw_mode = DRV_HASH_HW_SHA512,
		.inter_digestsize = SHA512_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_712,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "md5",
		.driver_name = "md5-ccree",
		.mac_name = "hmac(md5)",
		.mac_driver_name = "hmac-md5-ccree",
		.blocksize = MD5_HMAC_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = MD5_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_MD5,
		.hw_mode = DRV_HASH_HW_MD5,
		.inter_digestsize = MD5_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "sm3",
		.driver_name = "sm3-ccree",
		.blocksize = SM3_BLOCK_SIZE,
		.is_mac = false,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_hash_update,
			.final = cc_hash_final,
			.finup = cc_hash_finup,
			.digest = cc_hash_digest,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.setkey = cc_hash_setkey,
			.halg = {
				.digestsize = SM3_DIGEST_SIZE,
				.statesize = CC_STATE_SIZE(SM3_DIGEST_SIZE),
			},
		},
		.hash_mode = DRV_HASH_SM3,
		.hw_mode = DRV_HASH_HW_SM3,
		.inter_digestsize = SM3_DIGEST_SIZE,
		.min_hw_rev = CC_HW_REV_713,
		.std_body = CC_STD_OSCCA,
	},
	{
		.mac_name = "xcbc(aes)",
		.mac_driver_name = "xcbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_xcbc_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_XCBC_MAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.mac_name = "cmac(aes)",
		.mac_driver_name = "cmac-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.is_mac = true,
		.template_ahash = {
			.init = cc_hash_init,
			.update = cc_mac_update,
			.final = cc_mac_final,
			.finup = cc_mac_finup,
			.digest = cc_mac_digest,
			.setkey = cc_cmac_setkey,
			.export = cc_hash_export,
			.import = cc_hash_import,
			.halg = {
				.digestsize = AES_BLOCK_SIZE,
				.statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
			},
		},
		.hash_mode = DRV_HASH_NULL,
		.hw_mode = DRV_CIPHER_CMAC,
		.inter_digestsize = AES_BLOCK_SIZE,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};
static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
					     struct device *dev, bool keyed)
{
	struct cc_hash_alg *t_crypto_alg;
	struct crypto_alg *alg;
	struct ahash_alg *halg;

	t_crypto_alg = devm_kzalloc(dev, sizeof(*t_crypto_alg), GFP_KERNEL);
	if (!t_crypto_alg)
		return ERR_PTR(-ENOMEM);

	t_crypto_alg->ahash_alg = template->template_ahash;
	halg = &t_crypto_alg->ahash_alg;
	alg = &halg->halg.base;

	if (keyed) {
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->mac_driver_name);
	} else {
		halg->setkey = NULL;
		snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
	}

	alg->cra_module = THIS_MODULE;
	alg->cra_ctxsize = sizeof(struct cc_hash_ctx) + crypto_dma_padding();
	alg->cra_priority = CC_CRA_PRIO;
	alg->cra_blocksize = template->blocksize;
	alg->cra_alignmask = 0;
	alg->cra_exit = cc_cra_exit;

	alg->cra_init = cc_cra_init;
	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;

	t_crypto_alg->hash_mode = template->hash_mode;
	t_crypto_alg->hw_mode = template->hw_mode;
	t_crypto_alg->inter_digestsize = template->inter_digestsize;

	return t_crypto_alg;
}
static int cc_init_copy_sram(struct cc_drvdata *drvdata, const u32 *data,
			     unsigned int size, u32 *sram_buff_ofs)
{
	struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
	unsigned int larval_seq_len = 0;
	int rc;

	cc_set_sram_desc(data, *sram_buff_ofs, size / sizeof(*data),
			 larval_seq, &larval_seq_len);
	rc = send_request_init(drvdata, larval_seq, larval_seq_len);
	if (rc)
		return rc;

	*sram_buff_ofs += size;
	return 0;
}
int cc_init_hash_sram(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;
	u32 sram_buff_ofs = hash_handle->digest_len_sram_addr;
	bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
	bool sm3_supported = (drvdata->hw_rev >= CC_HW_REV_713);
	int rc = 0;

	/* Copy-to-sram digest-len */
	rc = cc_init_copy_sram(drvdata, cc_digest_len_init,
			       sizeof(cc_digest_len_init), &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	if (large_sha_supported) {
		/* Copy-to-sram digest-len for sha384/512 */
		rc = cc_init_copy_sram(drvdata, cc_digest_len_sha512_init,
				       sizeof(cc_digest_len_sha512_init),
				       &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

	/* The initial digests offset */
	hash_handle->larval_digest_sram_addr = sram_buff_ofs;

	/* Copy-to-sram initial SHA* digests */
	rc = cc_init_copy_sram(drvdata, cc_md5_init, sizeof(cc_md5_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha1_init, sizeof(cc_sha1_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha224_init, sizeof(cc_sha224_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	rc = cc_init_copy_sram(drvdata, cc_sha256_init, sizeof(cc_sha256_init),
			       &sram_buff_ofs);
	if (rc)
		goto init_digest_const_err;

	if (sm3_supported) {
		rc = cc_init_copy_sram(drvdata, cc_sm3_init,
				       sizeof(cc_sm3_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

	if (large_sha_supported) {
		rc = cc_init_copy_sram(drvdata, cc_sha384_init,
				       sizeof(cc_sha384_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;

		rc = cc_init_copy_sram(drvdata, cc_sha512_init,
				       sizeof(cc_sha512_init), &sram_buff_ofs);
		if (rc)
			goto init_digest_const_err;
	}

init_digest_const_err:
	return rc;
}
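/*
 * Probe-time setup: size and allocate the SRAM area for the constants
 * this HW revision needs, copy them in, and register every (H)MAC and
 * hash variant whose minimum HW revision and standards body match the
 * device. Any failure unwinds prior registrations via cc_hash_free().
 */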
int cc_hash_alloc(struct cc_drvdata *drvdata)
{
	struct cc_hash_handle *hash_handle;
	u32 sram_buff;
	u32 sram_size_to_alloc;
	struct device *dev = drvdata_to_dev(drvdata);
	int rc = 0;
	int alg;

	hash_handle = devm_kzalloc(dev, sizeof(*hash_handle), GFP_KERNEL);
	if (!hash_handle)
		return -ENOMEM;

	INIT_LIST_HEAD(&hash_handle->hash_list);
	drvdata->hash_handle = hash_handle;

	sram_size_to_alloc = sizeof(cc_digest_len_init) +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init);

	if (drvdata->hw_rev >= CC_HW_REV_713)
		sram_size_to_alloc += sizeof(cc_sm3_init);

	if (drvdata->hw_rev >= CC_HW_REV_712)
		sram_size_to_alloc += sizeof(cc_digest_len_sha512_init) +
			sizeof(cc_sha384_init) + sizeof(cc_sha512_init);

	sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
	if (sram_buff == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto fail;
	}

	/* The initial digest-len offset */
	hash_handle->digest_len_sram_addr = sram_buff;

	/*must be set before the alg registration as it is being used there*/
	rc = cc_init_hash_sram(drvdata);
	if (rc) {
		dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
		goto fail;
	}

	/* ahash registration */
	for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
		struct cc_hash_alg *t_alg;
		int hw_mode = driver_hash[alg].hw_mode;

		/* Check that the HW revision and variants are suitable */
		if ((driver_hash[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & driver_hash[alg].std_body))
			continue;

		if (driver_hash[alg].is_mac) {
			/* register hmac version */
			t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
			if (IS_ERR(t_alg)) {
				rc = PTR_ERR(t_alg);
				dev_err(dev, "%s alg allocation failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}
			t_alg->drvdata = drvdata;

			rc = crypto_register_ahash(&t_alg->ahash_alg);
			if (rc) {
				dev_err(dev, "%s alg registration failed\n",
					driver_hash[alg].driver_name);
				goto fail;
			}

			list_add_tail(&t_alg->entry, &hash_handle->hash_list);
		}
		if (hw_mode == DRV_CIPHER_XCBC_MAC ||
		    hw_mode == DRV_CIPHER_CMAC)
			continue;

		/* register hash version */
		t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}
		t_alg->drvdata = drvdata;

		rc = crypto_register_ahash(&t_alg->ahash_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				driver_hash[alg].driver_name);
			goto fail;
		}

		list_add_tail(&t_alg->entry, &hash_handle->hash_list);
	}

	return 0;

fail:
	cc_hash_free(drvdata);
	return rc;
}
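/* Unregister and drop every ahash algorithm registered by cc_hash_alloc(). */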
int cc_hash_free(struct cc_drvdata *drvdata)
{
	struct cc_hash_alg *t_hash_alg, *hash_n;
	struct cc_hash_handle *hash_handle = drvdata->hash_handle;

	list_for_each_entry_safe(t_hash_alg, hash_n, &hash_handle->hash_list,
				 entry) {
		crypto_unregister_ahash(&t_hash_alg->ahash_alg);
		list_del(&t_hash_alg->entry);
	}

	return 0;
}
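/*
 * Load the XCBC-MAC keys and the running MAC state into the AES engine.
 * XCBC-MAC (RFC 3566) derives three subkeys from the user key: K1 keys
 * the CBC chain while K2/K3 are folded into the final block. They are
 * read here from the K1/K2/K3 offsets of the opad_tmp_keys buffer, which
 * the XCBC setkey path is assumed to have populated.
 */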
static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
					    XCBC_MAC_K1_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_hash_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC, ctx->hash_mode);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
		     CC_AES_128_BIT_KEY_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}
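/*
 * Load the CMAC key and the running MAC state. For a 192-bit key the DMA
 * covers the full AES_MAX_KEY_SIZE buffer (the setkey path presumably
 * zero-pads it), while set_key_size_aes() still reports the real length.
 */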
static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
			  unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct ahash_req_ctx *state = ahash_request_ctx_dma(areq);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
	struct cc_hash_ctx *ctx = crypto_ahash_ctx_dma(tfm);

	/* Setup CMAC Key */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
		     ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
		      ctx->key_params.keylen), NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Load MAC state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
		     CC_AES_BLOCK_SIZE, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], ctx->key_params.keylen);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;
	*seq_size = idx;
}
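/*
 * Emit the data descriptors for the current chunk. A contiguous buffer
 * (DLLI) feeds the engine directly; scattered data first has its MLLI
 * table BYPASS-copied into SRAM and is then streamed via DMA_MLLI. For
 * non-final chunks the not-last indication tells the HW more data follows.
 */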
static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
			struct cc_hash_ctx *ctx, unsigned int flow_mode,
			struct cc_hw_desc desc[], bool is_not_last_data,
			unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(areq_ctx->curr_sg),
			     areq_ctx->curr_sg->length, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	} else {
		if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
			dev_dbg(dev, " NULL mode\n");
			/* nothing to build */
			return;
		}
		/* bypass */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     areq_ctx->mlli_params.mlli_dma_addr,
			     areq_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
			      areq_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[idx], BYPASS);
		idx++;

		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI,
			     ctx->drvdata->mlli_sram_addr,
			     areq_ctx->mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		idx++;
	}
	if (is_not_last_data)
		set_din_not_last_indication(&desc[(idx - 1)]);
	/* return updated desc sequence size */
	*seq_size = idx;
}
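/*
 * Return the host-memory copy of the larval digest for @mode, for callers
 * that need the value itself rather than its SRAM address.
 */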
static const void *cc_larval_digest(struct device *dev, u32 mode)
{
	switch (mode) {
	case DRV_HASH_MD5:
		return cc_md5_init;
	case DRV_HASH_SHA1:
		return cc_sha1_init;
	case DRV_HASH_SHA224:
		return cc_sha224_init;
	case DRV_HASH_SHA256:
		return cc_sha256_init;
	case DRV_HASH_SHA384:
		return cc_sha384_init;
	case DRV_HASH_SHA512:
		return cc_sha512_init;
	case DRV_HASH_SM3:
		return cc_sm3_init;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
		return cc_md5_init;
	}
}
/**
 * cc_larval_digest_addr() - Get the address of the initial digest in SRAM
 * according to the given hash mode
 *
 * @drvdata: Associated device driver context
 * @mode: The hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SM3 and,
 *        depending on HW revision, SHA384/SHA512
 *
 * Return:
 * The address of the initial digest in SRAM
 */
u32 cc_larval_digest_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	struct device *dev = drvdata_to_dev(_drvdata);
	bool sm3_supported = (_drvdata->hw_rev >= CC_HW_REV_713);
	u32 addr;

	switch (mode) {
	case DRV_HASH_NULL:
		break; /*Ignore*/
	case DRV_HASH_MD5:
		return (hash_handle->larval_digest_sram_addr);
	case DRV_HASH_SHA1:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init));
	case DRV_HASH_SHA224:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init));
	case DRV_HASH_SHA256:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init));
	case DRV_HASH_SM3:
		return (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
	case DRV_HASH_SHA384:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	case DRV_HASH_SHA512:
		addr = (hash_handle->larval_digest_sram_addr +
			sizeof(cc_md5_init) +
			sizeof(cc_sha1_init) +
			sizeof(cc_sha224_init) +
			sizeof(cc_sha256_init) +
			sizeof(cc_sha384_init));
		if (sm3_supported)
			addr += sizeof(cc_sm3_init);
		return addr;
	default:
		dev_err(dev, "Invalid hash mode (%d)\n", mode);
	}

	/* A valid but wrong address, returned to avoid a kernel crash */
	return hash_handle->larval_digest_sram_addr;
}
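/*
 * Return the SRAM address of the digest-length init constant for @mode;
 * SHA-384/SHA-512 use the wider constant stored directly after the
 * default one (see cc_init_hash_sram()).
 */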
u32 cc_digest_len_addr(void *drvdata, u32 mode)
{
	struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
	struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
	u32 digest_len_addr = hash_handle->digest_len_sram_addr;

	switch (mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA224:
	case DRV_HASH_SHA256:
	case DRV_HASH_MD5:
		return digest_len_addr;
	case DRV_HASH_SHA384:
	case DRV_HASH_SHA512:
		return digest_len_addr + sizeof(cc_digest_len_init);
	default:
		return digest_len_addr; /* returned to avoid a kernel crash */
	}
}