Linux 4.19.133
[linux/fpc-iii.git] / drivers / crypto / ccree / cc_hash.c
blob 2cadd7a218445f13c4aab9b13d46c59c501349d8
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (C) 2012-2018 ARM Limited or its affiliates. */
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <crypto/algapi.h>
7 #include <crypto/hash.h>
8 #include <crypto/md5.h>
9 #include <crypto/internal/hash.h>
11 #include "cc_driver.h"
12 #include "cc_request_mgr.h"
13 #include "cc_buffer_mgr.h"
14 #include "cc_hash.h"
15 #include "cc_sram_mgr.h"
17 #define CC_MAX_HASH_SEQ_LEN 12
18 #define CC_MAX_OPAD_KEYS_SIZE CC_MAX_HASH_BLCK_SIZE
20 struct cc_hash_handle {
21 cc_sram_addr_t digest_len_sram_addr; /* const value in SRAM */
22 cc_sram_addr_t larval_digest_sram_addr; /* const value in SRAM */
23 struct list_head hash_list;
26 static const u32 digest_len_init[] = {
27 0x00000040, 0x00000000, 0x00000000, 0x00000000 };
28 static const u32 md5_init[] = {
29 SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
30 static const u32 sha1_init[] = {
31 SHA1_H4, SHA1_H3, SHA1_H2, SHA1_H1, SHA1_H0 };
32 static const u32 sha224_init[] = {
33 SHA224_H7, SHA224_H6, SHA224_H5, SHA224_H4,
34 SHA224_H3, SHA224_H2, SHA224_H1, SHA224_H0 };
35 static const u32 sha256_init[] = {
36 SHA256_H7, SHA256_H6, SHA256_H5, SHA256_H4,
37 SHA256_H3, SHA256_H2, SHA256_H1, SHA256_H0 };
38 static const u32 digest_len_sha512_init[] = {
39 0x00000080, 0x00000000, 0x00000000, 0x00000000 };
40 static u64 sha384_init[] = {
41 SHA384_H7, SHA384_H6, SHA384_H5, SHA384_H4,
42 SHA384_H3, SHA384_H2, SHA384_H1, SHA384_H0 };
43 static u64 sha512_init[] = {
44 SHA512_H7, SHA512_H6, SHA512_H5, SHA512_H4,
45 SHA512_H3, SHA512_H2, SHA512_H1, SHA512_H0 };
47 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
48 unsigned int *seq_size);
50 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
51 unsigned int *seq_size);
53 static const void *cc_larval_digest(struct device *dev, u32 mode);
55 struct cc_hash_alg {
56 struct list_head entry;
57 int hash_mode;
58 int hw_mode;
59 int inter_digestsize;
60 struct cc_drvdata *drvdata;
61 struct ahash_alg ahash_alg;
64 struct hash_key_req_ctx {
65 u32 keylen;
66 dma_addr_t key_dma_addr;
67 u8 *key;
70 /* hash per-session context */
71 struct cc_hash_ctx {
72 struct cc_drvdata *drvdata;
73 /* Holds the original digest: the digest after "setkey" if HMAC,
74 * or the initial (larval) digest if plain HASH.
75 */
76 u8 digest_buff[CC_MAX_HASH_DIGEST_SIZE] ____cacheline_aligned;
77 u8 opad_tmp_keys_buff[CC_MAX_OPAD_KEYS_SIZE] ____cacheline_aligned;
79 dma_addr_t opad_tmp_keys_dma_addr ____cacheline_aligned;
80 dma_addr_t digest_buff_dma_addr;
81 /* used for HMAC with a key larger than the mode block size */
82 struct hash_key_req_ctx key_params;
83 int hash_mode;
84 int hw_mode;
85 int inter_digestsize;
86 struct completion setkey_comp;
87 bool is_hmac;
90 static void cc_set_desc(struct ahash_req_ctx *areq_ctx, struct cc_hash_ctx *ctx,
91 unsigned int flow_mode, struct cc_hw_desc desc[],
92 bool is_not_last_data, unsigned int *seq_size);
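/*
 * cc_set_endianity() - select how the engine writes out the hash result:
 * MD5, SHA-384 and SHA-512 get a byte swap on the DMA-out descriptor, the
 * remaining modes use the little-endian result configuration.
 */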
94 static void cc_set_endianity(u32 mode, struct cc_hw_desc *desc)
96 if (mode == DRV_HASH_MD5 || mode == DRV_HASH_SHA384 ||
97 mode == DRV_HASH_SHA512) {
98 set_bytes_swap(desc, 1);
99 } else {
100 set_cipher_config0(desc, HASH_DIGEST_RESULT_LITTLE_ENDIAN);
104 static int cc_map_result(struct device *dev, struct ahash_req_ctx *state,
105 unsigned int digestsize)
107 state->digest_result_dma_addr =
108 dma_map_single(dev, state->digest_result_buff,
109 digestsize, DMA_BIDIRECTIONAL);
110 if (dma_mapping_error(dev, state->digest_result_dma_addr)) {
111 dev_err(dev, "Mapping digest result buffer %u B for DMA failed\n",
112 digestsize);
113 return -ENOMEM;
115 dev_dbg(dev, "Mapped digest result buffer %u B at va=%pK to dma=%pad\n",
116 digestsize, state->digest_result_buff,
117 &state->digest_result_dma_addr);
119 return 0;
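/*
 * cc_init_req() - reset the per-request state. For HMAC (other than
 * XCBC/CMAC) copy the ipad digest and the precomputed digest length from the
 * tfm context, plus the opad digest for real hash modes; for plain hash copy
 * the larval (initial) digest of the selected mode.
 */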
122 static void cc_init_req(struct device *dev, struct ahash_req_ctx *state,
123 struct cc_hash_ctx *ctx)
125 bool is_hmac = ctx->is_hmac;
127 memset(state, 0, sizeof(*state));
129 if (is_hmac) {
130 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC &&
131 ctx->hw_mode != DRV_CIPHER_CMAC) {
132 dma_sync_single_for_cpu(dev, ctx->digest_buff_dma_addr,
133 ctx->inter_digestsize,
134 DMA_BIDIRECTIONAL);
136 memcpy(state->digest_buff, ctx->digest_buff,
137 ctx->inter_digestsize);
138 if (ctx->hash_mode == DRV_HASH_SHA512 ||
139 ctx->hash_mode == DRV_HASH_SHA384)
140 memcpy(state->digest_bytes_len,
141 digest_len_sha512_init,
142 ctx->drvdata->hash_len_sz);
143 else
144 memcpy(state->digest_bytes_len, digest_len_init,
145 ctx->drvdata->hash_len_sz);
148 if (ctx->hash_mode != DRV_HASH_NULL) {
149 dma_sync_single_for_cpu(dev,
150 ctx->opad_tmp_keys_dma_addr,
151 ctx->inter_digestsize,
152 DMA_BIDIRECTIONAL);
153 memcpy(state->opad_digest_buff,
154 ctx->opad_tmp_keys_buff, ctx->inter_digestsize);
156 } else { /*hash*/
157 /* Copy the initial digests if hash flow. */
158 const void *larval = cc_larval_digest(dev, ctx->hash_mode);
160 memcpy(state->digest_buff, larval, ctx->inter_digestsize);
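/*
 * cc_map_req() - DMA-map the request state buffers: the intermediate digest,
 * the running byte count (unless the HW mode is XCBC-MAC) and, for HMAC, the
 * opad digest. On failure, whatever was already mapped is unmapped again.
 */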
164 static int cc_map_req(struct device *dev, struct ahash_req_ctx *state,
165 struct cc_hash_ctx *ctx)
167 bool is_hmac = ctx->is_hmac;
169 state->digest_buff_dma_addr =
170 dma_map_single(dev, state->digest_buff,
171 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
172 if (dma_mapping_error(dev, state->digest_buff_dma_addr)) {
173 dev_err(dev, "Mapping digest len %d B at va=%pK for DMA failed\n",
174 ctx->inter_digestsize, state->digest_buff);
175 return -EINVAL;
177 dev_dbg(dev, "Mapped digest %d B at va=%pK to dma=%pad\n",
178 ctx->inter_digestsize, state->digest_buff,
179 &state->digest_buff_dma_addr);
181 if (ctx->hw_mode != DRV_CIPHER_XCBC_MAC) {
182 state->digest_bytes_len_dma_addr =
183 dma_map_single(dev, state->digest_bytes_len,
184 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
185 if (dma_mapping_error(dev, state->digest_bytes_len_dma_addr)) {
186 dev_err(dev, "Mapping digest len %u B at va=%pK for DMA failed\n",
187 HASH_MAX_LEN_SIZE, state->digest_bytes_len);
188 goto unmap_digest_buf;
190 dev_dbg(dev, "Mapped digest len %u B at va=%pK to dma=%pad\n",
191 HASH_MAX_LEN_SIZE, state->digest_bytes_len,
192 &state->digest_bytes_len_dma_addr);
195 if (is_hmac && ctx->hash_mode != DRV_HASH_NULL) {
196 state->opad_digest_dma_addr =
197 dma_map_single(dev, state->opad_digest_buff,
198 ctx->inter_digestsize,
199 DMA_BIDIRECTIONAL);
200 if (dma_mapping_error(dev, state->opad_digest_dma_addr)) {
201 dev_err(dev, "Mapping opad digest %d B at va=%pK for DMA failed\n",
202 ctx->inter_digestsize,
203 state->opad_digest_buff);
204 goto unmap_digest_len;
206 dev_dbg(dev, "Mapped opad digest %d B at va=%pK to dma=%pad\n",
207 ctx->inter_digestsize, state->opad_digest_buff,
208 &state->opad_digest_dma_addr);
211 return 0;
213 unmap_digest_len:
214 if (state->digest_bytes_len_dma_addr) {
215 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
216 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
217 state->digest_bytes_len_dma_addr = 0;
219 unmap_digest_buf:
220 if (state->digest_buff_dma_addr) {
221 dma_unmap_single(dev, state->digest_buff_dma_addr,
222 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
223 state->digest_buff_dma_addr = 0;
226 return -EINVAL;
229 static void cc_unmap_req(struct device *dev, struct ahash_req_ctx *state,
230 struct cc_hash_ctx *ctx)
232 if (state->digest_buff_dma_addr) {
233 dma_unmap_single(dev, state->digest_buff_dma_addr,
234 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
235 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
236 &state->digest_buff_dma_addr);
237 state->digest_buff_dma_addr = 0;
239 if (state->digest_bytes_len_dma_addr) {
240 dma_unmap_single(dev, state->digest_bytes_len_dma_addr,
241 HASH_MAX_LEN_SIZE, DMA_BIDIRECTIONAL);
242 dev_dbg(dev, "Unmapped digest-bytes-len buffer: digest_bytes_len_dma_addr=%pad\n",
243 &state->digest_bytes_len_dma_addr);
244 state->digest_bytes_len_dma_addr = 0;
246 if (state->opad_digest_dma_addr) {
247 dma_unmap_single(dev, state->opad_digest_dma_addr,
248 ctx->inter_digestsize, DMA_BIDIRECTIONAL);
249 dev_dbg(dev, "Unmapped opad-digest: opad_digest_dma_addr=%pad\n",
250 &state->opad_digest_dma_addr);
251 state->opad_digest_dma_addr = 0;
255 static void cc_unmap_result(struct device *dev, struct ahash_req_ctx *state,
256 unsigned int digestsize, u8 *result)
258 if (state->digest_result_dma_addr) {
259 dma_unmap_single(dev, state->digest_result_dma_addr, digestsize,
260 DMA_BIDIRECTIONAL);
261 dev_dbg(dev, "unmpa digest result buffer va (%pK) pa (%pad) len %u\n",
262 state->digest_result_buff,
263 &state->digest_result_dma_addr, digestsize);
264 memcpy(result, state->digest_result_buff, digestsize);
266 state->digest_result_dma_addr = 0;
269 static void cc_update_complete(struct device *dev, void *cc_req, int err)
271 struct ahash_request *req = (struct ahash_request *)cc_req;
272 struct ahash_req_ctx *state = ahash_request_ctx(req);
273 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
274 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
276 dev_dbg(dev, "req=%pK\n", req);
278 cc_unmap_hash_request(dev, state, req->src, false);
279 cc_unmap_req(dev, state, ctx);
280 req->base.complete(&req->base, err);
283 static void cc_digest_complete(struct device *dev, void *cc_req, int err)
285 struct ahash_request *req = (struct ahash_request *)cc_req;
286 struct ahash_req_ctx *state = ahash_request_ctx(req);
287 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
288 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
289 u32 digestsize = crypto_ahash_digestsize(tfm);
291 dev_dbg(dev, "req=%pK\n", req);
293 cc_unmap_hash_request(dev, state, req->src, false);
294 cc_unmap_result(dev, state, digestsize, req->result);
295 cc_unmap_req(dev, state, ctx);
296 req->base.complete(&req->base, err);
299 static void cc_hash_complete(struct device *dev, void *cc_req, int err)
301 struct ahash_request *req = (struct ahash_request *)cc_req;
302 struct ahash_req_ctx *state = ahash_request_ctx(req);
303 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
304 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
305 u32 digestsize = crypto_ahash_digestsize(tfm);
307 dev_dbg(dev, "req=%pK\n", req);
309 cc_unmap_hash_request(dev, state, req->src, false);
310 cc_unmap_result(dev, state, digestsize, req->result);
311 cc_unmap_req(dev, state, ctx);
312 req->base.complete(&req->base, err);
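/*
 * cc_fin_result() - append the descriptor that writes the final digest/MAC
 * from the engine state to the mapped result buffer and marks the end of
 * the request in the HW queue.
 */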
315 static int cc_fin_result(struct cc_hw_desc *desc, struct ahash_request *req,
316 int idx)
318 struct ahash_req_ctx *state = ahash_request_ctx(req);
319 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
320 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
321 u32 digestsize = crypto_ahash_digestsize(tfm);
323 /* Get final MAC result */
324 hw_desc_init(&desc[idx]);
325 set_cipher_mode(&desc[idx], ctx->hw_mode);
326 /* TODO */
327 set_dout_dlli(&desc[idx], state->digest_result_dma_addr, digestsize,
328 NS_BIT, 1);
329 set_queue_last_ind(ctx->drvdata, &desc[idx]);
330 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
331 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
332 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
333 cc_set_endianity(ctx->hash_mode, &desc[idx]);
334 idx++;
336 return idx;
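/*
 * cc_fin_hmac() - append the outer HMAC round: store the inner hash result,
 * reload the opad xor key state and the digest-length constant, wait for the
 * AXI writes to land and then hash the inner digest.
 */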
339 static int cc_fin_hmac(struct cc_hw_desc *desc, struct ahash_request *req,
340 int idx)
342 struct ahash_req_ctx *state = ahash_request_ctx(req);
343 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
344 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
345 u32 digestsize = crypto_ahash_digestsize(tfm);
347 /* store the hash digest result in the context */
348 hw_desc_init(&desc[idx]);
349 set_cipher_mode(&desc[idx], ctx->hw_mode);
350 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr, digestsize,
351 NS_BIT, 0);
352 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
353 cc_set_endianity(ctx->hash_mode, &desc[idx]);
354 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
355 idx++;
357 /* Loading hash opad xor key state */
358 hw_desc_init(&desc[idx]);
359 set_cipher_mode(&desc[idx], ctx->hw_mode);
360 set_din_type(&desc[idx], DMA_DLLI, state->opad_digest_dma_addr,
361 ctx->inter_digestsize, NS_BIT);
362 set_flow_mode(&desc[idx], S_DIN_to_HASH);
363 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
364 idx++;
366 /* Load the hash current length */
367 hw_desc_init(&desc[idx]);
368 set_cipher_mode(&desc[idx], ctx->hw_mode);
369 set_din_sram(&desc[idx],
370 cc_digest_len_addr(ctx->drvdata, ctx->hash_mode),
371 ctx->drvdata->hash_len_sz);
372 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
373 set_flow_mode(&desc[idx], S_DIN_to_HASH);
374 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
375 idx++;
377 /* Memory Barrier: wait for IPAD/OPAD axi write to complete */
378 hw_desc_init(&desc[idx]);
379 set_din_no_dma(&desc[idx], 0, 0xfffff0);
380 set_dout_no_dma(&desc[idx], 0, 0, 1);
381 idx++;
383 /* Perform HASH update */
384 hw_desc_init(&desc[idx]);
385 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
386 digestsize, NS_BIT);
387 set_flow_mode(&desc[idx], DIN_HASH);
388 idx++;
390 return idx;
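/*
 * cc_hash_digest() - one-shot ->digest handler. Maps the state, result and
 * source buffers, loads the initial (or ipad) digest and length, hashes all
 * the data, runs the outer HMAC round when keyed and writes the result;
 * completion is reported through cc_digest_complete().
 */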
393 static int cc_hash_digest(struct ahash_request *req)
395 struct ahash_req_ctx *state = ahash_request_ctx(req);
396 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
397 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
398 u32 digestsize = crypto_ahash_digestsize(tfm);
399 struct scatterlist *src = req->src;
400 unsigned int nbytes = req->nbytes;
401 u8 *result = req->result;
402 struct device *dev = drvdata_to_dev(ctx->drvdata);
403 bool is_hmac = ctx->is_hmac;
404 struct cc_crypto_req cc_req = {};
405 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
406 cc_sram_addr_t larval_digest_addr =
407 cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
408 int idx = 0;
409 int rc = 0;
410 gfp_t flags = cc_gfp_flags(&req->base);
412 dev_dbg(dev, "===== %s-digest (%d) ====\n", is_hmac ? "hmac" : "hash",
413 nbytes);
415 cc_init_req(dev, state, ctx);
417 if (cc_map_req(dev, state, ctx)) {
418 dev_err(dev, "map_ahash_source() failed\n");
419 return -ENOMEM;
422 if (cc_map_result(dev, state, digestsize)) {
423 dev_err(dev, "map_ahash_digest() failed\n");
424 cc_unmap_req(dev, state, ctx);
425 return -ENOMEM;
428 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, 1,
429 flags)) {
430 dev_err(dev, "map_ahash_request_final() failed\n");
431 cc_unmap_result(dev, state, digestsize, result);
432 cc_unmap_req(dev, state, ctx);
433 return -ENOMEM;
436 /* Setup request structure */
437 cc_req.user_cb = cc_digest_complete;
438 cc_req.user_arg = req;
440 /* If HMAC then load hash IPAD xor key, if HASH then load initial
441 * digest
442 */
443 hw_desc_init(&desc[idx]);
444 set_cipher_mode(&desc[idx], ctx->hw_mode);
445 if (is_hmac) {
446 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
447 ctx->inter_digestsize, NS_BIT);
448 } else {
449 set_din_sram(&desc[idx], larval_digest_addr,
450 ctx->inter_digestsize);
452 set_flow_mode(&desc[idx], S_DIN_to_HASH);
453 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
454 idx++;
456 /* Load the hash current length */
457 hw_desc_init(&desc[idx]);
458 set_cipher_mode(&desc[idx], ctx->hw_mode);
460 if (is_hmac) {
461 set_din_type(&desc[idx], DMA_DLLI,
462 state->digest_bytes_len_dma_addr,
463 ctx->drvdata->hash_len_sz, NS_BIT);
464 } else {
465 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
466 if (nbytes)
467 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
468 else
469 set_cipher_do(&desc[idx], DO_PAD);
471 set_flow_mode(&desc[idx], S_DIN_to_HASH);
472 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
473 idx++;
475 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
477 if (is_hmac) {
478 /* HW last hash block padding (aka. "DO_PAD") */
479 hw_desc_init(&desc[idx]);
480 set_cipher_mode(&desc[idx], ctx->hw_mode);
481 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
482 ctx->drvdata->hash_len_sz, NS_BIT, 0);
483 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
484 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
485 set_cipher_do(&desc[idx], DO_PAD);
486 idx++;
488 idx = cc_fin_hmac(desc, req, idx);
491 idx = cc_fin_result(desc, req, idx);
493 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
494 if (rc != -EINPROGRESS && rc != -EBUSY) {
495 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
496 cc_unmap_hash_request(dev, state, src, true);
497 cc_unmap_result(dev, state, digestsize, result);
498 cc_unmap_req(dev, state, ctx);
500 return rc;
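/*
 * cc_restore_hash() - reload a previously saved intermediate digest and byte
 * count into the hash engine (padding disabled) and queue the descriptors
 * for the new data.
 */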
503 static int cc_restore_hash(struct cc_hw_desc *desc, struct cc_hash_ctx *ctx,
504 struct ahash_req_ctx *state, unsigned int idx)
506 /* Restore hash digest */
507 hw_desc_init(&desc[idx]);
508 set_cipher_mode(&desc[idx], ctx->hw_mode);
509 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
510 ctx->inter_digestsize, NS_BIT);
511 set_flow_mode(&desc[idx], S_DIN_to_HASH);
512 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
513 idx++;
515 /* Restore hash current length */
516 hw_desc_init(&desc[idx]);
517 set_cipher_mode(&desc[idx], ctx->hw_mode);
518 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
519 set_din_type(&desc[idx], DMA_DLLI, state->digest_bytes_len_dma_addr,
520 ctx->drvdata->hash_len_sz, NS_BIT);
521 set_flow_mode(&desc[idx], S_DIN_to_HASH);
522 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
523 idx++;
525 cc_set_desc(state, ctx, DIN_HASH, desc, false, &idx);
527 return idx;
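/*
 * cc_hash_update() - ->update handler. Data that does not yet require a HW
 * pass is only buffered; otherwise the saved state is restored, the full
 * blocks are hashed and the updated digest and byte count are written back
 * to the request context.
 */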
530 static int cc_hash_update(struct ahash_request *req)
532 struct ahash_req_ctx *state = ahash_request_ctx(req);
533 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
534 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
535 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
536 struct scatterlist *src = req->src;
537 unsigned int nbytes = req->nbytes;
538 struct device *dev = drvdata_to_dev(ctx->drvdata);
539 struct cc_crypto_req cc_req = {};
540 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
541 u32 idx = 0;
542 int rc;
543 gfp_t flags = cc_gfp_flags(&req->base);
545 dev_dbg(dev, "===== %s-update (%d) ====\n", ctx->is_hmac ?
546 "hmac" : "hash", nbytes);
548 if (nbytes == 0) {
549 /* no real updates required */
550 return 0;
553 rc = cc_map_hash_request_update(ctx->drvdata, state, src, nbytes,
554 block_size, flags);
555 if (rc) {
556 if (rc == 1) {
557 dev_dbg(dev, " data size not require HW update %x\n",
558 nbytes);
559 /* No hardware updates are required */
560 return 0;
562 dev_err(dev, "map_ahash_request_update() failed\n");
563 return -ENOMEM;
566 if (cc_map_req(dev, state, ctx)) {
567 dev_err(dev, "map_ahash_source() failed\n");
568 cc_unmap_hash_request(dev, state, src, true);
569 return -EINVAL;
572 /* Setup request structure */
573 cc_req.user_cb = cc_update_complete;
574 cc_req.user_arg = req;
576 idx = cc_restore_hash(desc, ctx, state, idx);
578 /* store the hash digest result in context */
579 hw_desc_init(&desc[idx]);
580 set_cipher_mode(&desc[idx], ctx->hw_mode);
581 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
582 ctx->inter_digestsize, NS_BIT, 0);
583 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
584 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
585 idx++;
587 /* store current hash length in context */
588 hw_desc_init(&desc[idx]);
589 set_cipher_mode(&desc[idx], ctx->hw_mode);
590 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
591 ctx->drvdata->hash_len_sz, NS_BIT, 1);
592 set_queue_last_ind(ctx->drvdata, &desc[idx]);
593 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
594 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
595 idx++;
597 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
598 if (rc != -EINPROGRESS && rc != -EBUSY) {
599 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
600 cc_unmap_hash_request(dev, state, src, true);
601 cc_unmap_req(dev, state, ctx);
603 return rc;
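/*
 * cc_do_finup() - common body of ->final and ->finup ('update' selects
 * finup). Maps the buffers, restores the intermediate state, has the HW
 * apply the final padding, runs the outer HMAC round when keyed and writes
 * out the digest.
 */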
606 static int cc_do_finup(struct ahash_request *req, bool update)
608 struct ahash_req_ctx *state = ahash_request_ctx(req);
609 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
610 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
611 u32 digestsize = crypto_ahash_digestsize(tfm);
612 struct scatterlist *src = req->src;
613 unsigned int nbytes = req->nbytes;
614 u8 *result = req->result;
615 struct device *dev = drvdata_to_dev(ctx->drvdata);
616 bool is_hmac = ctx->is_hmac;
617 struct cc_crypto_req cc_req = {};
618 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
619 unsigned int idx = 0;
620 int rc;
621 gfp_t flags = cc_gfp_flags(&req->base);
623 dev_dbg(dev, "===== %s-%s (%d) ====\n", is_hmac ? "hmac" : "hash",
624 update ? "finup" : "final", nbytes);
626 if (cc_map_req(dev, state, ctx)) {
627 dev_err(dev, "map_ahash_source() failed\n");
628 return -EINVAL;
631 if (cc_map_hash_request_final(ctx->drvdata, state, src, nbytes, update,
632 flags)) {
633 dev_err(dev, "map_ahash_request_final() failed\n");
634 cc_unmap_req(dev, state, ctx);
635 return -ENOMEM;
637 if (cc_map_result(dev, state, digestsize)) {
638 dev_err(dev, "map_ahash_digest() failed\n");
639 cc_unmap_hash_request(dev, state, src, true);
640 cc_unmap_req(dev, state, ctx);
641 return -ENOMEM;
644 /* Setup request structure */
645 cc_req.user_cb = cc_hash_complete;
646 cc_req.user_arg = req;
648 idx = cc_restore_hash(desc, ctx, state, idx);
650 /* Pad the hash */
651 hw_desc_init(&desc[idx]);
652 set_cipher_do(&desc[idx], DO_PAD);
653 set_cipher_mode(&desc[idx], ctx->hw_mode);
654 set_dout_dlli(&desc[idx], state->digest_bytes_len_dma_addr,
655 ctx->drvdata->hash_len_sz, NS_BIT, 0);
656 set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
657 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
658 idx++;
660 if (is_hmac)
661 idx = cc_fin_hmac(desc, req, idx);
663 idx = cc_fin_result(desc, req, idx);
665 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
666 if (rc != -EINPROGRESS && rc != -EBUSY) {
667 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
668 cc_unmap_hash_request(dev, state, src, true);
669 cc_unmap_result(dev, state, digestsize, result);
670 cc_unmap_req(dev, state, ctx);
672 return rc;
675 static int cc_hash_finup(struct ahash_request *req)
677 return cc_do_finup(req, true);
681 static int cc_hash_final(struct ahash_request *req)
683 return cc_do_finup(req, false);
686 static int cc_hash_init(struct ahash_request *req)
688 struct ahash_req_ctx *state = ahash_request_ctx(req);
689 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
690 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
691 struct device *dev = drvdata_to_dev(ctx->drvdata);
693 dev_dbg(dev, "===== init (%d) ====\n", req->nbytes);
695 cc_init_req(dev, state, ctx);
697 return 0;
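/*
 * cc_hash_setkey() - HMAC setkey. Keys longer than the block size are first
 * hashed, shorter ones are zero-padded to a full block; the padded block is
 * then XORed with the ipad/opad constants and hashed, leaving the ipad
 * digest in digest_buff and the opad digest in opad_tmp_keys_buff for later
 * requests.
 */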
700 static int cc_hash_setkey(struct crypto_ahash *ahash, const u8 *key,
701 unsigned int keylen)
703 unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
704 struct cc_crypto_req cc_req = {};
705 struct cc_hash_ctx *ctx = NULL;
706 int blocksize = 0;
707 int digestsize = 0;
708 int i, idx = 0, rc = 0;
709 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
710 cc_sram_addr_t larval_addr;
711 struct device *dev;
713 ctx = crypto_ahash_ctx(ahash);
714 dev = drvdata_to_dev(ctx->drvdata);
715 dev_dbg(dev, "start keylen: %d", keylen);
717 blocksize = crypto_tfm_alg_blocksize(&ahash->base);
718 digestsize = crypto_ahash_digestsize(ahash);
720 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->hash_mode);
722 /* A keylen of ZERO bytes selects the plain HASH flow; any NON-ZERO
723 * keylen utilizes the HMAC flow.
724 */
725 ctx->key_params.keylen = keylen;
726 ctx->key_params.key_dma_addr = 0;
727 ctx->is_hmac = true;
728 ctx->key_params.key = NULL;
730 if (keylen) {
731 ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
732 if (!ctx->key_params.key)
733 return -ENOMEM;
735 ctx->key_params.key_dma_addr =
736 dma_map_single(dev, (void *)ctx->key_params.key, keylen,
737 DMA_TO_DEVICE);
738 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
739 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
740 ctx->key_params.key, keylen);
741 kzfree(ctx->key_params.key);
742 return -ENOMEM;
744 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
745 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
747 if (keylen > blocksize) {
748 /* Load hash initial state */
749 hw_desc_init(&desc[idx]);
750 set_cipher_mode(&desc[idx], ctx->hw_mode);
751 set_din_sram(&desc[idx], larval_addr,
752 ctx->inter_digestsize);
753 set_flow_mode(&desc[idx], S_DIN_to_HASH);
754 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
755 idx++;
757 /* Load the hash current length*/
758 hw_desc_init(&desc[idx]);
759 set_cipher_mode(&desc[idx], ctx->hw_mode);
760 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
761 set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
762 set_flow_mode(&desc[idx], S_DIN_to_HASH);
763 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
764 idx++;
766 hw_desc_init(&desc[idx]);
767 set_din_type(&desc[idx], DMA_DLLI,
768 ctx->key_params.key_dma_addr, keylen,
769 NS_BIT);
770 set_flow_mode(&desc[idx], DIN_HASH);
771 idx++;
773 /* Get hashed key */
774 hw_desc_init(&desc[idx]);
775 set_cipher_mode(&desc[idx], ctx->hw_mode);
776 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
777 digestsize, NS_BIT, 0);
778 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
779 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
780 set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
781 cc_set_endianity(ctx->hash_mode, &desc[idx]);
782 idx++;
784 hw_desc_init(&desc[idx]);
785 set_din_const(&desc[idx], 0, (blocksize - digestsize));
786 set_flow_mode(&desc[idx], BYPASS);
787 set_dout_dlli(&desc[idx],
788 (ctx->opad_tmp_keys_dma_addr +
789 digestsize),
790 (blocksize - digestsize), NS_BIT, 0);
791 idx++;
792 } else {
793 hw_desc_init(&desc[idx]);
794 set_din_type(&desc[idx], DMA_DLLI,
795 ctx->key_params.key_dma_addr, keylen,
796 NS_BIT);
797 set_flow_mode(&desc[idx], BYPASS);
798 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
799 keylen, NS_BIT, 0);
800 idx++;
802 if ((blocksize - keylen)) {
803 hw_desc_init(&desc[idx]);
804 set_din_const(&desc[idx], 0,
805 (blocksize - keylen));
806 set_flow_mode(&desc[idx], BYPASS);
807 set_dout_dlli(&desc[idx],
808 (ctx->opad_tmp_keys_dma_addr +
809 keylen), (blocksize - keylen),
810 NS_BIT, 0);
811 idx++;
814 } else {
815 hw_desc_init(&desc[idx]);
816 set_din_const(&desc[idx], 0, blocksize);
817 set_flow_mode(&desc[idx], BYPASS);
818 set_dout_dlli(&desc[idx], (ctx->opad_tmp_keys_dma_addr),
819 blocksize, NS_BIT, 0);
820 idx++;
823 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
824 if (rc) {
825 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
826 goto out;
829 /* calc derived HMAC key */
830 for (idx = 0, i = 0; i < 2; i++) {
831 /* Load hash initial state */
832 hw_desc_init(&desc[idx]);
833 set_cipher_mode(&desc[idx], ctx->hw_mode);
834 set_din_sram(&desc[idx], larval_addr, ctx->inter_digestsize);
835 set_flow_mode(&desc[idx], S_DIN_to_HASH);
836 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
837 idx++;
839 /* Load the hash current length*/
840 hw_desc_init(&desc[idx]);
841 set_cipher_mode(&desc[idx], ctx->hw_mode);
842 set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
843 set_flow_mode(&desc[idx], S_DIN_to_HASH);
844 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
845 idx++;
847 /* Prepare ipad key */
848 hw_desc_init(&desc[idx]);
849 set_xor_val(&desc[idx], hmac_pad_const[i]);
850 set_cipher_mode(&desc[idx], ctx->hw_mode);
851 set_flow_mode(&desc[idx], S_DIN_to_HASH);
852 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
853 idx++;
855 /* Perform HASH update */
856 hw_desc_init(&desc[idx]);
857 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
858 blocksize, NS_BIT);
859 set_cipher_mode(&desc[idx], ctx->hw_mode);
860 set_xor_active(&desc[idx]);
861 set_flow_mode(&desc[idx], DIN_HASH);
862 idx++;
864 /* Get the IPAD/OPAD xor key (Note, IPAD is the initial digest
865 * of the first HASH "update" state)
866 */
867 hw_desc_init(&desc[idx]);
868 set_cipher_mode(&desc[idx], ctx->hw_mode);
869 if (i > 0) /* Not first iteration */
870 set_dout_dlli(&desc[idx], ctx->opad_tmp_keys_dma_addr,
871 ctx->inter_digestsize, NS_BIT, 0);
872 else /* First iteration */
873 set_dout_dlli(&desc[idx], ctx->digest_buff_dma_addr,
874 ctx->inter_digestsize, NS_BIT, 0);
875 set_flow_mode(&desc[idx], S_HASH_to_DOUT);
876 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
877 idx++;
880 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
882 out:
883 if (rc)
884 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
886 if (ctx->key_params.key_dma_addr) {
887 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
888 ctx->key_params.keylen, DMA_TO_DEVICE);
889 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
890 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
893 kzfree(ctx->key_params.key);
895 return rc;
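/*
 * cc_xcbc_setkey() - derive the XCBC subkeys K1, K2 and K3 by AES-ECB
 * encrypting the constants 0x01..01, 0x02..02 and 0x03..03 with the user
 * key and storing them at their offsets in opad_tmp_keys_buff.
 */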
898 static int cc_xcbc_setkey(struct crypto_ahash *ahash,
899 const u8 *key, unsigned int keylen)
901 struct cc_crypto_req cc_req = {};
902 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
903 struct device *dev = drvdata_to_dev(ctx->drvdata);
904 int rc = 0;
905 unsigned int idx = 0;
906 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
908 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
910 switch (keylen) {
911 case AES_KEYSIZE_128:
912 case AES_KEYSIZE_192:
913 case AES_KEYSIZE_256:
914 break;
915 default:
916 return -EINVAL;
919 ctx->key_params.keylen = keylen;
921 ctx->key_params.key = kmemdup(key, keylen, GFP_KERNEL);
922 if (!ctx->key_params.key)
923 return -ENOMEM;
925 ctx->key_params.key_dma_addr =
926 dma_map_single(dev, ctx->key_params.key, keylen, DMA_TO_DEVICE);
927 if (dma_mapping_error(dev, ctx->key_params.key_dma_addr)) {
928 dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
929 key, keylen);
930 kzfree(ctx->key_params.key);
931 return -ENOMEM;
933 dev_dbg(dev, "mapping key-buffer: key_dma_addr=%pad keylen=%u\n",
934 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
936 ctx->is_hmac = true;
937 /* 1. Load the AES key */
938 hw_desc_init(&desc[idx]);
939 set_din_type(&desc[idx], DMA_DLLI, ctx->key_params.key_dma_addr,
940 keylen, NS_BIT);
941 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
942 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
943 set_key_size_aes(&desc[idx], keylen);
944 set_flow_mode(&desc[idx], S_DIN_to_AES);
945 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
946 idx++;
948 hw_desc_init(&desc[idx]);
949 set_din_const(&desc[idx], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
950 set_flow_mode(&desc[idx], DIN_AES_DOUT);
951 set_dout_dlli(&desc[idx],
952 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
953 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
954 idx++;
956 hw_desc_init(&desc[idx]);
957 set_din_const(&desc[idx], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
958 set_flow_mode(&desc[idx], DIN_AES_DOUT);
959 set_dout_dlli(&desc[idx],
960 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
961 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
962 idx++;
964 hw_desc_init(&desc[idx]);
965 set_din_const(&desc[idx], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
966 set_flow_mode(&desc[idx], DIN_AES_DOUT);
967 set_dout_dlli(&desc[idx],
968 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
969 CC_AES_128_BIT_KEY_SIZE, NS_BIT, 0);
970 idx++;
972 rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
974 if (rc)
975 crypto_ahash_set_flags(ahash, CRYPTO_TFM_RES_BAD_KEY_LEN);
977 dma_unmap_single(dev, ctx->key_params.key_dma_addr,
978 ctx->key_params.keylen, DMA_TO_DEVICE);
979 dev_dbg(dev, "Unmapped key-buffer: key_dma_addr=%pad keylen=%u\n",
980 &ctx->key_params.key_dma_addr, ctx->key_params.keylen);
982 kzfree(ctx->key_params.key);
984 return rc;
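/*
 * cc_cmac_setkey() - CMAC only needs the raw AES key: copy it into
 * opad_tmp_keys_buff (zero-padding 192-bit keys up to the maximum AES key
 * size) and sync the buffer back to the device.
 */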
987 static int cc_cmac_setkey(struct crypto_ahash *ahash,
988 const u8 *key, unsigned int keylen)
990 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
991 struct device *dev = drvdata_to_dev(ctx->drvdata);
993 dev_dbg(dev, "===== setkey (%d) ====\n", keylen);
995 ctx->is_hmac = true;
997 switch (keylen) {
998 case AES_KEYSIZE_128:
999 case AES_KEYSIZE_192:
1000 case AES_KEYSIZE_256:
1001 break;
1002 default:
1003 return -EINVAL;
1006 ctx->key_params.keylen = keylen;
1008 /* STAT_PHASE_1: Copy key to ctx */
1010 dma_sync_single_for_cpu(dev, ctx->opad_tmp_keys_dma_addr,
1011 keylen, DMA_TO_DEVICE);
1013 memcpy(ctx->opad_tmp_keys_buff, key, keylen);
1014 if (keylen == 24) {
1015 memset(ctx->opad_tmp_keys_buff + 24, 0,
1016 CC_AES_KEY_SIZE_MAX - 24);
1019 dma_sync_single_for_device(dev, ctx->opad_tmp_keys_dma_addr,
1020 keylen, DMA_TO_DEVICE);
1022 ctx->key_params.keylen = keylen;
1024 return 0;
1027 static void cc_free_ctx(struct cc_hash_ctx *ctx)
1029 struct device *dev = drvdata_to_dev(ctx->drvdata);
1031 if (ctx->digest_buff_dma_addr) {
1032 dma_unmap_single(dev, ctx->digest_buff_dma_addr,
1033 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1034 dev_dbg(dev, "Unmapped digest-buffer: digest_buff_dma_addr=%pad\n",
1035 &ctx->digest_buff_dma_addr);
1036 ctx->digest_buff_dma_addr = 0;
1038 if (ctx->opad_tmp_keys_dma_addr) {
1039 dma_unmap_single(dev, ctx->opad_tmp_keys_dma_addr,
1040 sizeof(ctx->opad_tmp_keys_buff),
1041 DMA_BIDIRECTIONAL);
1042 dev_dbg(dev, "Unmapped opad-digest: opad_tmp_keys_dma_addr=%pad\n",
1043 &ctx->opad_tmp_keys_dma_addr);
1044 ctx->opad_tmp_keys_dma_addr = 0;
1047 ctx->key_params.keylen = 0;
1050 static int cc_alloc_ctx(struct cc_hash_ctx *ctx)
1052 struct device *dev = drvdata_to_dev(ctx->drvdata);
1054 ctx->key_params.keylen = 0;
1056 ctx->digest_buff_dma_addr =
1057 dma_map_single(dev, (void *)ctx->digest_buff,
1058 sizeof(ctx->digest_buff), DMA_BIDIRECTIONAL);
1059 if (dma_mapping_error(dev, ctx->digest_buff_dma_addr)) {
1060 dev_err(dev, "Mapping digest len %zu B at va=%pK for DMA failed\n",
1061 sizeof(ctx->digest_buff), ctx->digest_buff);
1062 goto fail;
1064 dev_dbg(dev, "Mapped digest %zu B at va=%pK to dma=%pad\n",
1065 sizeof(ctx->digest_buff), ctx->digest_buff,
1066 &ctx->digest_buff_dma_addr);
1068 ctx->opad_tmp_keys_dma_addr =
1069 dma_map_single(dev, (void *)ctx->opad_tmp_keys_buff,
1070 sizeof(ctx->opad_tmp_keys_buff),
1071 DMA_BIDIRECTIONAL);
1072 if (dma_mapping_error(dev, ctx->opad_tmp_keys_dma_addr)) {
1073 dev_err(dev, "Mapping opad digest %zu B at va=%pK for DMA failed\n",
1074 sizeof(ctx->opad_tmp_keys_buff),
1075 ctx->opad_tmp_keys_buff);
1076 goto fail;
1078 dev_dbg(dev, "Mapped opad_tmp_keys %zu B at va=%pK to dma=%pad\n",
1079 sizeof(ctx->opad_tmp_keys_buff), ctx->opad_tmp_keys_buff,
1080 &ctx->opad_tmp_keys_dma_addr);
1082 ctx->is_hmac = false;
1083 return 0;
1085 fail:
1086 cc_free_ctx(ctx);
1087 return -ENOMEM;
1090 static int cc_cra_init(struct crypto_tfm *tfm)
1092 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1093 struct hash_alg_common *hash_alg_common =
1094 container_of(tfm->__crt_alg, struct hash_alg_common, base);
1095 struct ahash_alg *ahash_alg =
1096 container_of(hash_alg_common, struct ahash_alg, halg);
1097 struct cc_hash_alg *cc_alg =
1098 container_of(ahash_alg, struct cc_hash_alg, ahash_alg);
1100 crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
1101 sizeof(struct ahash_req_ctx));
1103 ctx->hash_mode = cc_alg->hash_mode;
1104 ctx->hw_mode = cc_alg->hw_mode;
1105 ctx->inter_digestsize = cc_alg->inter_digestsize;
1106 ctx->drvdata = cc_alg->drvdata;
1108 return cc_alloc_ctx(ctx);
1111 static void cc_cra_exit(struct crypto_tfm *tfm)
1113 struct cc_hash_ctx *ctx = crypto_tfm_ctx(tfm);
1114 struct device *dev = drvdata_to_dev(ctx->drvdata);
1116 dev_dbg(dev, "cc_cra_exit");
1117 cc_free_ctx(ctx);
1120 static int cc_mac_update(struct ahash_request *req)
1122 struct ahash_req_ctx *state = ahash_request_ctx(req);
1123 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1124 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1125 struct device *dev = drvdata_to_dev(ctx->drvdata);
1126 unsigned int block_size = crypto_tfm_alg_blocksize(&tfm->base);
1127 struct cc_crypto_req cc_req = {};
1128 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1129 int rc;
1130 u32 idx = 0;
1131 gfp_t flags = cc_gfp_flags(&req->base);
1133 if (req->nbytes == 0) {
1134 /* no real updates required */
1135 return 0;
1138 state->xcbc_count++;
1140 rc = cc_map_hash_request_update(ctx->drvdata, state, req->src,
1141 req->nbytes, block_size, flags);
1142 if (rc) {
1143 if (rc == 1) {
1144 dev_dbg(dev, " data size not require HW update %x\n",
1145 req->nbytes);
1146 /* No hardware updates are required */
1147 return 0;
1149 dev_err(dev, "map_ahash_request_update() failed\n");
1150 return -ENOMEM;
1153 if (cc_map_req(dev, state, ctx)) {
1154 dev_err(dev, "map_ahash_source() failed\n");
1155 return -EINVAL;
1158 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1159 cc_setup_xcbc(req, desc, &idx);
1160 else
1161 cc_setup_cmac(req, desc, &idx);
1163 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, true, &idx);
1165 /* store the hash digest result in context */
1166 hw_desc_init(&desc[idx]);
1167 set_cipher_mode(&desc[idx], ctx->hw_mode);
1168 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1169 ctx->inter_digestsize, NS_BIT, 1);
1170 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1171 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1172 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1173 idx++;
1175 /* Setup request structure */
1176 cc_req.user_cb = (void *)cc_update_complete;
1177 cc_req.user_arg = (void *)req;
1179 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1180 if (rc != -EINPROGRESS && rc != -EBUSY) {
1181 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1182 cc_unmap_hash_request(dev, state, req->src, true);
1183 cc_unmap_req(dev, state, ctx);
1185 return rc;
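/*
 * cc_mac_final() - ->final for XCBC/CMAC. If earlier updates consumed all
 * the data (no tail bytes buffered), the stored block state is first
 * ECB-decrypted back to block_state XOR M[n] so the last block can be redone
 * with final padding; the MAC is then finalized over the remaining bytes (or
 * a zero-length message) and written to the result buffer.
 */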
1188 static int cc_mac_final(struct ahash_request *req)
1190 struct ahash_req_ctx *state = ahash_request_ctx(req);
1191 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1192 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1193 struct device *dev = drvdata_to_dev(ctx->drvdata);
1194 struct cc_crypto_req cc_req = {};
1195 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1196 int idx = 0;
1197 int rc = 0;
1198 u32 key_size, key_len;
1199 u32 digestsize = crypto_ahash_digestsize(tfm);
1200 gfp_t flags = cc_gfp_flags(&req->base);
1201 u32 rem_cnt = *cc_hash_buf_cnt(state);
1203 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1204 key_size = CC_AES_128_BIT_KEY_SIZE;
1205 key_len = CC_AES_128_BIT_KEY_SIZE;
1206 } else {
1207 key_size = (ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
1208 ctx->key_params.keylen;
1209 key_len = ctx->key_params.keylen;
1212 dev_dbg(dev, "===== final xcbc reminder (%d) ====\n", rem_cnt);
1214 if (cc_map_req(dev, state, ctx)) {
1215 dev_err(dev, "map_ahash_source() failed\n");
1216 return -EINVAL;
1219 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1220 req->nbytes, 0, flags)) {
1221 dev_err(dev, "map_ahash_request_final() failed\n");
1222 cc_unmap_req(dev, state, ctx);
1223 return -ENOMEM;
1226 if (cc_map_result(dev, state, digestsize)) {
1227 dev_err(dev, "map_ahash_digest() failed\n");
1228 cc_unmap_hash_request(dev, state, req->src, true);
1229 cc_unmap_req(dev, state, ctx);
1230 return -ENOMEM;
1233 /* Setup request structure */
1234 cc_req.user_cb = (void *)cc_hash_complete;
1235 cc_req.user_arg = (void *)req;
1237 if (state->xcbc_count && rem_cnt == 0) {
1238 /* Load key for ECB decryption */
1239 hw_desc_init(&desc[idx]);
1240 set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
1241 set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_DECRYPT);
1242 set_din_type(&desc[idx], DMA_DLLI,
1243 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K1_OFFSET),
1244 key_size, NS_BIT);
1245 set_key_size_aes(&desc[idx], key_len);
1246 set_flow_mode(&desc[idx], S_DIN_to_AES);
1247 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
1248 idx++;
1250 /* Initiate decryption of block state to previous
1251 * block_state-XOR-M[n]
1252 */
1253 hw_desc_init(&desc[idx]);
1254 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
1255 CC_AES_BLOCK_SIZE, NS_BIT);
1256 set_dout_dlli(&desc[idx], state->digest_buff_dma_addr,
1257 CC_AES_BLOCK_SIZE, NS_BIT, 0);
1258 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1259 idx++;
1261 /* Memory Barrier: wait for axi write to complete */
1262 hw_desc_init(&desc[idx]);
1263 set_din_no_dma(&desc[idx], 0, 0xfffff0);
1264 set_dout_no_dma(&desc[idx], 0, 0, 1);
1265 idx++;
1268 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC)
1269 cc_setup_xcbc(req, desc, &idx);
1270 else
1271 cc_setup_cmac(req, desc, &idx);
1273 if (state->xcbc_count == 0) {
1274 hw_desc_init(&desc[idx]);
1275 set_cipher_mode(&desc[idx], ctx->hw_mode);
1276 set_key_size_aes(&desc[idx], key_len);
1277 set_cmac_size0_mode(&desc[idx]);
1278 set_flow_mode(&desc[idx], S_DIN_to_AES);
1279 idx++;
1280 } else if (rem_cnt > 0) {
1281 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1282 } else {
1283 hw_desc_init(&desc[idx]);
1284 set_din_const(&desc[idx], 0x00, CC_AES_BLOCK_SIZE);
1285 set_flow_mode(&desc[idx], DIN_AES_DOUT);
1286 idx++;
1289 /* Get final MAC result */
1290 hw_desc_init(&desc[idx]);
1291 /* TODO */
1292 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1293 digestsize, NS_BIT, 1);
1294 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1295 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1296 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1297 set_cipher_mode(&desc[idx], ctx->hw_mode);
1298 idx++;
1300 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1301 if (rc != -EINPROGRESS && rc != -EBUSY) {
1302 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1303 cc_unmap_hash_request(dev, state, req->src, true);
1304 cc_unmap_result(dev, state, digestsize, req->result);
1305 cc_unmap_req(dev, state, ctx);
1307 return rc;
1310 static int cc_mac_finup(struct ahash_request *req)
1312 struct ahash_req_ctx *state = ahash_request_ctx(req);
1313 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1314 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1315 struct device *dev = drvdata_to_dev(ctx->drvdata);
1316 struct cc_crypto_req cc_req = {};
1317 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1318 int idx = 0;
1319 int rc = 0;
1320 u32 key_len = 0;
1321 u32 digestsize = crypto_ahash_digestsize(tfm);
1322 gfp_t flags = cc_gfp_flags(&req->base);
1324 dev_dbg(dev, "===== finup xcbc(%d) ====\n", req->nbytes);
1325 if (state->xcbc_count > 0 && req->nbytes == 0) {
1326 dev_dbg(dev, "No data to update. Call to fdx_mac_final\n");
1327 return cc_mac_final(req);
1330 if (cc_map_req(dev, state, ctx)) {
1331 dev_err(dev, "map_ahash_source() failed\n");
1332 return -EINVAL;
1335 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1336 req->nbytes, 1, flags)) {
1337 dev_err(dev, "map_ahash_request_final() failed\n");
1338 cc_unmap_req(dev, state, ctx);
1339 return -ENOMEM;
1341 if (cc_map_result(dev, state, digestsize)) {
1342 dev_err(dev, "map_ahash_digest() failed\n");
1343 cc_unmap_hash_request(dev, state, req->src, true);
1344 cc_unmap_req(dev, state, ctx);
1345 return -ENOMEM;
1348 /* Setup request structure */
1349 cc_req.user_cb = (void *)cc_hash_complete;
1350 cc_req.user_arg = (void *)req;
1352 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1353 key_len = CC_AES_128_BIT_KEY_SIZE;
1354 cc_setup_xcbc(req, desc, &idx);
1355 } else {
1356 key_len = ctx->key_params.keylen;
1357 cc_setup_cmac(req, desc, &idx);
1360 if (req->nbytes == 0) {
1361 hw_desc_init(&desc[idx]);
1362 set_cipher_mode(&desc[idx], ctx->hw_mode);
1363 set_key_size_aes(&desc[idx], key_len);
1364 set_cmac_size0_mode(&desc[idx]);
1365 set_flow_mode(&desc[idx], S_DIN_to_AES);
1366 idx++;
1367 } else {
1368 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1371 /* Get final MAC result */
1372 hw_desc_init(&desc[idx]);
1373 /* TODO */
1374 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1375 digestsize, NS_BIT, 1);
1376 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1377 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1378 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1379 set_cipher_mode(&desc[idx], ctx->hw_mode);
1380 idx++;
1382 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1383 if (rc != -EINPROGRESS && rc != -EBUSY) {
1384 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1385 cc_unmap_hash_request(dev, state, req->src, true);
1386 cc_unmap_result(dev, state, digestsize, req->result);
1387 cc_unmap_req(dev, state, ctx);
1389 return rc;
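/*
 * cc_mac_digest() - one-shot ->digest for XCBC/CMAC: init and map the
 * request, program the XCBC or CMAC key setup, feed the data (or use the
 * zero-size path when nbytes == 0) and write the MAC to the result buffer.
 */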
1392 static int cc_mac_digest(struct ahash_request *req)
1394 struct ahash_req_ctx *state = ahash_request_ctx(req);
1395 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
1396 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
1397 struct device *dev = drvdata_to_dev(ctx->drvdata);
1398 u32 digestsize = crypto_ahash_digestsize(tfm);
1399 struct cc_crypto_req cc_req = {};
1400 struct cc_hw_desc desc[CC_MAX_HASH_SEQ_LEN];
1401 u32 key_len;
1402 unsigned int idx = 0;
1403 int rc;
1404 gfp_t flags = cc_gfp_flags(&req->base);
1406 dev_dbg(dev, "===== -digest mac (%d) ====\n", req->nbytes);
1408 cc_init_req(dev, state, ctx);
1410 if (cc_map_req(dev, state, ctx)) {
1411 dev_err(dev, "map_ahash_source() failed\n");
1412 return -ENOMEM;
1414 if (cc_map_result(dev, state, digestsize)) {
1415 dev_err(dev, "map_ahash_digest() failed\n");
1416 cc_unmap_req(dev, state, ctx);
1417 return -ENOMEM;
1420 if (cc_map_hash_request_final(ctx->drvdata, state, req->src,
1421 req->nbytes, 1, flags)) {
1422 dev_err(dev, "map_ahash_request_final() failed\n");
1423 cc_unmap_req(dev, state, ctx);
1424 return -ENOMEM;
1427 /* Setup request structure */
1428 cc_req.user_cb = (void *)cc_digest_complete;
1429 cc_req.user_arg = (void *)req;
1431 if (ctx->hw_mode == DRV_CIPHER_XCBC_MAC) {
1432 key_len = CC_AES_128_BIT_KEY_SIZE;
1433 cc_setup_xcbc(req, desc, &idx);
1434 } else {
1435 key_len = ctx->key_params.keylen;
1436 cc_setup_cmac(req, desc, &idx);
1439 if (req->nbytes == 0) {
1440 hw_desc_init(&desc[idx]);
1441 set_cipher_mode(&desc[idx], ctx->hw_mode);
1442 set_key_size_aes(&desc[idx], key_len);
1443 set_cmac_size0_mode(&desc[idx]);
1444 set_flow_mode(&desc[idx], S_DIN_to_AES);
1445 idx++;
1446 } else {
1447 cc_set_desc(state, ctx, DIN_AES_DOUT, desc, false, &idx);
1450 /* Get final MAC result */
1451 hw_desc_init(&desc[idx]);
1452 set_dout_dlli(&desc[idx], state->digest_result_dma_addr,
1453 CC_AES_BLOCK_SIZE, NS_BIT, 1);
1454 set_queue_last_ind(ctx->drvdata, &desc[idx]);
1455 set_flow_mode(&desc[idx], S_AES_to_DOUT);
1456 set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
1457 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
1458 set_cipher_mode(&desc[idx], ctx->hw_mode);
1459 idx++;
1461 rc = cc_send_request(ctx->drvdata, &cc_req, desc, idx, &req->base);
1462 if (rc != -EINPROGRESS && rc != -EBUSY) {
1463 dev_err(dev, "send_request() failed (rc=%d)\n", rc);
1464 cc_unmap_hash_request(dev, state, req->src, true);
1465 cc_unmap_result(dev, state, digestsize, req->result);
1466 cc_unmap_req(dev, state, ctx);
1468 return rc;
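/*
 * cc_hash_export()/cc_hash_import() - serialize the request state as:
 * CC_EXPORT_MAGIC (u32) | digest_buff | digest_bytes_len | buffered byte
 * count (u32) | buffered data. CC_STATE_SIZE() below reserves the worst case
 * for .statesize; import validates the magic and the buffer count.
 */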
1471 static int cc_hash_export(struct ahash_request *req, void *out)
1473 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1474 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1475 struct ahash_req_ctx *state = ahash_request_ctx(req);
1476 u8 *curr_buff = cc_hash_buf(state);
1477 u32 curr_buff_cnt = *cc_hash_buf_cnt(state);
1478 const u32 tmp = CC_EXPORT_MAGIC;
1480 memcpy(out, &tmp, sizeof(u32));
1481 out += sizeof(u32);
1483 memcpy(out, state->digest_buff, ctx->inter_digestsize);
1484 out += ctx->inter_digestsize;
1486 memcpy(out, state->digest_bytes_len, ctx->drvdata->hash_len_sz);
1487 out += ctx->drvdata->hash_len_sz;
1489 memcpy(out, &curr_buff_cnt, sizeof(u32));
1490 out += sizeof(u32);
1492 memcpy(out, curr_buff, curr_buff_cnt);
1494 return 0;
1497 static int cc_hash_import(struct ahash_request *req, const void *in)
1499 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
1500 struct cc_hash_ctx *ctx = crypto_ahash_ctx(ahash);
1501 struct device *dev = drvdata_to_dev(ctx->drvdata);
1502 struct ahash_req_ctx *state = ahash_request_ctx(req);
1503 u32 tmp;
1505 memcpy(&tmp, in, sizeof(u32));
1506 if (tmp != CC_EXPORT_MAGIC)
1507 return -EINVAL;
1508 in += sizeof(u32);
1510 cc_init_req(dev, state, ctx);
1512 memcpy(state->digest_buff, in, ctx->inter_digestsize);
1513 in += ctx->inter_digestsize;
1515 memcpy(state->digest_bytes_len, in, ctx->drvdata->hash_len_sz);
1516 in += ctx->drvdata->hash_len_sz;
1518 /* Sanity check the data as much as possible */
1519 memcpy(&tmp, in, sizeof(u32));
1520 if (tmp > CC_MAX_HASH_BLCK_SIZE)
1521 return -EINVAL;
1522 in += sizeof(u32);
1524 state->buf_cnt[0] = tmp;
1525 memcpy(state->buffers[0], in, tmp);
1527 return 0;
1530 struct cc_hash_template {
1531 char name[CRYPTO_MAX_ALG_NAME];
1532 char driver_name[CRYPTO_MAX_ALG_NAME];
1533 char mac_name[CRYPTO_MAX_ALG_NAME];
1534 char mac_driver_name[CRYPTO_MAX_ALG_NAME];
1535 unsigned int blocksize;
1536 bool synchronize;
1537 struct ahash_alg template_ahash;
1538 int hash_mode;
1539 int hw_mode;
1540 int inter_digestsize;
1541 struct cc_drvdata *drvdata;
1542 u32 min_hw_rev;
1545 #define CC_STATE_SIZE(_x) \
1546 ((_x) + HASH_MAX_LEN_SIZE + CC_MAX_HASH_BLCK_SIZE + (2 * sizeof(u32)))
1548 /* hash descriptors */
1549 static struct cc_hash_template driver_hash[] = {
1550 // Asynchronous hash templates
1552 .name = "sha1",
1553 .driver_name = "sha1-ccree",
1554 .mac_name = "hmac(sha1)",
1555 .mac_driver_name = "hmac-sha1-ccree",
1556 .blocksize = SHA1_BLOCK_SIZE,
1557 .synchronize = false,
1558 .template_ahash = {
1559 .init = cc_hash_init,
1560 .update = cc_hash_update,
1561 .final = cc_hash_final,
1562 .finup = cc_hash_finup,
1563 .digest = cc_hash_digest,
1564 .export = cc_hash_export,
1565 .import = cc_hash_import,
1566 .setkey = cc_hash_setkey,
1567 .halg = {
1568 .digestsize = SHA1_DIGEST_SIZE,
1569 .statesize = CC_STATE_SIZE(SHA1_DIGEST_SIZE),
1572 .hash_mode = DRV_HASH_SHA1,
1573 .hw_mode = DRV_HASH_HW_SHA1,
1574 .inter_digestsize = SHA1_DIGEST_SIZE,
1575 .min_hw_rev = CC_HW_REV_630,
1578 .name = "sha256",
1579 .driver_name = "sha256-ccree",
1580 .mac_name = "hmac(sha256)",
1581 .mac_driver_name = "hmac-sha256-ccree",
1582 .blocksize = SHA256_BLOCK_SIZE,
1583 .template_ahash = {
1584 .init = cc_hash_init,
1585 .update = cc_hash_update,
1586 .final = cc_hash_final,
1587 .finup = cc_hash_finup,
1588 .digest = cc_hash_digest,
1589 .export = cc_hash_export,
1590 .import = cc_hash_import,
1591 .setkey = cc_hash_setkey,
1592 .halg = {
1593 .digestsize = SHA256_DIGEST_SIZE,
1594 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE)
1597 .hash_mode = DRV_HASH_SHA256,
1598 .hw_mode = DRV_HASH_HW_SHA256,
1599 .inter_digestsize = SHA256_DIGEST_SIZE,
1600 .min_hw_rev = CC_HW_REV_630,
1603 .name = "sha224",
1604 .driver_name = "sha224-ccree",
1605 .mac_name = "hmac(sha224)",
1606 .mac_driver_name = "hmac-sha224-ccree",
1607 .blocksize = SHA224_BLOCK_SIZE,
1608 .template_ahash = {
1609 .init = cc_hash_init,
1610 .update = cc_hash_update,
1611 .final = cc_hash_final,
1612 .finup = cc_hash_finup,
1613 .digest = cc_hash_digest,
1614 .export = cc_hash_export,
1615 .import = cc_hash_import,
1616 .setkey = cc_hash_setkey,
1617 .halg = {
1618 .digestsize = SHA224_DIGEST_SIZE,
1619 .statesize = CC_STATE_SIZE(SHA256_DIGEST_SIZE),
1622 .hash_mode = DRV_HASH_SHA224,
1623 .hw_mode = DRV_HASH_HW_SHA256,
1624 .inter_digestsize = SHA256_DIGEST_SIZE,
1625 .min_hw_rev = CC_HW_REV_630,
1628 .name = "sha384",
1629 .driver_name = "sha384-ccree",
1630 .mac_name = "hmac(sha384)",
1631 .mac_driver_name = "hmac-sha384-ccree",
1632 .blocksize = SHA384_BLOCK_SIZE,
1633 .template_ahash = {
1634 .init = cc_hash_init,
1635 .update = cc_hash_update,
1636 .final = cc_hash_final,
1637 .finup = cc_hash_finup,
1638 .digest = cc_hash_digest,
1639 .export = cc_hash_export,
1640 .import = cc_hash_import,
1641 .setkey = cc_hash_setkey,
1642 .halg = {
1643 .digestsize = SHA384_DIGEST_SIZE,
1644 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1647 .hash_mode = DRV_HASH_SHA384,
1648 .hw_mode = DRV_HASH_HW_SHA512,
1649 .inter_digestsize = SHA512_DIGEST_SIZE,
1650 .min_hw_rev = CC_HW_REV_712,
1653 .name = "sha512",
1654 .driver_name = "sha512-ccree",
1655 .mac_name = "hmac(sha512)",
1656 .mac_driver_name = "hmac-sha512-ccree",
1657 .blocksize = SHA512_BLOCK_SIZE,
1658 .template_ahash = {
1659 .init = cc_hash_init,
1660 .update = cc_hash_update,
1661 .final = cc_hash_final,
1662 .finup = cc_hash_finup,
1663 .digest = cc_hash_digest,
1664 .export = cc_hash_export,
1665 .import = cc_hash_import,
1666 .setkey = cc_hash_setkey,
1667 .halg = {
1668 .digestsize = SHA512_DIGEST_SIZE,
1669 .statesize = CC_STATE_SIZE(SHA512_DIGEST_SIZE),
1672 .hash_mode = DRV_HASH_SHA512,
1673 .hw_mode = DRV_HASH_HW_SHA512,
1674 .inter_digestsize = SHA512_DIGEST_SIZE,
1675 .min_hw_rev = CC_HW_REV_712,
1678 .name = "md5",
1679 .driver_name = "md5-ccree",
1680 .mac_name = "hmac(md5)",
1681 .mac_driver_name = "hmac-md5-ccree",
1682 .blocksize = MD5_HMAC_BLOCK_SIZE,
1683 .template_ahash = {
1684 .init = cc_hash_init,
1685 .update = cc_hash_update,
1686 .final = cc_hash_final,
1687 .finup = cc_hash_finup,
1688 .digest = cc_hash_digest,
1689 .export = cc_hash_export,
1690 .import = cc_hash_import,
1691 .setkey = cc_hash_setkey,
1692 .halg = {
1693 .digestsize = MD5_DIGEST_SIZE,
1694 .statesize = CC_STATE_SIZE(MD5_DIGEST_SIZE),
1697 .hash_mode = DRV_HASH_MD5,
1698 .hw_mode = DRV_HASH_HW_MD5,
1699 .inter_digestsize = MD5_DIGEST_SIZE,
1700 .min_hw_rev = CC_HW_REV_630,
1703 .mac_name = "xcbc(aes)",
1704 .mac_driver_name = "xcbc-aes-ccree",
1705 .blocksize = AES_BLOCK_SIZE,
1706 .template_ahash = {
1707 .init = cc_hash_init,
1708 .update = cc_mac_update,
1709 .final = cc_mac_final,
1710 .finup = cc_mac_finup,
1711 .digest = cc_mac_digest,
1712 .setkey = cc_xcbc_setkey,
1713 .export = cc_hash_export,
1714 .import = cc_hash_import,
1715 .halg = {
1716 .digestsize = AES_BLOCK_SIZE,
1717 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1720 .hash_mode = DRV_HASH_NULL,
1721 .hw_mode = DRV_CIPHER_XCBC_MAC,
1722 .inter_digestsize = AES_BLOCK_SIZE,
1723 .min_hw_rev = CC_HW_REV_630,
1726 .mac_name = "cmac(aes)",
1727 .mac_driver_name = "cmac-aes-ccree",
1728 .blocksize = AES_BLOCK_SIZE,
1729 .template_ahash = {
1730 .init = cc_hash_init,
1731 .update = cc_mac_update,
1732 .final = cc_mac_final,
1733 .finup = cc_mac_finup,
1734 .digest = cc_mac_digest,
1735 .setkey = cc_cmac_setkey,
1736 .export = cc_hash_export,
1737 .import = cc_hash_import,
1738 .halg = {
1739 .digestsize = AES_BLOCK_SIZE,
1740 .statesize = CC_STATE_SIZE(AES_BLOCK_SIZE),
1743 .hash_mode = DRV_HASH_NULL,
1744 .hw_mode = DRV_CIPHER_CMAC,
1745 .inter_digestsize = AES_BLOCK_SIZE,
1746 .min_hw_rev = CC_HW_REV_630,
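/*
 * The templates above are registered as asynchronous ahash algorithms, so a
 * kernel user reaches them through the regular crypto API. A minimal sketch
 * (assuming the driver is loaded and the HW revision supports the algorithm):
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("hmac(sha256)", 0, 0);
 *	crypto_ahash_setkey(tfm, key, keylen);
 *	... ahash_request_alloc() / ahash_request_set_crypt() /
 *	    crypto_ahash_digest() ...
 *	crypto_free_ahash(tfm);
 */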
1750 static struct cc_hash_alg *cc_alloc_hash_alg(struct cc_hash_template *template,
1751 struct device *dev, bool keyed)
1753 struct cc_hash_alg *t_crypto_alg;
1754 struct crypto_alg *alg;
1755 struct ahash_alg *halg;
1757 t_crypto_alg = kzalloc(sizeof(*t_crypto_alg), GFP_KERNEL);
1758 if (!t_crypto_alg)
1759 return ERR_PTR(-ENOMEM);
1761 t_crypto_alg->ahash_alg = template->template_ahash;
1762 halg = &t_crypto_alg->ahash_alg;
1763 alg = &halg->halg.base;
1765 if (keyed) {
1766 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1767 template->mac_name);
1768 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1769 template->mac_driver_name);
1770 } else {
1771 halg->setkey = NULL;
1772 snprintf(alg->cra_name, CRYPTO_MAX_ALG_NAME, "%s",
1773 template->name);
1774 snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
1775 template->driver_name);
1777 alg->cra_module = THIS_MODULE;
1778 alg->cra_ctxsize = sizeof(struct cc_hash_ctx);
1779 alg->cra_priority = CC_CRA_PRIO;
1780 alg->cra_blocksize = template->blocksize;
1781 alg->cra_alignmask = 0;
1782 alg->cra_exit = cc_cra_exit;
1784 alg->cra_init = cc_cra_init;
1785 alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
1787 t_crypto_alg->hash_mode = template->hash_mode;
1788 t_crypto_alg->hw_mode = template->hw_mode;
1789 t_crypto_alg->inter_digestsize = template->inter_digestsize;
1791 return t_crypto_alg;
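/*
 * cc_init_hash_sram() - copy the digest-length constants and the larval
 * (initial) digests (MD5/SHA-1/SHA-224/SHA-256, plus SHA-384/SHA-512 on
 * HW rev >= 712) into the SRAM region reserved by cc_hash_alloc().
 */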
1794 int cc_init_hash_sram(struct cc_drvdata *drvdata)
1796 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
1797 cc_sram_addr_t sram_buff_ofs = hash_handle->digest_len_sram_addr;
1798 unsigned int larval_seq_len = 0;
1799 struct cc_hw_desc larval_seq[CC_DIGEST_SIZE_MAX / sizeof(u32)];
1800 bool large_sha_supported = (drvdata->hw_rev >= CC_HW_REV_712);
1801 int rc = 0;
1803 /* Copy-to-sram digest-len */
1804 cc_set_sram_desc(digest_len_init, sram_buff_ofs,
1805 ARRAY_SIZE(digest_len_init), larval_seq,
1806 &larval_seq_len);
1807 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1808 if (rc)
1809 goto init_digest_const_err;
1811 sram_buff_ofs += sizeof(digest_len_init);
1812 larval_seq_len = 0;
1814 if (large_sha_supported) {
1815 /* Copy-to-sram digest-len for sha384/512 */
1816 cc_set_sram_desc(digest_len_sha512_init, sram_buff_ofs,
1817 ARRAY_SIZE(digest_len_sha512_init),
1818 larval_seq, &larval_seq_len);
1819 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1820 if (rc)
1821 goto init_digest_const_err;
1823 sram_buff_ofs += sizeof(digest_len_sha512_init);
1824 larval_seq_len = 0;
1827 /* The initial digests offset */
1828 hash_handle->larval_digest_sram_addr = sram_buff_ofs;
1830 /* Copy-to-sram initial SHA* digests */
1831 cc_set_sram_desc(md5_init, sram_buff_ofs, ARRAY_SIZE(md5_init),
1832 larval_seq, &larval_seq_len);
1833 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1834 if (rc)
1835 goto init_digest_const_err;
1836 sram_buff_ofs += sizeof(md5_init);
1837 larval_seq_len = 0;
1839 cc_set_sram_desc(sha1_init, sram_buff_ofs,
1840 ARRAY_SIZE(sha1_init), larval_seq,
1841 &larval_seq_len);
1842 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1843 if (rc)
1844 goto init_digest_const_err;
1845 sram_buff_ofs += sizeof(sha1_init);
1846 larval_seq_len = 0;
1848 cc_set_sram_desc(sha224_init, sram_buff_ofs,
1849 ARRAY_SIZE(sha224_init), larval_seq,
1850 &larval_seq_len);
1851 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1852 if (rc)
1853 goto init_digest_const_err;
1854 sram_buff_ofs += sizeof(sha224_init);
1855 larval_seq_len = 0;
1857 cc_set_sram_desc(sha256_init, sram_buff_ofs,
1858 ARRAY_SIZE(sha256_init), larval_seq,
1859 &larval_seq_len);
1860 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1861 if (rc)
1862 goto init_digest_const_err;
1863 sram_buff_ofs += sizeof(sha256_init);
1864 larval_seq_len = 0;
1866 if (large_sha_supported) {
1867 cc_set_sram_desc((u32 *)sha384_init, sram_buff_ofs,
1868 (ARRAY_SIZE(sha384_init) * 2), larval_seq,
1869 &larval_seq_len);
1870 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1871 if (rc)
1872 goto init_digest_const_err;
1873 sram_buff_ofs += sizeof(sha384_init);
1874 larval_seq_len = 0;
1876 cc_set_sram_desc((u32 *)sha512_init, sram_buff_ofs,
1877 (ARRAY_SIZE(sha512_init) * 2), larval_seq,
1878 &larval_seq_len);
1879 rc = send_request_init(drvdata, larval_seq, larval_seq_len);
1880 if (rc)
1881 goto init_digest_const_err;
1882 }
1884 init_digest_const_err:
1885 return rc;
1886 }
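/*
 * Swap each pair of adjacent 32-bit words in place. On a little-endian
 * host this turns a 64-bit constant stored as { low32, high32 } into
 * { high32, low32 }, which is the layout later copied into SRAM for the
 * SHA-384/SHA-512 larval digests (see cc_hash_global_init() below).
 */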
1888 static void __init cc_swap_dwords(u32 *buf, unsigned long size)
1889 {
1890 int i;
1891 u32 tmp;
1893 for (i = 0; i < size; i += 2) {
1894 tmp = buf[i];
1895 buf[i] = buf[i + 1];
1896 buf[i + 1] = tmp;
1897 }
1898 }
1900 /*
1901 * Due to the way the HW works we need to swap every
1902 * double word in the SHA384 and SHA512 larval hashes
1903 */
1904 void __init cc_hash_global_init(void)
1905 {
1906 cc_swap_dwords((u32 *)&sha384_init, (ARRAY_SIZE(sha384_init) * 2));
1907 cc_swap_dwords((u32 *)&sha512_init, (ARRAY_SIZE(sha512_init) * 2));
1908 }
1910 int cc_hash_alloc(struct cc_drvdata *drvdata)
1911 {
1912 struct cc_hash_handle *hash_handle;
1913 cc_sram_addr_t sram_buff;
1914 u32 sram_size_to_alloc;
1915 struct device *dev = drvdata_to_dev(drvdata);
1916 int rc = 0;
1917 int alg;
1919 hash_handle = kzalloc(sizeof(*hash_handle), GFP_KERNEL);
1920 if (!hash_handle)
1921 return -ENOMEM;
1923 INIT_LIST_HEAD(&hash_handle->hash_list);
1924 drvdata->hash_handle = hash_handle;
1926 sram_size_to_alloc = sizeof(digest_len_init) +
1927 sizeof(md5_init) +
1928 sizeof(sha1_init) +
1929 sizeof(sha224_init) +
1930 sizeof(sha256_init);
1932 if (drvdata->hw_rev >= CC_HW_REV_712)
1933 sram_size_to_alloc += sizeof(digest_len_sha512_init) +
1934 sizeof(sha384_init) + sizeof(sha512_init);
1936 sram_buff = cc_sram_alloc(drvdata, sram_size_to_alloc);
1937 if (sram_buff == NULL_SRAM_ADDR) {
1938 dev_err(dev, "SRAM pool exhausted\n");
1939 rc = -ENOMEM;
1940 goto fail;
1941 }
1943 /* The initial digest-len offset */
1944 hash_handle->digest_len_sram_addr = sram_buff;
1946 /* must be set before the alg registration as it is used there */
1947 rc = cc_init_hash_sram(drvdata);
1948 if (rc) {
1949 dev_err(dev, "Init digest CONST failed (rc=%d)\n", rc);
1950 goto fail;
1951 }
1953 /* ahash registration */
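/*
 * Each supported template is registered twice: first as its keyed
 * (HMAC/XCBC/CMAC) variant and then, unless the HW mode is XCBC-MAC or
 * CMAC, as the plain unkeyed hash as well. Templates whose min_hw_rev
 * exceeds the detected HW revision are skipped entirely.
 */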
1954 for (alg = 0; alg < ARRAY_SIZE(driver_hash); alg++) {
1955 struct cc_hash_alg *t_alg;
1956 int hw_mode = driver_hash[alg].hw_mode;
1958 /* We either support both HASH and MAC or none */
1959 if (driver_hash[alg].min_hw_rev > drvdata->hw_rev)
1960 continue;
1962 /* register hmac version */
1963 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, true);
1964 if (IS_ERR(t_alg)) {
1965 rc = PTR_ERR(t_alg);
1966 dev_err(dev, "%s alg allocation failed\n",
1967 driver_hash[alg].driver_name);
1968 goto fail;
1969 }
1970 t_alg->drvdata = drvdata;
1972 rc = crypto_register_ahash(&t_alg->ahash_alg);
1973 if (rc) {
1974 dev_err(dev, "%s alg registration failed\n",
1975 driver_hash[alg].driver_name);
1976 kfree(t_alg);
1977 goto fail;
1978 } else {
1979 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
1980 }
1982 if (hw_mode == DRV_CIPHER_XCBC_MAC ||
1983 hw_mode == DRV_CIPHER_CMAC)
1984 continue;
1986 /* register hash version */
1987 t_alg = cc_alloc_hash_alg(&driver_hash[alg], dev, false);
1988 if (IS_ERR(t_alg)) {
1989 rc = PTR_ERR(t_alg);
1990 dev_err(dev, "%s alg allocation failed\n",
1991 driver_hash[alg].driver_name);
1992 goto fail;
1993 }
1994 t_alg->drvdata = drvdata;
1996 rc = crypto_register_ahash(&t_alg->ahash_alg);
1997 if (rc) {
1998 dev_err(dev, "%s alg registration failed\n",
1999 driver_hash[alg].driver_name);
2000 kfree(t_alg);
2001 goto fail;
2002 } else {
2003 list_add_tail(&t_alg->entry, &hash_handle->hash_list);
2004 }
2005 }
2007 return 0;
2009 fail:
2010 kfree(drvdata->hash_handle);
2011 drvdata->hash_handle = NULL;
2012 return rc;
2013 }
2015 int cc_hash_free(struct cc_drvdata *drvdata)
2016 {
2017 struct cc_hash_alg *t_hash_alg, *hash_n;
2018 struct cc_hash_handle *hash_handle = drvdata->hash_handle;
2020 if (hash_handle) {
2021 list_for_each_entry_safe(t_hash_alg, hash_n,
2022 &hash_handle->hash_list, entry) {
2023 crypto_unregister_ahash(&t_hash_alg->ahash_alg);
2024 list_del(&t_hash_alg->entry);
2025 kfree(t_hash_alg);
2026 }
2028 kfree(hash_handle);
2029 drvdata->hash_handle = NULL;
2030 }
2031 return 0;
2032 }
2034 static void cc_setup_xcbc(struct ahash_request *areq, struct cc_hw_desc desc[],
2035 unsigned int *seq_size)
2036 {
2037 unsigned int idx = *seq_size;
2038 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2039 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2040 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
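/*
 * Load the three XCBC-MAC subkeys K1/K2/K3 from their fixed offsets in
 * opad_tmp_keys_buff, followed by the running MAC state from the
 * request's digest_buff.
 */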
2042 /* Setup XCBC MAC K1 */
2043 hw_desc_init(&desc[idx]);
2044 set_din_type(&desc[idx], DMA_DLLI, (ctx->opad_tmp_keys_dma_addr +
2045 XCBC_MAC_K1_OFFSET),
2046 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2047 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2048 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2049 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2050 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2051 set_flow_mode(&desc[idx], S_DIN_to_AES);
2052 idx++;
2054 /* Setup XCBC MAC K2 */
2055 hw_desc_init(&desc[idx]);
2056 set_din_type(&desc[idx], DMA_DLLI,
2057 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K2_OFFSET),
2058 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2059 set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
2060 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2061 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2062 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2063 set_flow_mode(&desc[idx], S_DIN_to_AES);
2064 idx++;
2066 /* Setup XCBC MAC K3 */
2067 hw_desc_init(&desc[idx]);
2068 set_din_type(&desc[idx], DMA_DLLI,
2069 (ctx->opad_tmp_keys_dma_addr + XCBC_MAC_K3_OFFSET),
2070 CC_AES_128_BIT_KEY_SIZE, NS_BIT);
2071 set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
2072 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2073 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2074 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2075 set_flow_mode(&desc[idx], S_DIN_to_AES);
2076 idx++;
2078 /* Loading MAC state */
2079 hw_desc_init(&desc[idx]);
2080 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2081 CC_AES_BLOCK_SIZE, NS_BIT);
2082 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2083 set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
2084 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2085 set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
2086 set_flow_mode(&desc[idx], S_DIN_to_AES);
2087 idx++;
2088 *seq_size = idx;
2089 }
2091 static void cc_setup_cmac(struct ahash_request *areq, struct cc_hw_desc desc[],
2092 unsigned int *seq_size)
2093 {
2094 unsigned int idx = *seq_size;
2095 struct ahash_req_ctx *state = ahash_request_ctx(areq);
2096 struct crypto_ahash *tfm = crypto_ahash_reqtfm(areq);
2097 struct cc_hash_ctx *ctx = crypto_ahash_ctx(tfm);
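/*
 * Load the CMAC key and then the running MAC state. For 24-byte
 * (AES-192) keys the descriptor reads AES_MAX_KEY_SIZE bytes from
 * opad_tmp_keys_buff; the setkey path is assumed to have zero-padded
 * the unused tail of that buffer.
 */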
2099 /* Setup CMAC Key */
2100 hw_desc_init(&desc[idx]);
2101 set_din_type(&desc[idx], DMA_DLLI, ctx->opad_tmp_keys_dma_addr,
2102 ((ctx->key_params.keylen == 24) ? AES_MAX_KEY_SIZE :
2103 ctx->key_params.keylen), NS_BIT);
2104 set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
2105 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2106 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2107 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2108 set_flow_mode(&desc[idx], S_DIN_to_AES);
2109 idx++;
2111 /* Load MAC state */
2112 hw_desc_init(&desc[idx]);
2113 set_din_type(&desc[idx], DMA_DLLI, state->digest_buff_dma_addr,
2114 CC_AES_BLOCK_SIZE, NS_BIT);
2115 set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
2116 set_cipher_mode(&desc[idx], DRV_CIPHER_CMAC);
2117 set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
2118 set_key_size_aes(&desc[idx], ctx->key_params.keylen);
2119 set_flow_mode(&desc[idx], S_DIN_to_AES);
2120 idx++;
2121 *seq_size = idx;
2122 }
2124 static void cc_set_desc(struct ahash_req_ctx *areq_ctx,
2125 struct cc_hash_ctx *ctx, unsigned int flow_mode,
2126 struct cc_hw_desc desc[], bool is_not_last_data,
2127 unsigned int *seq_size)
2128 {
2129 unsigned int idx = *seq_size;
2130 struct device *dev = drvdata_to_dev(ctx->drvdata);
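/*
 * For a single contiguous DMA buffer (DLLI) one data descriptor is
 * enough. For an MLLI buffer the link table is first copied into SRAM
 * with a BYPASS descriptor and the data is then processed from there;
 * a NULL buffer type adds no descriptors at all.
 */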
2132 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_DLLI) {
2133 hw_desc_init(&desc[idx]);
2134 set_din_type(&desc[idx], DMA_DLLI,
2135 sg_dma_address(areq_ctx->curr_sg),
2136 areq_ctx->curr_sg->length, NS_BIT);
2137 set_flow_mode(&desc[idx], flow_mode);
2138 idx++;
2139 } else {
2140 if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) {
2141 dev_dbg(dev, " NULL mode\n");
2142 /* nothing to build */
2143 return;
2144 }
2145 /* bypass */
2146 hw_desc_init(&desc[idx]);
2147 set_din_type(&desc[idx], DMA_DLLI,
2148 areq_ctx->mlli_params.mlli_dma_addr,
2149 areq_ctx->mlli_params.mlli_len, NS_BIT);
2150 set_dout_sram(&desc[idx], ctx->drvdata->mlli_sram_addr,
2151 areq_ctx->mlli_params.mlli_len);
2152 set_flow_mode(&desc[idx], BYPASS);
2153 idx++;
2154 /* process */
2155 hw_desc_init(&desc[idx]);
2156 set_din_type(&desc[idx], DMA_MLLI,
2157 ctx->drvdata->mlli_sram_addr,
2158 areq_ctx->mlli_nents, NS_BIT);
2159 set_flow_mode(&desc[idx], flow_mode);
2160 idx++;
2161 }
2162 if (is_not_last_data)
2163 set_din_not_last_indication(&desc[(idx - 1)]);
2164 /* return updated desc sequence size */
2165 *seq_size = idx;
2166 }
2168 static const void *cc_larval_digest(struct device *dev, u32 mode)
2169 {
2170 switch (mode) {
2171 case DRV_HASH_MD5:
2172 return md5_init;
2173 case DRV_HASH_SHA1:
2174 return sha1_init;
2175 case DRV_HASH_SHA224:
2176 return sha224_init;
2177 case DRV_HASH_SHA256:
2178 return sha256_init;
2179 case DRV_HASH_SHA384:
2180 return sha384_init;
2181 case DRV_HASH_SHA512:
2182 return sha512_init;
2183 default:
2184 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2185 return md5_init;
2186 }
2187 }
2189 /*
2190 * Gets the address of the initial digest in SRAM
2191 * according to the given hash mode
2192 *
2193 * \param drvdata
2194 * \param mode The Hash mode. Supported modes: MD5/SHA1/SHA224/SHA256/SHA384/SHA512
2195 *
2196 * \return cc_sram_addr_t The address of the initial digest in SRAM
2197 */
2198 cc_sram_addr_t cc_larval_digest_addr(void *drvdata, u32 mode)
2199 {
2200 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2201 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2202 struct device *dev = drvdata_to_dev(_drvdata);
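/*
 * The per-mode addresses below are cumulative offsets from
 * larval_digest_sram_addr, in the exact order the larval digests were
 * copied by cc_init_hash_sram(): MD5, SHA-1, SHA-224, SHA-256, SHA-384,
 * SHA-512.
 */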
2204 switch (mode) {
2205 case DRV_HASH_NULL:
2206 break; /*Ignore*/
2207 case DRV_HASH_MD5:
2208 return (hash_handle->larval_digest_sram_addr);
2209 case DRV_HASH_SHA1:
2210 return (hash_handle->larval_digest_sram_addr +
2211 sizeof(md5_init));
2212 case DRV_HASH_SHA224:
2213 return (hash_handle->larval_digest_sram_addr +
2214 sizeof(md5_init) +
2215 sizeof(sha1_init));
2216 case DRV_HASH_SHA256:
2217 return (hash_handle->larval_digest_sram_addr +
2218 sizeof(md5_init) +
2219 sizeof(sha1_init) +
2220 sizeof(sha224_init));
2221 case DRV_HASH_SHA384:
2222 return (hash_handle->larval_digest_sram_addr +
2223 sizeof(md5_init) +
2224 sizeof(sha1_init) +
2225 sizeof(sha224_init) +
2226 sizeof(sha256_init));
2227 case DRV_HASH_SHA512:
2228 return (hash_handle->larval_digest_sram_addr +
2229 sizeof(md5_init) +
2230 sizeof(sha1_init) +
2231 sizeof(sha224_init) +
2232 sizeof(sha256_init) +
2233 sizeof(sha384_init));
2234 default:
2235 dev_err(dev, "Invalid hash mode (%d)\n", mode);
2236 }
2238 /* A valid SRAM address, though the wrong one for this mode; returned to avoid a kernel crash */
2239 return hash_handle->larval_digest_sram_addr;
2240 }
2242 cc_sram_addr_t
2243 cc_digest_len_addr(void *drvdata, u32 mode)
2244 {
2245 struct cc_drvdata *_drvdata = (struct cc_drvdata *)drvdata;
2246 struct cc_hash_handle *hash_handle = _drvdata->hash_handle;
2247 cc_sram_addr_t digest_len_addr = hash_handle->digest_len_sram_addr;
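/*
 * SHA-384/SHA-512 use the second digest-length block, which
 * cc_init_hash_sram() places in SRAM immediately after the first one on
 * HW revisions that support the large SHA variants.
 */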
2249 switch (mode) {
2250 case DRV_HASH_SHA1:
2251 case DRV_HASH_SHA224:
2252 case DRV_HASH_SHA256:
2253 case DRV_HASH_MD5:
2254 return digest_len_addr;
2255 #if (CC_DEV_SHA_MAX > 256)
2256 case DRV_HASH_SHA384:
2257 case DRV_HASH_SHA512:
2258 return digest_len_addr + sizeof(digest_len_init);
2259 #endif
2260 default:
2261 return digest_len_addr; /*to avoid kernel crash*/