// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_request_mgr.h"
#include "cc_sram_mgr.h"

#define template_aead	template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE
struct cc_aead_handle {
	cc_sram_addr_t sram_workspace_addr;
	struct list_head aead_list;
};
struct cc_hmac_s {
	u8 *padded_authkey;
	u8 *ipad_opad; /* IPAD, OPAD */
	dma_addr_t padded_authkey_dma_addr;
	dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
	u8 *xcbc_keys; /* K1, K2, K3 */
	dma_addr_t xcbc_keys_dma_addr;
};
struct cc_aead_ctx {
	struct cc_drvdata *drvdata;
	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
	u8 *enckey;
	dma_addr_t enckey_dma_addr;
	union {
		struct cc_hmac_s hmac;
		struct cc_xcbc_s xcbc;
	} auth_state;
	unsigned int enc_keylen;
	unsigned int auth_keylen;
	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICv */
	unsigned int hash_len;
	enum drv_cipher_mode cipher_mode;
	enum cc_flow_mode flow_mode;
	enum drv_hash_mode auth_mode;
};
static inline bool valid_assoclen(struct aead_request *req)
{
	return ((req->assoclen == 16) || (req->assoclen == 20));
}
static void cc_aead_exit(struct crypto_aead *tfm)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
		crypto_tfm_alg_name(&tfm->base));

	/* Unmap enckey buffer */
	if (ctx->enckey) {
		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
				  ctx->enckey_dma_addr);
		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
			&ctx->enckey_dma_addr);
		ctx->enckey_dma_addr = 0;
		ctx->enckey = NULL;
	}

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

		if (xcbc->xcbc_keys) {
			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
					  xcbc->xcbc_keys,
					  xcbc->xcbc_keys_dma_addr);
		}
		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
			&xcbc->xcbc_keys_dma_addr);
		xcbc->xcbc_keys_dma_addr = 0;
		xcbc->xcbc_keys = NULL;
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

		if (hmac->ipad_opad) {
			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
					  hmac->ipad_opad,
					  hmac->ipad_opad_dma_addr);
			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
				&hmac->ipad_opad_dma_addr);
			hmac->ipad_opad_dma_addr = 0;
			hmac->ipad_opad = NULL;
		}
		if (hmac->padded_authkey) {
			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
					  hmac->padded_authkey,
					  hmac->padded_authkey_dma_addr);
			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
				&hmac->padded_authkey_dma_addr);
			hmac->padded_authkey_dma_addr = 0;
			hmac->padded_authkey = NULL;
		}
	}
}
static unsigned int cc_get_aead_hash_len(struct crypto_aead *tfm)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	return cc_get_default_hash_len(ctx->drvdata);
}
static int cc_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_crypto_alg *cc_alg =
			container_of(alg, struct cc_crypto_alg, aead_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
		crypto_tfm_alg_name(&tfm->base));

	/* Initialize modes in instance */
	ctx->cipher_mode = cc_alg->cipher_mode;
	ctx->flow_mode = cc_alg->flow_mode;
	ctx->auth_mode = cc_alg->auth_mode;
	ctx->drvdata = cc_alg->drvdata;
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

	/* Allocate key buffer, cache line aligned */
	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
					 &ctx->enckey_dma_addr, GFP_KERNEL);
	if (!ctx->enckey) {
		dev_err(dev, "Failed allocating key buffer\n");
		goto init_failed;
	}
	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
		ctx->enckey);

	/* Set default authlen value */

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
		/* (and temporary for user key - up to 256b) */
		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
						     &xcbc->xcbc_keys_dma_addr,
						     GFP_KERNEL);
		if (!xcbc->xcbc_keys) {
			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
			goto init_failed;
		}
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

		/* Allocate dma-coherent buffer for IPAD + OPAD */
		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
						     &hmac->ipad_opad_dma_addr,
						     GFP_KERNEL);

		if (!hmac->ipad_opad) {
			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
			goto init_failed;
		}

		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
			hmac->ipad_opad);

		hmac->padded_authkey = dma_alloc_coherent(dev,
							  MAX_HMAC_BLOCK_SIZE,
							  pkey_dma,
							  GFP_KERNEL);

		if (!hmac->padded_authkey) {
			dev_err(dev, "failed to allocate padded_authkey\n");
			goto init_failed;
		}
	} else {
		ctx->auth_state.hmac.ipad_opad = NULL;
		ctx->auth_state.hmac.padded_authkey = NULL;
	}
	ctx->hash_len = cc_get_aead_hash_len(tfm);

	return 0;

init_failed:
	cc_aead_exit(tfm);
	return -ENOMEM;
}
static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
	struct aead_request *areq = (struct aead_request *)cc_req;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	/* BACKLOG notification */
	if (err == -EINPROGRESS)
		goto done;

	cc_unmap_aead_request(dev, areq);

	/* Restore ordinary iv pointer */
	areq->iv = areq_ctx->backup_iv;

	if (err)
		goto done;

	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
			   ctx->authsize) != 0) {
			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
				ctx->authsize, ctx->cipher_mode);
			/* In case of payload authentication failure, we must
			 * not reveal the decrypted message --> zero its memory.
			 */
			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
			err = -EBADMSG;
		}
	} else { /*ENCRYPT*/
		if (areq_ctx->is_icv_fragmented) {
			u32 skip = areq->cryptlen + areq_ctx->dst_offset;

			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
					   areq_ctx->dst_sgl, skip,
					   (skip + ctx->authsize),
					   CC_SG_FROM_BUF);
		}

		/* If an IV was generated, copy it back to the user provided
		 * buffer.
		 */
		if (areq_ctx->backup_giv) {
			if (ctx->cipher_mode == DRV_CIPHER_CTR)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CTR_RFC3686_NONCE_SIZE,
				       CTR_RFC3686_IV_SIZE);
			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
		}
	}
done:
	aead_request_complete(areq, err);
}
static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
				struct cc_aead_ctx *ctx)
{
	/* Load the AES key */
	hw_desc_init(&desc[0]);
	/* We use the same buffer for the source/user key as for the output
	 * keys, because after this key-load descriptor the user key is not
	 * needed anymore.
	 */
	set_din_type(&desc[0], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
		     NS_BIT);
	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[0], ctx->auth_keylen);
	set_flow_mode(&desc[0], S_DIN_to_AES);
	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

	hw_desc_init(&desc[1]);
	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[1], DIN_AES_DOUT);
	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[2]);
	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[2], DIN_AES_DOUT);
	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[3]);
	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[3], DIN_AES_DOUT);
	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      2 * AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	return 4;
}
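/*
 * For reference: the sequence built by xcbc_setkey() implements the
 * AES-XCBC-MAC subkey derivation of RFC 3566, i.e. K1 = AES_K(0x01...01),
 * K2 = AES_K(0x02...02) and K3 = AES_K(0x03...03), with the three results
 * written back over the user key in the xcbc_keys buffer.
 */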
static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	unsigned int digest_ofs = 0;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
	unsigned int idx = 0;
	int i;

	/* calc derived HMAC key */
	for (i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_sram(&desc[idx],
			     cc_larval_digest_addr(ctx->drvdata,
						   ctx->auth_mode),
			     digest_size);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_const(&desc[idx], 0, ctx->hash_len);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     hmac->padded_authkey_dma_addr,
			     SHA256_BLOCK_SIZE, NS_BIT);
		set_cipher_mode(&desc[idx], hash_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the intermediate digest */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_dout_dlli(&desc[idx],
			      (hmac->ipad_opad_dma_addr + digest_ofs),
			      digest_size, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		idx++;

		digest_ofs += digest_size;
	}

	return idx;
}
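/*
 * For reference: hmac_setkey() precomputes the two HMAC intermediate states
 * of RFC 2104, H(K ^ ipad) and H(K ^ opad), from the block-sized
 * padded_authkey and stores them back-to-back in the ipad_opad buffer, so
 * that per-request descriptors only resume the hash from these saved states.
 */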
static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
		ctx->enc_keylen, ctx->auth_keylen);

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		break;
	case DRV_HASH_XCBC_MAC:
		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
		    ctx->auth_keylen != AES_KEYSIZE_192 &&
		    ctx->auth_keylen != AES_KEYSIZE_256)
			return -ENOTSUPP;
		break;
	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
		if (ctx->auth_keylen > 0)
			return -EINVAL;
		break;
	default:
		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
		return -EINVAL;
	}
	/* Check cipher key size */
	if (ctx->flow_mode == S_DIN_to_DES) {
		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	} else { /* Default assumed to be AES ciphers */
		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
		    ctx->enc_keylen != AES_KEYSIZE_192 &&
		    ctx->enc_keylen != AES_KEYSIZE_256) {
			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	}

	return 0; /* All tests of keys sizes passed */
}
/* This function prepares the user key so it can be passed to the HMAC
 * processing (copy to internal buffer, or hash it if the key is longer than
 * the block size).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *authkey,
				 unsigned int keylen)
{
	dma_addr_t key_dma_addr = 0;
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
	struct cc_crypto_req cc_req = {};
	unsigned int blocksize;
	unsigned int digestsize;
	unsigned int hashmode;
	unsigned int idx = 0;
	int rc = 0;
	u8 *key = NULL;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	dma_addr_t padded_authkey_dma_addr =
		ctx->auth_state.hmac.padded_authkey_dma_addr;

	switch (ctx->auth_mode) { /* auth_key required and >0 */
	case DRV_HASH_SHA1:
		blocksize = SHA1_BLOCK_SIZE;
		digestsize = SHA1_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA1;
		break;
	case DRV_HASH_SHA256:
	default:
		blocksize = SHA256_BLOCK_SIZE;
		digestsize = SHA256_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA256;
	}

	if (keylen != 0) {
		key = kmemdup(authkey, keylen, GFP_KERNEL);
		if (!key)
			return -ENOMEM;

		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			kzfree(key);
			return -ENOMEM;
		}
		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_sram(&desc[idx], larval_addr, digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length*/
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_const(&desc[idx], 0, ctx->hash_len);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			/* Hash the long user key */
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     key_dma_addr, keylen, NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get the digest as the effective key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			idx++;

			/* Zero-pad the hashed key up to the block size */
			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
				      digestsize), (blocksize - digestsize),
				      NS_BIT, 0);
			idx++;
		} else {
			/* Copy the key as-is */
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
				     keylen, NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (padded_authkey_dma_addr +
					       keylen),
					      (blocksize - keylen), NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, (blocksize - keylen));
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc)
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);

	if (key_dma_addr)
		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

	kzfree(key);

	return rc;
}
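/*
 * For reference: per RFC 2104, an HMAC key longer than the hash block size is
 * first hashed and the digest is used as the key; in either case the result
 * is zero-padded up to the block size. The descriptor sequences above do
 * exactly that, leaving the result in padded_authkey for hmac_setkey().
 */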
static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_crypto_req cc_req = {};
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	unsigned int seq_len = 0;
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	const u8 *enckey, *authkey;
	int rc = -EINVAL;

	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
		struct crypto_authenc_keys keys;

		rc = crypto_authenc_extractkeys(&keys, key, keylen);
		if (rc)
			goto badkey;
		enckey = keys.enckey;
		authkey = keys.authkey;
		ctx->enc_keylen = keys.enckeylen;
		ctx->auth_keylen = keys.authkeylen;

		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			/* the nonce is stored in bytes at end of key */
			if (ctx->enc_keylen <
			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
				goto badkey;
			/* Copy nonce from last 4 bytes in CTR key to
			 * first 4 bytes in CTR IV
			 */
			memcpy(ctx->ctr_nonce, enckey + ctx->enc_keylen -
			       CTR_RFC3686_NONCE_SIZE, CTR_RFC3686_NONCE_SIZE);
			/* Set CTR key size */
			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
		}
	} else { /* non-authenc - has just one key */
		enckey = key;
		authkey = NULL;
		ctx->enc_keylen = keylen;
		ctx->auth_keylen = 0;
	}

	rc = validate_keys_sizes(ctx);
	if (rc)
		goto badkey;

	/* STAT_PHASE_1: Copy key to ctx */

	/* Get key material */
	memcpy(ctx->enckey, enckey, ctx->enc_keylen);
	if (ctx->enc_keylen == 24)
		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
		memcpy(ctx->auth_state.xcbc.xcbc_keys, authkey,
		       ctx->auth_keylen);
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
		rc = cc_get_plain_hmac_key(tfm, authkey, ctx->auth_keylen);
		if (rc)
			goto badkey;
	}

	/* STAT_PHASE_2: Create sequence */

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		seq_len = hmac_setkey(desc, ctx);
		break;
	case DRV_HASH_XCBC_MAC:
		seq_len = xcbc_setkey(desc, ctx);
		break;
	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
		break; /* No auth. key setup */
	default:
		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
		rc = -ENOTSUPP;
		goto badkey;
	}

	/* STAT_PHASE_3: Submit sequence to HW */

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
		if (rc) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto setkey_error;
		}
	}

	/* Update STAT_PHASE_3 */
	return rc;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
	return rc;
}
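/*
 * For reference: the key blob handed to authenc() transforms is parsed by
 * crypto_authenc_extractkeys() - an rtattr-encoded parameter header carrying
 * enckeylen, followed by the authentication key and then the encryption key
 * (hence the rtnetlink.h include above). For rfc3686(ctr(aes)) the last
 * 4 bytes of the encryption key are the per-key nonce, which cc_aead_setkey()
 * strips off and stores in ctx->ctr_nonce.
 */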
static int cc_des3_aead_setkey(struct crypto_aead *aead, const u8 *key,
			       unsigned int keylen)
{
	struct crypto_authenc_keys keys;
	u32 flags;
	int err;

	err = crypto_authenc_extractkeys(&keys, key, keylen);
	if (unlikely(err))
		goto badkey;

	err = -EINVAL;
	if (keys.enckeylen != DES3_EDE_KEY_SIZE)
		goto badkey;

	flags = crypto_aead_get_flags(aead);
	err = __des3_verify_key(&flags, keys.enckey);
	if (unlikely(err)) {
		crypto_aead_set_flags(aead, flags);
		goto out;
	}

	err = cc_aead_setkey(aead, key, keylen);

out:
	memzero_explicit(&keys, sizeof(keys));
	return err;

badkey:
	crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
	goto out;
}
static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 3)
		return -EINVAL;

	keylen -= 3;
	memcpy(ctx->ctr_nonce, key + keylen, 3);

	return cc_aead_setkey(tfm, key, keylen);
}
static int cc_aead_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	/* Unsupported auth. sizes */
	if (authsize == 0 ||
	    authsize > crypto_aead_maxauthsize(authenc)) {
		return -ENOTSUPP;
	}

	ctx->authsize = authsize;
	dev_dbg(dev, "authlen=%d\n", ctx->authsize);

	return 0;
}
static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}
static int cc_ccm_setauthsize(struct crypto_aead *authenc,
			      unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}
static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
			      struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (assoc_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "ASSOC buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
			     areq_ctx->assoclen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "ASSOC buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
			     areq_ctx->assoc.mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "Invalid ASSOC buffer type\n");
	}

	*seq_size = (++idx);
}
static void cc_proc_authen_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size, int direct)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	unsigned int idx = *seq_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
	{
		struct scatterlist *cipher =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_sgl : areq_ctx->src_sgl;

		unsigned int offset =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_offset : areq_ctx->src_offset;
		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(cipher) + offset),
			     areq_ctx->cryptlen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_MLLI:
	{
		/* DOUBLE-PASS flow (the default):
		 * assoc. + iv + data are compacted in one table.
		 * If assoclen is zero, only the IV is processed.
		 */
		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
		u32 mlli_nents = areq_ctx->assoc.mlli_nents;

		if (areq_ctx->is_single_pass) {
			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
				mlli_addr = areq_ctx->dst.sram_addr;
				mlli_nents = areq_ctx->dst.mlli_nents;
			} else {
				mlli_addr = areq_ctx->src.sram_addr;
				mlli_nents = areq_ctx->src.mlli_nents;
			}
		}

		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
			     NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}
static void cc_proc_cipher_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->cryptlen == 0)
		return; /*null processing*/

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(areq_ctx->src_sgl) +
			      areq_ctx->src_offset), areq_ctx->cryptlen,
			     NS_BIT);
		set_dout_dlli(&desc[idx],
			      (sg_dma_address(areq_ctx->dst_sgl) +
			       areq_ctx->dst_offset),
			      areq_ctx->cryptlen, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
			     areq_ctx->src.mlli_nents, NS_BIT);
		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}
static void cc_proc_digest_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	int direct = req_ctx->gen_ctx.op_type;

	/* Get final ICV result */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		hw_desc_init(&desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
			      NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_aes_not_hash_mode(&desc[idx]);
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
		} else {
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			set_cipher_mode(&desc[idx], hash_mode);
		}
	} else { /*Decrypt*/
		/* Get ICV out from hardware */
		hw_desc_init(&desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
			      ctx->authsize, NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		set_cipher_config0(&desc[idx],
				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
			set_aes_not_hash_mode(&desc[idx]);
		} else {
			set_cipher_mode(&desc[idx], hash_mode);
		}
	}

	*seq_size = (++idx);
}
static void cc_set_cipher_desc(struct aead_request *req,
			       struct cc_hw_desc desc[],
			       unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = req_ctx->hw_iv_size;
	unsigned int idx = *seq_size;
	int direct = req_ctx->gen_ctx.op_type;

	/* Setup cipher state */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
		     hw_iv_size, NS_BIT);
	if (ctx->cipher_mode == DRV_CIPHER_CTR)
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	else
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	/* Setup enc. key */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	if (ctx->flow_mode == S_DIN_to_AES) {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
			      ctx->enc_keylen), NS_BIT);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
	} else {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ctx->enc_keylen, NS_BIT);
		set_key_size_des(&desc[idx], ctx->enc_keylen);
	}
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	*seq_size = idx;
}
static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
			   unsigned int *seq_size, unsigned int data_flow_mode)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int idx = *seq_size;

	if (req_ctx->cryptlen == 0)
		return; /*null processing*/

	cc_set_cipher_desc(req, desc, &idx);
	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* We must wait for DMA to write all the cipher data */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	*seq_size = idx;
}
static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	/* Loading hash ipad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
		     NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->hash_len);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	*seq_size = idx;
}
static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int idx = *seq_size;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		     AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	*seq_size = idx;
}
static void cc_proc_header_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* Hash associated data */
	if (areq_ctx->assoclen > 0)
		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

	*seq_size = idx;
}
static void cc_proc_scheme_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      ctx->hash_len);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_cipher_do(&desc[idx], DO_PAD);
	idx++;

	/* Get final ICV result */
	hw_desc_init(&desc[idx]);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      digest_size);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_cipher_mode(&desc[idx], hash_mode);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
		     digest_size, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->hash_len);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
		     digest_size);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	*seq_size = idx;
}
static void cc_mlli_to_sram(struct aead_request *req,
			    struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if ((req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	     req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
	     !req_ctx->is_single_pass) && req_ctx->mlli_params.mlli_len) {
		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
			(unsigned int)ctx->drvdata->mlli_sram_addr,
			req_ctx->mlli_params.mlli_len);
		/* Copy MLLI table host-to-sram */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI,
			     req_ctx->mlli_params.mlli_dma_addr,
			     req_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[*seq_size],
			      ctx->drvdata->mlli_sram_addr,
			      req_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[*seq_size], BYPASS);
		(*seq_size)++;
	}
}
static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
					  enum cc_flow_mode setup_flow_mode,
					  bool is_single_pass)
{
	enum cc_flow_mode data_flow_mode;

	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
	} else { /* Decrypt */
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_and_HASH : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_and_HASH : DIN_DES_DOUT;
	}

	return data_flow_mode;
}
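/*
 * For reference: "single pass" means the data engine (AES/DES) and the hash
 * engine are chained so one trip over the payload both en/decrypts and
 * authenticates (e.g. AES_to_HASH_and_DOUT or AES_and_HASH); in the
 * double-pass fallback the payload is fed to the cipher and to the hash in
 * two separate descriptor sequences (DIN_AES_DOUT / DIN_DES_DOUT plus a
 * DIN_HASH pass), which is what the *_authenc() helpers below implement.
 */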
static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
			    unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int data_flow_mode =
		cc_get_data_flow(direct, ctx->flow_mode,
				 req_ctx->is_single_pass);

	if (req_ctx->is_single_pass) {
		/* SINGLE-PASS flow */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_set_cipher_desc(req, desc, seq_size);
		cc_proc_header_desc(req, desc, seq_size);
		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
		return;
	}

	/* DOUBLE-PASS flow
	 * Fallback for unsupported single-pass modes,
	 * i.e. using assoc. data of non-word-multiple length.
	 */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* encrypt first.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after..*/
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);

	} else { /*DECRYPT*/
		/* authenc first..*/
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		/* decrypt after.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* read the digest result with setting the completion bit;
		 * must be after the cipher operation
		 */
		cc_proc_digest_desc(req, desc, seq_size);
	}
}
static void
cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
		unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int data_flow_mode =
		cc_get_data_flow(direct, ctx->flow_mode,
				 req_ctx->is_single_pass);

	if (req_ctx->is_single_pass) {
		/* SINGLE-PASS flow */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_set_cipher_desc(req, desc, seq_size);
		cc_proc_header_desc(req, desc, seq_size);
		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
		return;
	}

	/* DOUBLE-PASS flow
	 * Fallback for unsupported single-pass modes,
	 * i.e. using assoc. data of non-word-multiple length.
	 */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* encrypt first.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after.. */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_digest_desc(req, desc, seq_size);
	} else { /*DECRYPT*/
		/* authenc first.. */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		/* decrypt after..*/
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* read the digest result with setting the completion bit;
		 * must be after the cipher operation
		 */
		cc_proc_digest_desc(req, desc, seq_size);
	}
}
static int validate_data_size(struct cc_aead_ctx *ctx,
			      enum drv_crypto_direction direct,
			      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int assoclen = areq_ctx->assoclen;
	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
			(req->cryptlen - ctx->authsize) : req->cryptlen;

	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->cryptlen < ctx->authsize)
		goto data_size_err;

	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/

	switch (ctx->flow_mode) {
	case S_DIN_to_AES:
		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
			goto data_size_err;
		if (ctx->cipher_mode == DRV_CIPHER_CCM)
			break;
		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
			if (areq_ctx->plaintext_authenticate_only)
				areq_ctx->is_single_pass = false;
			break;
		}

		if (!IS_ALIGNED(assoclen, sizeof(u32)))
			areq_ctx->is_single_pass = false;

		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
		    !IS_ALIGNED(cipherlen, sizeof(u32)))
			areq_ctx->is_single_pass = false;

		break;
	case S_DIN_to_DES:
		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
			goto data_size_err;
		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
			areq_ctx->is_single_pass = false;
		break;
	default:
		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
		goto data_size_err;
	}

	return 0;

data_size_err:
	return -EINVAL;
}
static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
	unsigned int len = 0;

	if (header_size == 0)
		return 0;

	if (header_size < ((1UL << 16) - (1UL << 8))) {
		len = 2;

		pa0_buff[0] = (header_size >> 8) & 0xFF;
		pa0_buff[1] = header_size & 0xFF;
	} else {
		len = 6;

		pa0_buff[0] = 0xFF;
		pa0_buff[1] = 0xFE;
		pa0_buff[2] = (header_size >> 24) & 0xFF;
		pa0_buff[3] = (header_size >> 16) & 0xFF;
		pa0_buff[4] = (header_size >> 8) & 0xFF;
		pa0_buff[5] = header_size & 0xFF;
	}

	return len;
}
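/*
 * For reference: this is the CCM a(0) encoding of the AAD length from
 * RFC 3610 / NIST SP 800-38C. An assoclen below 2^16 - 2^8 is encoded in two
 * big-endian bytes, e.g. assoclen 0x1234 becomes { 0x12, 0x34 } with len 2;
 * larger values use the 0xFF, 0xFE marker followed by a 4-byte big-endian
 * length, giving len 6.
 */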
static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}
static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
		  unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int cipher_flow_mode;
	dma_addr_t mac_result;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		cipher_flow_mode = AES_to_HASH_and_DOUT;
		mac_result = req_ctx->mac_buf_dma_addr;
	} else { /* Encrypt */
		cipher_flow_mode = AES_and_HASH;
		mac_result = req_ctx->icv_dma_addr;
	}

	/* load key */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load ctr state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI,
		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load MAC key */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load MAC state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* process assoc data */
	if (req_ctx->assoclen > 0) {
		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(&req_ctx->ccm_adata_sg),
			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* process the cipher */
	if (req_ctx->cryptlen)
		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);

	/* Read temporal MAC */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
		      NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load AES-CTR state (for last MAC calculation)*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* encrypt the "T" value and store MAC in mac_state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     ctx->authsize, NS_BIT);
	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	*seq_size = idx;
	return 0;
}
static int config_ccm_adata(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	//unsigned int size_of_a = 0, rem_a_size = 0;
	int rc = 0;
	unsigned int lp = req->iv[0];
	/* Note: The code assumes that req->iv[0] already contains the value
	 * L' of RFC 3610
	 */
	unsigned int l = lp + 1;  /* This is L' of RFC 3610. */
	unsigned int m = ctx->authsize;  /* This is M' of RFC 3610. */
	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - ctx->authsize);

	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);

	/* taken from crypto/ccm.c */
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (l < 2 || l > 8) {
		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
		return -EINVAL;
	}
	memcpy(b0, req->iv, AES_BLOCK_SIZE);

	/* format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	*b0 |= (8 * ((m - 2) / 2));
	if (req_ctx->assoclen > 0)
		*b0 |= 64;  /* Enable bit 6 if Adata exists. */

	rc = set_msg_len(b0 + 16 - l, cryptlen, l);  /* Write L'. */
	if (rc) {
		dev_err(dev, "message len overflow detected");
		return rc;
	}
	/* END of "taken from crypto/ccm.c" */

	/* l(a) - size of associated data. */
	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req_ctx->assoclen);

	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
	req->iv[15] = 1;

	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
	ctr_count_0[15] = 0;

	return 0;
}
static void cc_proc_rfc4309_ccm(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
	/* For RFC 4309, always use 4 bytes for message length
	 * (at most 2^32-1 bytes).
	 */
	areq_ctx->ctr_iv[0] = 3;

	/* In RFC 4309 there is an 11-byte nonce+IV part,
	 * that we build here.
	 */
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
	       CCM_BLOCK_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
	       CCM_BLOCK_IV_SIZE);
	req->iv = areq_ctx->ctr_iv;
	areq_ctx->assoclen -= CCM_BLOCK_IV_SIZE;
}
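/*
 * For reference: the RFC 4309 counter block assembled above is
 * [ flags = L-1 = 3 | 3-byte salt (ctr_nonce taken from the key) |
 *   8-byte per-request IV | 4-byte block counter ], i.e. the message-length
 * field is always 4 bytes wide and the CCM nonce is salt||IV, as required
 * for IPsec ESP. The 8 IV bytes that the API prepends to the AAD are then
 * removed from assoclen.
 */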
static void cc_set_ghash_desc(struct aead_request *req,
			      struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* process one zero block to generate hkey */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Load GHASH subkey */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Configure Hash Engine to work with GHASH.
	 * Since it was not possible to extend HASH submodes to add GHASH,
	 * The following command is necessary in order to
	 * select GHASH (according to HW designers)
	 */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Load GHASH initial STATE (which is 0). (for any hash there is an
	 * initial state)
	 */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	*seq_size = idx;
}
static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
		/* load AES/CTR initial CTR value inc by 2*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
		set_din_type(&desc[idx], DMA_DLLI,
			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
			     NS_BIT);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	}

	*seq_size = idx;
}
static void cc_proc_gcm_result(struct aead_request *req,
			       struct cc_hw_desc desc[],
			       unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	dma_addr_t mac_result;
	unsigned int idx = *seq_size;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		mac_result = req_ctx->mac_buf_dma_addr;
	} else { /* Encrypt */
		mac_result = req_ctx->icv_dma_addr;
	}

	/* process(ghash) gcm_block_len */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
		      NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load AES/CTR initial CTR value inc by 1*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* process GCTR on stored GHASH and store MAC in mac_state*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	*seq_size = idx;
}
static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
		  unsigned int *seq_size)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int cipher_flow_mode;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		cipher_flow_mode = AES_and_HASH;
	} else { /* Encrypt */
		cipher_flow_mode = AES_to_HASH_and_DOUT;
	}

	// In RFC 4543 there is no data to encrypt; just copy data from src to dst.
	if (req_ctx->plaintext_authenticate_only) {
		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
		cc_set_ghash_desc(req, desc, seq_size);
		/* process(ghash) assoc data */
		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
		cc_set_gctr_desc(req, desc, seq_size);
		cc_proc_gcm_result(req, desc, seq_size);
		return 0;
	}

	// for gcm and rfc4106.
	cc_set_ghash_desc(req, desc, seq_size);
	/* process(ghash) assoc data */
	if (req_ctx->assoclen > 0)
		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
	cc_set_gctr_desc(req, desc, seq_size);
	/* process(gctr+ghash) */
	if (req_ctx->cryptlen)
		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
	cc_proc_gcm_result(req, desc, seq_size);

	return 0;
}
static int config_gcm_context(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - ctx->authsize);
	__be32 counter = cpu_to_be32(2);

	dev_dbg(dev, "%s() cryptlen = %d, req_ctx->assoclen = %d ctx->authsize = %d\n",
		__func__, cryptlen, req_ctx->assoclen, ctx->authsize);

	memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);

	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);

	memcpy(req->iv + 12, &counter, 4);
	memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);

	counter = cpu_to_be32(1);
	memcpy(req->iv + 12, &counter, 4);
	memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);

	if (!req_ctx->plaintext_authenticate_only) {
		__be64 temp64;

		temp64 = cpu_to_be64(req_ctx->assoclen * 8);
		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
		temp64 = cpu_to_be64(cryptlen * 8);
		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
	} else {
		/* rfc4543 => all data (AAD, IV, plaintext) is treated as
		 * additional authenticated data, i.e. nothing is encrypted.
		 */
		__be64 temp64;

		temp64 = cpu_to_be64((req_ctx->assoclen +
				      GCM_BLOCK_RFC4_IV_SIZE + cryptlen) * 8);
		memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
		temp64 = 0;
		memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
	}

	return 0;
}
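
/*
 * RFC 4106/4543 carry the GCM nonce in the key: rebuild the full 12-byte
 * GCM IV as the 4-byte nonce captured at setkey time followed by the
 * 8-byte per-request IV, and reduce the request's assoclen by those 8 IV
 * bytes, which are carried alongside the associated data in these modes.
 */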
static void cc_proc_rfc4_gcm(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
	       ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
	       GCM_BLOCK_RFC4_IV_SIZE);
	req->iv = areq_ctx->ctr_iv;
	areq_ctx->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
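
/*
 * Common AEAD request path for every mode this driver supports.  The flow
 * is: validate lengths, set up the request context and hardware IV, run the
 * mode-specific pre-processing (CCM A-data / GCM context), DMA-map the
 * request, optionally wire up IV generation, build the descriptor sequence
 * for the selected authentication mode and finally queue it to the engine.
 */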
static int cc_proc_aead(struct aead_request *req,
			enum drv_crypto_direction direct)
{
	int rc = 0;
	int seq_len = 0;
	struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct cc_crypto_req cc_req = {};

	dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptlen=%d\n",
		((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
		ctx, req, req->iv, sg_virt(req->src), req->src->offset,
		sg_virt(req->dst), req->dst->offset, req->cryptlen);

	/* STAT_PHASE_0: Init and sanity checks */

	/* Check data length according to mode */
	if (validate_data_size(ctx, direct, req)) {
		dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
			req->cryptlen, areq_ctx->assoclen);
		crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
		rc = -EINVAL;
		goto exit;
	}

	/* Setup request structure */
	cc_req.user_cb = (void *)cc_aead_complete;
	cc_req.user_arg = (void *)req;

	/* Setup request context */
	areq_ctx->gen_ctx.op_type = direct;
	areq_ctx->req_authsize = ctx->authsize;
	areq_ctx->cipher_mode = ctx->cipher_mode;

	/* STAT_PHASE_1: Map buffers */

	if (ctx->cipher_mode == DRV_CIPHER_CTR) {
		/* Build CTR IV - Copy nonce from last 4 bytes in
		 * CTR key to first 4 bytes in CTR IV
		 */
		memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
		       CTR_RFC3686_NONCE_SIZE);
		if (!areq_ctx->backup_giv) /* User-provided (not generated) IV */
			memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
			       req->iv, CTR_RFC3686_IV_SIZE);
		/* Initialize counter portion of counter block */
		*(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
			    CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

		/* Replace with counter iv */
		req->iv = areq_ctx->ctr_iv;
		areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
	} else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
		   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
		areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
		if (areq_ctx->ctr_iv != req->iv) {
			memcpy(areq_ctx->ctr_iv, req->iv,
			       crypto_aead_ivsize(tfm));
			req->iv = areq_ctx->ctr_iv;
		}
	} else {
		areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
	}

	if (ctx->cipher_mode == DRV_CIPHER_CCM) {
		rc = config_ccm_adata(req);
		if (rc) {
			dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
				rc);
			goto exit;
		}
	} else {
		areq_ctx->ccm_hdr_size = ccm_header_size_null;
	}

	if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
		rc = config_gcm_context(req);
		if (rc) {
			dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
				rc);
			goto exit;
		}
	}

	rc = cc_map_aead_request(ctx->drvdata, req);
	if (rc) {
		dev_err(dev, "map_request() failed\n");
		goto exit;
	}

	/* do we need to generate IV? */
	if (areq_ctx->backup_giv) {
		/* set the DMA mapped IV address */
		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CTR_RFC3686_NONCE_SIZE;
			cc_req.ivgen_dma_addr_len = 1;
		} else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
			/* In ccm, the IV needs to exist both inside B0 and
			 * inside the counter. It is also copied to iv_dma_addr
			 * for other reasons (like returning it to the user).
			 * So, using 3 (identical) IV outputs.
			 */
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr +
				CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr[1] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr[2] =
				sg_dma_address(&areq_ctx->ccm_adata_sg) +
				CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
			cc_req.ivgen_dma_addr_len = 3;
		} else {
			cc_req.ivgen_dma_addr[0] =
				areq_ctx->gen_ctx.iv_dma_addr;
			cc_req.ivgen_dma_addr_len = 1;
		}

		/* set the IV size (8/16 B long) */
		cc_req.ivgen_size = crypto_aead_ivsize(tfm);
	}

	/* STAT_PHASE_2: Create sequence */

	/* Load MLLI tables to SRAM if necessary */
	cc_mlli_to_sram(req, desc, &seq_len);

	/*TODO: move seq len by reference */
	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		cc_hmac_authenc(req, desc, &seq_len);
		break;
	case DRV_HASH_XCBC_MAC:
		cc_xcbc_authenc(req, desc, &seq_len);
		break;
	case DRV_HASH_NULL:
		if (ctx->cipher_mode == DRV_CIPHER_CCM)
			cc_ccm(req, desc, &seq_len);
		if (ctx->cipher_mode == DRV_CIPHER_GCTR)
			cc_gcm(req, desc, &seq_len);
		break;
	default:
		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
		cc_unmap_aead_request(dev, req);
		rc = -ENOTSUPP;
		goto exit;
	}

	/* STAT_PHASE_3: Lock HW and push sequence */

	rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);

	if (rc != -EINPROGRESS && rc != -EBUSY) {
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);
		cc_unmap_aead_request(dev, req);
	}

exit:
	return rc;
}
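
/*
 * The entry points below are thin wrappers around cc_proc_aead().  Each one
 * zeroes the request context, saves the caller's IV pointer (the driver may
 * substitute its own counter/IV buffer for the duration of the operation
 * and restores the original pointer on synchronous failure), and flags the
 * mode-specific behaviour (RFC 4309/4106/4543 handling, authenticate-only
 * processing) before dispatching.
 */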
static int cc_aead_encrypt(struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = false;

	areq_ctx->plaintext_authenticate_only = false;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}

static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_encrypt() above. */

	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = true;

	cc_proc_rfc4309_ccm(req);

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}

static int cc_aead_decrypt(struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;
	areq_ctx->is_gcm4543 = false;

	areq_ctx->plaintext_authenticate_only = false;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}

static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	areq_ctx->is_gcm4543 = true;
	cc_proc_rfc4309_ccm(req);

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}
static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	memcpy(ctx->ctr_nonce, key + keylen, 4);

	return cc_aead_setkey(tfm, key, keylen);
}

static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);

	if (keylen < 4)
		return -EINVAL;

	keylen -= 4;
	memcpy(ctx->ctr_nonce, key + keylen, 4);

	return cc_aead_setkey(tfm, key, keylen);
}

static int cc_gcm_setauthsize(struct crypto_aead *authenc,
			      unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 8:
	case 12:
	case 13:
	case 14:
	case 15:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "authsize %d\n", authsize);

	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "authsize %d\n", authsize);

	if (authsize != 16)
		return -EINVAL;

	return cc_aead_setauthsize(authenc, authsize);
}
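
/*
 * For the IPsec-oriented RFC 4106/4309 modes the associated data covers the
 * ESP header plus the 8-byte explicit IV, so only assoclen values of 16 or
 * 20 bytes (see valid_assoclen()) are accepted by the entry points below.
 */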
static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_encrypt() above. */

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	areq_ctx->plaintext_authenticate_only = false;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}

static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_encrypt() above. */

	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* plaintext is not encrypted with rfc4543 */
	areq_ctx->plaintext_authenticate_only = true;

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}

static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_decrypt() above. */

	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc = -EINVAL;

	if (!valid_assoclen(req)) {
		dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
		goto out;
	}

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	areq_ctx->plaintext_authenticate_only = false;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;
out:
	return rc;
}

static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
	/* Very similar to cc_aead_decrypt() above. */

	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	int rc;

	memset(areq_ctx, 0, sizeof(*areq_ctx));

	/* ciphertext is not decrypted with rfc4543 */
	areq_ctx->plaintext_authenticate_only = true;

	/* No generated IV required */
	areq_ctx->backup_iv = req->iv;
	areq_ctx->assoclen = req->assoclen;
	areq_ctx->backup_giv = NULL;

	cc_proc_rfc4_gcm(req);
	areq_ctx->is_gcm4543 = true;

	rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
	if (rc != -EINPROGRESS && rc != -EBUSY)
		req->iv = areq_ctx->backup_iv;

	return rc;
}
static struct cc_alg_template aead_algs[] = {
	{
		.name = "authenc(hmac(sha1),cbc(aes))",
		.driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA1,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha1),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_des3_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.auth_mode = DRV_HASH_SHA1,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha256),cbc(aes))",
		.driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA256,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha256),cbc(des3_ede))",
		.driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
		.blocksize = DES3_EDE_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_des3_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = DES3_EDE_BLOCK_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_DES,
		.auth_mode = DRV_HASH_SHA256,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(xcbc(aes),cbc(aes))",
		.driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
		.blocksize = AES_BLOCK_SIZE,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CBC,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_XCBC_MAC,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA1_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA1,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
		.driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = SHA256_DIGEST_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_SHA256,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
		.driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_aead_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CTR_RFC3686_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_XCBC_MAC,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "ccm(aes)",
		.driver_name = "ccm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_ccm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = AES_BLOCK_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4309(ccm(aes))",
		.driver_name = "rfc4309-ccm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4309_ccm_setkey,
			.setauthsize = cc_rfc4309_ccm_setauthsize,
			.encrypt = cc_rfc4309_ccm_encrypt,
			.decrypt = cc_rfc4309_ccm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = CCM_BLOCK_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_CCM,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "gcm(aes)",
		.driver_name = "gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_aead_setkey,
			.setauthsize = cc_gcm_setauthsize,
			.encrypt = cc_aead_encrypt,
			.decrypt = cc_aead_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = 12,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4106(gcm(aes))",
		.driver_name = "rfc4106-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4106_gcm_setkey,
			.setauthsize = cc_rfc4106_gcm_setauthsize,
			.encrypt = cc_rfc4106_gcm_encrypt,
			.decrypt = cc_rfc4106_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
	{
		.name = "rfc4543(gcm(aes))",
		.driver_name = "rfc4543-gcm-aes-ccree",
		.blocksize = 1,
		.template_aead = {
			.setkey = cc_rfc4543_gcm_setkey,
			.setauthsize = cc_rfc4543_gcm_setauthsize,
			.encrypt = cc_rfc4543_gcm_encrypt,
			.decrypt = cc_rfc4543_gcm_decrypt,
			.init = cc_aead_init,
			.exit = cc_aead_exit,
			.ivsize = GCM_BLOCK_RFC4_IV_SIZE,
			.maxauthsize = AES_BLOCK_SIZE,
		},
		.cipher_mode = DRV_CIPHER_GCTR,
		.flow_mode = S_DIN_to_AES,
		.auth_mode = DRV_HASH_NULL,
		.min_hw_rev = CC_HW_REV_630,
		.std_body = CC_STD_NIST,
	},
};
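
/*
 * Turn one aead_algs[] template into a registrable cc_crypto_alg: fill in
 * the crypto_alg boilerplate (names, module, priority, context size, flags)
 * and record the cipher/flow/auth modes the request path consults.
 */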
static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
	struct cc_crypto_alg *t_alg;
	struct aead_alg *alg;

	t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
	if (!t_alg)
		return ERR_PTR(-ENOMEM);

	alg = &tmpl->template_aead;

	snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
	snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
		 tmpl->driver_name);
	alg->base.cra_module = THIS_MODULE;
	alg->base.cra_priority = CC_CRA_PRIO;

	alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
	alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY;
	alg->init = cc_aead_init;
	alg->exit = cc_aead_exit;

	t_alg->aead_alg = *alg;

	t_alg->cipher_mode = tmpl->cipher_mode;
	t_alg->flow_mode = tmpl->flow_mode;
	t_alg->auth_mode = tmpl->auth_mode;

	return t_alg;
}

int cc_aead_free(struct cc_drvdata *drvdata)
{
	struct cc_crypto_alg *t_alg, *n;
	struct cc_aead_handle *aead_handle =
		(struct cc_aead_handle *)drvdata->aead_handle;

	if (aead_handle) {
		/* Remove registered algs */
		list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
					 entry) {
			crypto_unregister_aead(&t_alg->aead_alg);
			list_del(&t_alg->entry);
			kfree(t_alg);
		}
		kfree(aead_handle);
		drvdata->aead_handle = NULL;
	}

	return 0;
}
int cc_aead_alloc(struct cc_drvdata *drvdata)
{
	struct cc_aead_handle *aead_handle;
	struct cc_crypto_alg *t_alg;
	int rc = -ENOMEM;
	int alg;
	struct device *dev = drvdata_to_dev(drvdata);

	aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
	if (!aead_handle) {
		rc = -ENOMEM;
		goto fail0;
	}

	INIT_LIST_HEAD(&aead_handle->aead_list);
	drvdata->aead_handle = aead_handle;

	aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
							 MAX_HMAC_DIGEST_SIZE);

	if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "SRAM pool exhausted\n");
		rc = -ENOMEM;
		goto fail1;
	}

	for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
		if ((aead_algs[alg].min_hw_rev > drvdata->hw_rev) ||
		    !(drvdata->std_bodies & aead_algs[alg].std_body))
			continue;

		t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
		if (IS_ERR(t_alg)) {
			rc = PTR_ERR(t_alg);
			dev_err(dev, "%s alg allocation failed\n",
				aead_algs[alg].driver_name);
			goto fail1;
		}
		t_alg->drvdata = drvdata;
		rc = crypto_register_aead(&t_alg->aead_alg);
		if (rc) {
			dev_err(dev, "%s alg registration failed\n",
				t_alg->aead_alg.base.cra_driver_name);
			goto fail2;
		}

		list_add_tail(&t_alg->entry, &aead_handle->aead_list);
		dev_dbg(dev, "Registered %s\n",
			t_alg->aead_alg.base.cra_driver_name);
	}

	return 0;

fail2:
	kfree(t_alg);
fail1:
	cc_aead_free(drvdata