// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2012-2018 ARM Limited or its affiliates. */

#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/internal/aead.h>
#include <crypto/authenc.h>
#include <crypto/des.h>
#include <linux/rtnetlink.h>
#include "cc_driver.h"
#include "cc_buffer_mgr.h"
#include "cc_aead.h"
#include "cc_request_mgr.h"
#include "cc_hash.h"
#include "cc_sram_mgr.h"

#define template_aead template_u.aead

#define MAX_AEAD_SETKEY_SEQ 12
#define MAX_AEAD_PROCESS_SEQ 23

#define MAX_HMAC_DIGEST_SIZE (SHA256_DIGEST_SIZE)
#define MAX_HMAC_BLOCK_SIZE (SHA256_BLOCK_SIZE)

#define AES_CCM_RFC4309_NONCE_SIZE 3
#define MAX_NONCE_SIZE CTR_RFC3686_NONCE_SIZE

/* Value of each ICV_CMP byte (of 8) in case of success */
#define ICV_VERIF_OK 0x01

struct cc_aead_handle {
	cc_sram_addr_t sram_workspace_addr;
	struct list_head aead_list;
};

struct cc_hmac_s {
	u8 *padded_authkey;
	u8 *ipad_opad; /* IPAD, OPAD */
	dma_addr_t padded_authkey_dma_addr;
	dma_addr_t ipad_opad_dma_addr;
};

struct cc_xcbc_s {
	u8 *xcbc_keys; /* K1,K2,K3 */
	dma_addr_t xcbc_keys_dma_addr;
};

struct cc_aead_ctx {
	struct cc_drvdata *drvdata;
	u8 ctr_nonce[MAX_NONCE_SIZE]; /* used for ctr3686 iv and aes ccm */
	u8 *enckey;
	dma_addr_t enckey_dma_addr;
	union {
		struct cc_hmac_s hmac;
		struct cc_xcbc_s xcbc;
	} auth_state;
	unsigned int enc_keylen;
	unsigned int auth_keylen;
	unsigned int authsize; /* Actual (reduced?) size of the MAC/ICV */
	enum drv_cipher_mode cipher_mode;
	enum cc_flow_mode flow_mode;
	enum drv_hash_mode auth_mode;
};

static inline bool valid_assoclen(struct aead_request *req)
{
	return ((req->assoclen == 16) || (req->assoclen == 20));
}

static void cc_aead_exit(struct crypto_aead *tfm)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "Clearing context @%p for %s\n", crypto_aead_ctx(tfm),
		crypto_tfm_alg_name(&tfm->base));

	/* Unmap enckey buffer */
	if (ctx->enckey) {
		dma_free_coherent(dev, AES_MAX_KEY_SIZE, ctx->enckey,
				  ctx->enckey_dma_addr);
		dev_dbg(dev, "Freed enckey DMA buffer enckey_dma_addr=%pad\n",
			&ctx->enckey_dma_addr);
		ctx->enckey_dma_addr = 0;
		ctx->enckey = NULL;
	}

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;

		if (xcbc->xcbc_keys) {
			dma_free_coherent(dev, CC_AES_128_BIT_KEY_SIZE * 3,
					  xcbc->xcbc_keys,
					  xcbc->xcbc_keys_dma_addr);
		}
		dev_dbg(dev, "Freed xcbc_keys DMA buffer xcbc_keys_dma_addr=%pad\n",
			&xcbc->xcbc_keys_dma_addr);
		xcbc->xcbc_keys_dma_addr = 0;
		xcbc->xcbc_keys = NULL;
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC auth. */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

		if (hmac->ipad_opad) {
			dma_free_coherent(dev, 2 * MAX_HMAC_DIGEST_SIZE,
					  hmac->ipad_opad,
					  hmac->ipad_opad_dma_addr);
			dev_dbg(dev, "Freed ipad_opad DMA buffer ipad_opad_dma_addr=%pad\n",
				&hmac->ipad_opad_dma_addr);
			hmac->ipad_opad_dma_addr = 0;
			hmac->ipad_opad = NULL;
		}
		if (hmac->padded_authkey) {
			dma_free_coherent(dev, MAX_HMAC_BLOCK_SIZE,
					  hmac->padded_authkey,
					  hmac->padded_authkey_dma_addr);
			dev_dbg(dev, "Freed padded_authkey DMA buffer padded_authkey_dma_addr=%pad\n",
				&hmac->padded_authkey_dma_addr);
			hmac->padded_authkey_dma_addr = 0;
			hmac->padded_authkey = NULL;
		}
	}
}

static int cc_aead_init(struct crypto_aead *tfm)
{
	struct aead_alg *alg = crypto_aead_alg(tfm);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_crypto_alg *cc_alg =
			container_of(alg, struct cc_crypto_alg, aead_alg);
	struct device *dev = drvdata_to_dev(cc_alg->drvdata);

	dev_dbg(dev, "Initializing context @%p for %s\n", ctx,
		crypto_tfm_alg_name(&tfm->base));

	/* Initialize modes in instance */
	ctx->cipher_mode = cc_alg->cipher_mode;
	ctx->flow_mode = cc_alg->flow_mode;
	ctx->auth_mode = cc_alg->auth_mode;
	ctx->drvdata = cc_alg->drvdata;
	crypto_aead_set_reqsize(tfm, sizeof(struct aead_req_ctx));

	/* Allocate key buffer, cache line aligned */
	ctx->enckey = dma_alloc_coherent(dev, AES_MAX_KEY_SIZE,
					 &ctx->enckey_dma_addr, GFP_KERNEL);
	if (!ctx->enckey) {
		dev_err(dev, "Failed allocating key buffer\n");
		goto init_failed;
	}
	dev_dbg(dev, "Allocated enckey buffer in context ctx->enckey=@%p\n",
		ctx->enckey);

	/* Set default authlen value */

	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) { /* XCBC authentication */
		struct cc_xcbc_s *xcbc = &ctx->auth_state.xcbc;
		const unsigned int key_size = CC_AES_128_BIT_KEY_SIZE * 3;

		/* Allocate dma-coherent buffer for XCBC's K1+K2+K3 */
		/* (and temporary for user key - up to 256b) */
		xcbc->xcbc_keys = dma_alloc_coherent(dev, key_size,
						     &xcbc->xcbc_keys_dma_addr,
						     GFP_KERNEL);
		if (!xcbc->xcbc_keys) {
			dev_err(dev, "Failed allocating buffer for XCBC keys\n");
			goto init_failed;
		}
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC authentication */
		struct cc_hmac_s *hmac = &ctx->auth_state.hmac;
		const unsigned int digest_size = 2 * MAX_HMAC_DIGEST_SIZE;
		dma_addr_t *pkey_dma = &hmac->padded_authkey_dma_addr;

		/* Allocate dma-coherent buffer for IPAD + OPAD */
		hmac->ipad_opad = dma_alloc_coherent(dev, digest_size,
						     &hmac->ipad_opad_dma_addr,
						     GFP_KERNEL);
		if (!hmac->ipad_opad) {
			dev_err(dev, "Failed allocating IPAD/OPAD buffer\n");
			goto init_failed;
		}

		dev_dbg(dev, "Allocated authkey buffer in context ctx->authkey=@%p\n",
			hmac->ipad_opad);

		hmac->padded_authkey = dma_alloc_coherent(dev,
							  MAX_HMAC_BLOCK_SIZE,
							  pkey_dma,
							  GFP_KERNEL);
		if (!hmac->padded_authkey) {
			dev_err(dev, "failed to allocate padded_authkey\n");
			goto init_failed;
		}
	} else {
		ctx->auth_state.hmac.ipad_opad = NULL;
		ctx->auth_state.hmac.padded_authkey = NULL;
	}

	return 0;

init_failed:
	cc_aead_exit(tfm);
	return -ENOMEM;
}

static void cc_aead_complete(struct device *dev, void *cc_req, int err)
{
	struct aead_request *areq = (struct aead_request *)cc_req;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	struct crypto_aead *tfm = crypto_aead_reqtfm(cc_req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	cc_unmap_aead_request(dev, areq);

	/* Restore ordinary iv pointer */
	areq->iv = areq_ctx->backup_iv;

	if (err)
		goto done;

	if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		if (memcmp(areq_ctx->mac_buf, areq_ctx->icv_virt_addr,
			   ctx->authsize) != 0) {
			dev_dbg(dev, "Payload authentication failure, (auth-size=%d, cipher=%d)\n",
				ctx->authsize, ctx->cipher_mode);
			/* In case of payload authentication failure, we must
			 * not reveal the decrypted message --> zero its memory.
			 */
			cc_zero_sgl(areq->dst, areq_ctx->cryptlen);
			err = -EBADMSG;
		}
	} else { /* ENCRYPT */
		if (areq_ctx->is_icv_fragmented) {
			u32 skip = areq->cryptlen + areq_ctx->dst_offset;

			cc_copy_sg_portion(dev, areq_ctx->mac_buf,
					   areq_ctx->dst_sgl, skip,
					   (skip + ctx->authsize),
					   CC_SG_FROM_BUF);
		}

		/* If an IV was generated, copy it back to the user provided
		 * buffer.
		 */
		if (areq_ctx->backup_giv) {
			if (ctx->cipher_mode == DRV_CIPHER_CTR)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CTR_RFC3686_NONCE_SIZE,
				       CTR_RFC3686_IV_SIZE);
			else if (ctx->cipher_mode == DRV_CIPHER_CCM)
				memcpy(areq_ctx->backup_giv, areq_ctx->ctr_iv +
				       CCM_BLOCK_IV_OFFSET, CCM_BLOCK_IV_SIZE);
		}
	}
done:
	aead_request_complete(areq, err);
}

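/*
 * Note: xcbc_setkey() below derives the three AES-XCBC-MAC subkeys as in
 * RFC 3566: K1 = AES-K(0x01..01), K2 = AES-K(0x02..02), K3 = AES-K(0x03..03).
 * The engine loads the user key in ECB mode and overwrites the same key
 * buffer with the derived subkeys.
 */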
static unsigned int xcbc_setkey(struct cc_hw_desc *desc,
				struct cc_aead_ctx *ctx)
{
	/* Load the AES key */
	hw_desc_init(&desc[0]);
	/* We are using for the source/user key the same buffer
	 * as for the output keys, because after this key loading it
	 * is not needed anymore
	 */
	set_din_type(&desc[0], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr, ctx->auth_keylen,
		     NS_BIT);
	set_cipher_mode(&desc[0], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[0], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_key_size_aes(&desc[0], ctx->auth_keylen);
	set_flow_mode(&desc[0], S_DIN_to_AES);
	set_setup_mode(&desc[0], SETUP_LOAD_KEY0);

	hw_desc_init(&desc[1]);
	set_din_const(&desc[1], 0x01010101, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[1], DIN_AES_DOUT);
	set_dout_dlli(&desc[1], ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[2]);
	set_din_const(&desc[2], 0x02020202, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[2], DIN_AES_DOUT);
	set_dout_dlli(&desc[2], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
				 + AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	hw_desc_init(&desc[3]);
	set_din_const(&desc[3], 0x03030303, CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[3], DIN_AES_DOUT);
	set_dout_dlli(&desc[3], (ctx->auth_state.xcbc.xcbc_keys_dma_addr
				 + 2 * AES_KEYSIZE_128),
		      AES_KEYSIZE_128, NS_BIT, 0);

	return 4;
}

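/*
 * Note: hmac_setkey() precomputes the HMAC inner and outer hash states,
 * H(K ^ ipad) and H(K ^ opad) per RFC 2104, from the block-sized
 * padded_authkey and stores both intermediate digests in ipad_opad.
 */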
static int hmac_setkey(struct cc_hw_desc *desc, struct cc_aead_ctx *ctx)
{
	unsigned int hmac_pad_const[2] = { HMAC_IPAD_CONST, HMAC_OPAD_CONST };
	unsigned int digest_ofs = 0;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
			DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
			CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	struct cc_hmac_s *hmac = &ctx->auth_state.hmac;

	unsigned int idx = 0;
	int i;

	/* calc derived HMAC key */
	for (i = 0; i < 2; i++) {
		/* Load hash initial state */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_sram(&desc[idx],
			     cc_larval_digest_addr(ctx->drvdata,
						   ctx->auth_mode),
			     digest_size);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
		idx++;

		/* Load the hash current length*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
		idx++;

		/* Prepare ipad key */
		hw_desc_init(&desc[idx]);
		set_xor_val(&desc[idx], hmac_pad_const[i]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_flow_mode(&desc[idx], S_DIN_to_HASH);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		idx++;

		/* Perform HASH update */
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     hmac->padded_authkey_dma_addr,
			     SHA256_BLOCK_SIZE, NS_BIT);
		set_cipher_mode(&desc[idx], hash_mode);
		set_xor_active(&desc[idx]);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;

		/* Get the digest */
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], hash_mode);
		set_dout_dlli(&desc[idx],
			      (hmac->ipad_opad_dma_addr + digest_ofs),
			      digest_size, NS_BIT, 0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		idx++;

		digest_ofs += digest_size;
	}

	return idx;
}

static int validate_keys_sizes(struct cc_aead_ctx *ctx)
{
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "enc_keylen=%u authkeylen=%u\n",
		ctx->enc_keylen, ctx->auth_keylen);

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		break;
	case DRV_HASH_XCBC_MAC:
		if (ctx->auth_keylen != AES_KEYSIZE_128 &&
		    ctx->auth_keylen != AES_KEYSIZE_192 &&
		    ctx->auth_keylen != AES_KEYSIZE_256)
			return -ENOTSUPP;
		break;
	case DRV_HASH_NULL: /* Not authenc (e.g., CCM) - no auth_key */
		if (ctx->auth_keylen > 0)
			return -EINVAL;
		break;
	default:
		dev_err(dev, "Invalid auth_mode=%d\n", ctx->auth_mode);
		return -EINVAL;
	}
	/* Check cipher key size */
	if (ctx->flow_mode == S_DIN_to_DES) {
		if (ctx->enc_keylen != DES3_EDE_KEY_SIZE) {
			dev_err(dev, "Invalid cipher(3DES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	} else { /* Default assumed to be AES ciphers */
		if (ctx->enc_keylen != AES_KEYSIZE_128 &&
		    ctx->enc_keylen != AES_KEYSIZE_192 &&
		    ctx->enc_keylen != AES_KEYSIZE_256) {
			dev_err(dev, "Invalid cipher(AES) key size: %u\n",
				ctx->enc_keylen);
			return -EINVAL;
		}
	}

	return 0; /* All tests of keys sizes passed */
}

/* This function prepares the user key so it can be passed to the hmac
 * processing (copy to internal buffer or hash in case of key longer than
 * the block size).
 */
static int cc_get_plain_hmac_key(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	dma_addr_t key_dma_addr = 0;
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	u32 larval_addr = cc_larval_digest_addr(ctx->drvdata, ctx->auth_mode);
	struct cc_crypto_req cc_req = {};
	unsigned int blocksize;
	unsigned int digestsize;
	unsigned int hashmode;
	unsigned int idx = 0;
	int rc = 0;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	dma_addr_t padded_authkey_dma_addr =
		ctx->auth_state.hmac.padded_authkey_dma_addr;

	switch (ctx->auth_mode) { /* auth_key required and >0 */
	case DRV_HASH_SHA1:
		blocksize = SHA1_BLOCK_SIZE;
		digestsize = SHA1_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA1;
		break;
	case DRV_HASH_SHA256:
	default:
		blocksize = SHA256_BLOCK_SIZE;
		digestsize = SHA256_DIGEST_SIZE;
		hashmode = DRV_HASH_HW_SHA256;
	}

	if (keylen != 0) {
		key_dma_addr = dma_map_single(dev, (void *)key, keylen,
					      DMA_TO_DEVICE);
		if (dma_mapping_error(dev, key_dma_addr)) {
			dev_err(dev, "Mapping key va=0x%p len=%u for DMA failed\n",
				key, keylen);
			return -ENOMEM;
		}
		if (keylen > blocksize) {
			/* Load hash initial state */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_sram(&desc[idx], larval_addr, digestsize);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
			idx++;

			/* Load the hash current length*/
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_din_const(&desc[idx], 0, ctx->drvdata->hash_len_sz);
			set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
			set_flow_mode(&desc[idx], S_DIN_to_HASH);
			set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI,
				     key_dma_addr, keylen, NS_BIT);
			set_flow_mode(&desc[idx], DIN_HASH);
			idx++;

			/* Get hashed key */
			hw_desc_init(&desc[idx]);
			set_cipher_mode(&desc[idx], hashmode);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      digestsize, NS_BIT, 0);
			set_flow_mode(&desc[idx], S_HASH_to_DOUT);
			set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
			set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			idx++;

			hw_desc_init(&desc[idx]);
			set_din_const(&desc[idx], 0, (blocksize - digestsize));
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], (padded_authkey_dma_addr +
				      digestsize), (blocksize - digestsize),
				      NS_BIT, 0);
			idx++;
		} else {
			hw_desc_init(&desc[idx]);
			set_din_type(&desc[idx], DMA_DLLI, key_dma_addr,
				     keylen, NS_BIT);
			set_flow_mode(&desc[idx], BYPASS);
			set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
				      keylen, NS_BIT, 0);
			idx++;

			if ((blocksize - keylen) != 0) {
				hw_desc_init(&desc[idx]);
				set_din_const(&desc[idx], 0,
					      (blocksize - keylen));
				set_flow_mode(&desc[idx], BYPASS);
				set_dout_dlli(&desc[idx],
					      (padded_authkey_dma_addr +
					       keylen),
					      (blocksize - keylen), NS_BIT, 0);
				idx++;
			}
		}
	} else {
		hw_desc_init(&desc[idx]);
		set_din_const(&desc[idx], 0, (blocksize - keylen));
		set_flow_mode(&desc[idx], BYPASS);
		set_dout_dlli(&desc[idx], padded_authkey_dma_addr,
			      blocksize, NS_BIT, 0);
		idx++;
	}

	rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, idx);
	if (rc)
		dev_err(dev, "send_request() failed (rc=%d)\n", rc);

	if (key_dma_addr)
		dma_unmap_single(dev, key_dma_addr, keylen, DMA_TO_DEVICE);

	return rc;
}

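/*
 * Note: for authenc() algorithms the key blob passed to cc_aead_setkey()
 * follows the generic authenc layout: an rtattr header carrying
 * crypto_authenc_key_param (the big-endian enckeylen), followed by the
 * authentication key and then the encryption key.
 */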
static int cc_aead_setkey(struct crypto_aead *tfm, const u8 *key,
			  unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct rtattr *rta = (struct rtattr *)key;
	struct cc_crypto_req cc_req = {};
	struct crypto_authenc_key_param *param;
	struct cc_hw_desc desc[MAX_AEAD_SETKEY_SEQ];
	int rc = -EINVAL;
	unsigned int seq_len = 0;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	dev_dbg(dev, "Setting key in context @%p for %s. key=%p keylen=%u\n",
		ctx, crypto_tfm_alg_name(crypto_aead_tfm(tfm)), key, keylen);

	/* STAT_PHASE_0: Init and sanity checks */

	if (ctx->auth_mode != DRV_HASH_NULL) { /* authenc() alg. */
		if (!RTA_OK(rta, keylen))
			goto badkey;
		if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
			goto badkey;
		if (RTA_PAYLOAD(rta) < sizeof(*param))
			goto badkey;
		param = RTA_DATA(rta);
		ctx->enc_keylen = be32_to_cpu(param->enckeylen);
		key += RTA_ALIGN(rta->rta_len);
		keylen -= RTA_ALIGN(rta->rta_len);
		if (keylen < ctx->enc_keylen)
			goto badkey;
		ctx->auth_keylen = keylen - ctx->enc_keylen;

		if (ctx->cipher_mode == DRV_CIPHER_CTR) {
			/* the nonce is stored in bytes at end of key */
			if (ctx->enc_keylen <
			    (AES_MIN_KEY_SIZE + CTR_RFC3686_NONCE_SIZE))
				goto badkey;
			/* Copy nonce from last 4 bytes in CTR key to
			 * first 4 bytes in CTR IV
			 */
			memcpy(ctx->ctr_nonce, key + ctx->auth_keylen +
			       ctx->enc_keylen - CTR_RFC3686_NONCE_SIZE,
			       CTR_RFC3686_NONCE_SIZE);
			/* Set CTR key size */
			ctx->enc_keylen -= CTR_RFC3686_NONCE_SIZE;
		}
	} else { /* non-authenc - has just one key */
		ctx->enc_keylen = keylen;
		ctx->auth_keylen = 0;
	}

	rc = validate_keys_sizes(ctx);
	if (rc)
		goto badkey;

	/* STAT_PHASE_1: Copy key to ctx */

	/* Get key material */
	memcpy(ctx->enckey, key + ctx->auth_keylen, ctx->enc_keylen);
	if (ctx->enc_keylen == 24)
		memset(ctx->enckey + 24, 0, CC_AES_KEY_SIZE_MAX - 24);
	if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
		memcpy(ctx->auth_state.xcbc.xcbc_keys, key, ctx->auth_keylen);
	} else if (ctx->auth_mode != DRV_HASH_NULL) { /* HMAC */
		rc = cc_get_plain_hmac_key(tfm, key, ctx->auth_keylen);
		if (rc)
			goto badkey;
	}

	/* STAT_PHASE_2: Create sequence */

	switch (ctx->auth_mode) {
	case DRV_HASH_SHA1:
	case DRV_HASH_SHA256:
		seq_len = hmac_setkey(desc, ctx);
		break;
	case DRV_HASH_XCBC_MAC:
		seq_len = xcbc_setkey(desc, ctx);
		break;
	case DRV_HASH_NULL: /* non-authenc modes, e.g., CCM */
		break; /* No auth. key setup */
	default:
		dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
		rc = -ENOTSUPP;
		goto badkey;
	}

	/* STAT_PHASE_3: Submit sequence to HW */

	if (seq_len > 0) { /* For CCM there is no sequence to setup the key */
		rc = cc_send_sync_request(ctx->drvdata, &cc_req, desc, seq_len);
		if (rc) {
			dev_err(dev, "send_request() failed (rc=%d)\n", rc);
			goto setkey_error;
		}
	}

	/* Update STAT_PHASE_3 */
	return rc;

badkey:
	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);

setkey_error:
	return rc;
}

static int cc_rfc4309_ccm_setkey(struct crypto_aead *tfm, const u8 *key,
				 unsigned int keylen)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);

	if (keylen < 3)
		return -EINVAL;

	keylen -= 3;
	memcpy(ctx->ctr_nonce, key + keylen, 3);

	return cc_aead_setkey(tfm, key, keylen);
}

static int cc_aead_setauthsize(struct crypto_aead *authenc,
			       unsigned int authsize)
{
	struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	/* Unsupported auth. sizes */
	if (authsize == 0 ||
	    authsize > crypto_aead_maxauthsize(authenc)) {
		return -ENOTSUPP;
	}

	ctx->authsize = authsize;
	dev_dbg(dev, "authlen=%d\n", ctx->authsize);

	return 0;
}

static int cc_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
				      unsigned int authsize)
{
	switch (authsize) {
	case 8:
	case 12:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static int cc_ccm_setauthsize(struct crypto_aead *authenc,
			      unsigned int authsize)
{
	switch (authsize) {
	case 4:
	case 6:
	case 8:
	case 10:
	case 12:
	case 14:
	case 16:
		break;
	default:
		return -EINVAL;
	}

	return cc_aead_setauthsize(authenc, authsize);
}

static void cc_set_assoc_desc(struct aead_request *areq, unsigned int flow_mode,
			      struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type assoc_dma_type = areq_ctx->assoc_buff_type;
	unsigned int idx = *seq_size;
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (assoc_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "ASSOC buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI, sg_dma_address(areq->src),
			     areq->assoclen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "ASSOC buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->assoc.sram_addr,
			     areq_ctx->assoc.mlli_nents, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC &&
		    areq_ctx->cryptlen > 0)
			set_din_not_last_indication(&desc[idx]);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "Invalid ASSOC buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_authen_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size, int direct)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	unsigned int idx = *seq_size;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
	{
		struct scatterlist *cipher =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_sgl : areq_ctx->src_sgl;

		unsigned int offset =
			(direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ?
			areq_ctx->dst_offset : areq_ctx->src_offset;
		dev_dbg(dev, "AUTHENC: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(cipher) + offset),
			     areq_ctx->cryptlen, NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_MLLI:
	{
		/* DOUBLE-PASS flow (as default)
		 * assoc. + iv + data -compact in one table
		 * if assoclen is ZERO only IV perform
		 */
		cc_sram_addr_t mlli_addr = areq_ctx->assoc.sram_addr;
		u32 mlli_nents = areq_ctx->assoc.mlli_nents;

		if (areq_ctx->is_single_pass) {
			if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
				mlli_addr = areq_ctx->dst.sram_addr;
				mlli_nents = areq_ctx->dst.mlli_nents;
			} else {
				mlli_addr = areq_ctx->src.sram_addr;
				mlli_nents = areq_ctx->src.mlli_nents;
			}
		}

		dev_dbg(dev, "AUTHENC: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, mlli_addr, mlli_nents,
			     NS_BIT);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	}
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "AUTHENC: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_cipher_desc(struct aead_request *areq,
				unsigned int flow_mode,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	unsigned int idx = *seq_size;
	struct aead_req_ctx *areq_ctx = aead_request_ctx(areq);
	enum cc_req_dma_buf_type data_dma_type = areq_ctx->data_buff_type;
	struct crypto_aead *tfm = crypto_aead_reqtfm(areq);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (areq_ctx->cryptlen == 0)
		return; /*null processing*/

	switch (data_dma_type) {
	case CC_DMA_BUF_DLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type DLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     (sg_dma_address(areq_ctx->src_sgl) +
			      areq_ctx->src_offset), areq_ctx->cryptlen,
			     NS_BIT);
		set_dout_dlli(&desc[idx],
			      (sg_dma_address(areq_ctx->dst_sgl) +
			       areq_ctx->dst_offset),
			      areq_ctx->cryptlen, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_MLLI:
		dev_dbg(dev, "CIPHER: SRC/DST buffer type MLLI\n");
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_MLLI, areq_ctx->src.sram_addr,
			     areq_ctx->src.mlli_nents, NS_BIT);
		set_dout_mlli(&desc[idx], areq_ctx->dst.sram_addr,
			      areq_ctx->dst.mlli_nents, NS_BIT, 0);
		set_flow_mode(&desc[idx], flow_mode);
		break;
	case CC_DMA_BUF_NULL:
	default:
		dev_err(dev, "CIPHER: Invalid SRC/DST buffer type\n");
	}

	*seq_size = (++idx);
}

static void cc_proc_digest_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	int direct = req_ctx->gen_ctx.op_type;

	/* Get final ICV result */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		hw_desc_init(&desc[idx]);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_dout_dlli(&desc[idx], req_ctx->icv_dma_addr, ctx->authsize,
			      NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_aes_not_hash_mode(&desc[idx]);
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
		} else {
			set_cipher_config0(&desc[idx],
					   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
			set_cipher_mode(&desc[idx], hash_mode);
		}
	} else { /* Decrypt */
		/* Get ICV out from hardware */
		hw_desc_init(&desc[idx]);
		set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
		set_flow_mode(&desc[idx], S_HASH_to_DOUT);
		set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr,
			      ctx->authsize, NS_BIT, 1);
		set_queue_last_ind(ctx->drvdata, &desc[idx]);
		set_cipher_config0(&desc[idx],
				   HASH_DIGEST_RESULT_LITTLE_ENDIAN);
		set_cipher_config1(&desc[idx], HASH_PADDING_DISABLED);
		if (ctx->auth_mode == DRV_HASH_XCBC_MAC) {
			set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
			set_aes_not_hash_mode(&desc[idx]);
		} else {
			set_cipher_mode(&desc[idx], hash_mode);
		}
	}

	*seq_size = (++idx);
}

static void cc_set_cipher_desc(struct aead_request *req,
			       struct cc_hw_desc desc[],
			       unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int hw_iv_size = req_ctx->hw_iv_size;
	unsigned int idx = *seq_size;
	int direct = req_ctx->gen_ctx.op_type;

	/* Setup cipher state */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gen_ctx.iv_dma_addr,
		     hw_iv_size, NS_BIT);
	if (ctx->cipher_mode == DRV_CIPHER_CTR)
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	else
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	/* Setup enc. key */
	hw_desc_init(&desc[idx]);
	set_cipher_config0(&desc[idx], direct);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], ctx->flow_mode);
	if (ctx->flow_mode == S_DIN_to_AES) {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
			      ctx->enc_keylen), NS_BIT);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
	} else {
		set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
			     ctx->enc_keylen, NS_BIT);
		set_key_size_des(&desc[idx], ctx->enc_keylen);
	}
	set_cipher_mode(&desc[idx], ctx->cipher_mode);
	idx++;

	*seq_size = idx;
}

static void cc_proc_cipher(struct aead_request *req, struct cc_hw_desc desc[],
			   unsigned int *seq_size, unsigned int data_flow_mode)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int idx = *seq_size;

	if (req_ctx->cryptlen == 0)
		return; /*null processing*/

	cc_set_cipher_desc(req, desc, &idx);
	cc_proc_cipher_desc(req, data_flow_mode, desc, &idx);
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* We must wait for DMA to write all cipher */
		hw_desc_init(&desc[idx]);
		set_din_no_dma(&desc[idx], 0, 0xfffff0);
		set_dout_no_dma(&desc[idx], 0, 0, 1);
		idx++;
	}

	*seq_size = idx;
}

static void cc_set_hmac_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	/* Loading hash ipad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.hmac.ipad_opad_dma_addr, digest_size,
		     NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->drvdata->hash_len_sz);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	*seq_size = idx;
}

static void cc_set_xcbc_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	unsigned int idx = *seq_size;

	/* Loading MAC state */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0, CC_AES_BLOCK_SIZE);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K1 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     ctx->auth_state.xcbc.xcbc_keys_dma_addr,
		     AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K2 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* Setup XCBC MAC K3 */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.xcbc.xcbc_keys_dma_addr +
		      2 * AES_KEYSIZE_128), AES_KEYSIZE_128, NS_BIT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE2);
	set_cipher_mode(&desc[idx], DRV_CIPHER_XCBC_MAC);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_key_size_aes(&desc[idx], CC_AES_128_BIT_KEY_SIZE);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	*seq_size = idx;
}

static void cc_proc_header_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	unsigned int idx = *seq_size;

	/* Hash associated data */
	if (req->assoclen > 0)
		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);

	*seq_size = idx;
}

static void cc_proc_scheme_desc(struct aead_request *req,
				struct cc_hw_desc desc[],
				unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct cc_aead_handle *aead_handle = ctx->drvdata->aead_handle;
	unsigned int hash_mode = (ctx->auth_mode == DRV_HASH_SHA1) ?
				DRV_HASH_HW_SHA1 : DRV_HASH_HW_SHA256;
	unsigned int digest_size = (ctx->auth_mode == DRV_HASH_SHA1) ?
				CC_SHA1_DIGEST_SIZE : CC_SHA256_DIGEST_SIZE;
	unsigned int idx = *seq_size;

	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      ctx->drvdata->hash_len_sz);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE1);
	set_cipher_do(&desc[idx], DO_PAD);
	idx++;

	/* Get final ICV result */
	hw_desc_init(&desc[idx]);
	set_dout_sram(&desc[idx], aead_handle->sram_workspace_addr,
		      digest_size);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_cipher_mode(&desc[idx], hash_mode);
	idx++;

	/* Loading hash opad xor key state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_type(&desc[idx], DMA_DLLI,
		     (ctx->auth_state.hmac.ipad_opad_dma_addr + digest_size),
		     digest_size, NS_BIT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	/* Load init. digest len (64 bytes) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], hash_mode);
	set_din_sram(&desc[idx], cc_digest_len_addr(ctx->drvdata, hash_mode),
		     ctx->drvdata->hash_len_sz);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Perform HASH update */
	hw_desc_init(&desc[idx]);
	set_din_sram(&desc[idx], aead_handle->sram_workspace_addr,
		     digest_size);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	*seq_size = idx;
}

static void cc_mlli_to_sram(struct aead_request *req,
			    struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);

	if (req_ctx->assoc_buff_type == CC_DMA_BUF_MLLI ||
	    req_ctx->data_buff_type == CC_DMA_BUF_MLLI ||
	    !req_ctx->is_single_pass) {
		dev_dbg(dev, "Copy-to-sram: mlli_dma=%08x, mlli_size=%u\n",
			(unsigned int)ctx->drvdata->mlli_sram_addr,
			req_ctx->mlli_params.mlli_len);
		/* Copy MLLI table host-to-sram */
		hw_desc_init(&desc[*seq_size]);
		set_din_type(&desc[*seq_size], DMA_DLLI,
			     req_ctx->mlli_params.mlli_dma_addr,
			     req_ctx->mlli_params.mlli_len, NS_BIT);
		set_dout_sram(&desc[*seq_size],
			      ctx->drvdata->mlli_sram_addr,
			      req_ctx->mlli_params.mlli_len);
		set_flow_mode(&desc[*seq_size], BYPASS);
		(*seq_size)++;
	}
}

static enum cc_flow_mode cc_get_data_flow(enum drv_crypto_direction direct,
					  enum cc_flow_mode setup_flow_mode,
					  bool is_single_pass)
{
	enum cc_flow_mode data_flow_mode;

	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_to_HASH_and_DOUT : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_to_HASH_and_DOUT : DIN_DES_DOUT;
	} else { /* Decrypt */
		if (setup_flow_mode == S_DIN_to_AES)
			data_flow_mode = is_single_pass ?
				AES_and_HASH : DIN_AES_DOUT;
		else
			data_flow_mode = is_single_pass ?
				DES_and_HASH : DIN_DES_DOUT;
	}

	return data_flow_mode;
}

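/*
 * Note: the authenc flows below run a single pass over the data when the
 * engine can feed the cipher and the hash in parallel (word-aligned
 * lengths, see validate_data_size()); otherwise they fall back to a
 * double-pass sequence that runs the cipher and the authentication as
 * separate passes through the HW queue.
 */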
static void cc_hmac_authenc(struct aead_request *req, struct cc_hw_desc desc[],
			    unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int data_flow_mode =
		cc_get_data_flow(direct, ctx->flow_mode,
				 req_ctx->is_single_pass);

	if (req_ctx->is_single_pass) {
		/* Single-pass flow */
		cc_set_hmac_desc(req, desc, seq_size);
		cc_set_cipher_desc(req, desc, seq_size);
		cc_proc_header_desc(req, desc, seq_size);
		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
		return;
	}

	/*
	 * Double-pass flow
	 * Fallback for unsupported single-pass modes,
	 * i.e. using assoc. data of non-word-multiple size
	 */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* encrypt first.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after..*/
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
	} else { /*DECRYPT*/
		/* authenc first..*/
		cc_set_hmac_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_scheme_desc(req, desc, seq_size);
		/* decrypt after.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* read the digest result with setting the completion bit
		 * must be after the cipher operation
		 */
		cc_proc_digest_desc(req, desc, seq_size);
	}
}

static void
cc_xcbc_authenc(struct aead_request *req, struct cc_hw_desc desc[],
		unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	int direct = req_ctx->gen_ctx.op_type;
	unsigned int data_flow_mode =
		cc_get_data_flow(direct, ctx->flow_mode,
				 req_ctx->is_single_pass);

	if (req_ctx->is_single_pass) {
		/* Single-pass flow */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_set_cipher_desc(req, desc, seq_size);
		cc_proc_header_desc(req, desc, seq_size);
		cc_proc_cipher_desc(req, data_flow_mode, desc, seq_size);
		cc_proc_digest_desc(req, desc, seq_size);
		return;
	}

	/*
	 * Double-pass flow
	 * Fallback for unsupported single-pass modes,
	 * i.e. using assoc. data of non-word-multiple size
	 */
	if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) {
		/* encrypt first.. */
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* authenc after.. */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		cc_proc_digest_desc(req, desc, seq_size);
	} else { /*DECRYPT*/
		/* authenc first.. */
		cc_set_xcbc_desc(req, desc, seq_size);
		cc_proc_authen_desc(req, DIN_HASH, desc, seq_size, direct);
		/* decrypt after..*/
		cc_proc_cipher(req, desc, seq_size, data_flow_mode);
		/* read the digest result with setting the completion bit
		 * must be after the cipher operation
		 */
		cc_proc_digest_desc(req, desc, seq_size);
	}
}

static int validate_data_size(struct cc_aead_ctx *ctx,
			      enum drv_crypto_direction direct,
			      struct aead_request *req)
{
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	unsigned int assoclen = req->assoclen;
	unsigned int cipherlen = (direct == DRV_CRYPTO_DIRECTION_DECRYPT) ?
			(req->cryptlen - ctx->authsize) : req->cryptlen;

	if (direct == DRV_CRYPTO_DIRECTION_DECRYPT &&
	    req->cryptlen < ctx->authsize)
		goto data_size_err;

	areq_ctx->is_single_pass = true; /*defaulted to fast flow*/

	switch (ctx->flow_mode) {
	case S_DIN_to_AES:
		if (ctx->cipher_mode == DRV_CIPHER_CBC &&
		    !IS_ALIGNED(cipherlen, AES_BLOCK_SIZE))
			goto data_size_err;
		if (ctx->cipher_mode == DRV_CIPHER_CCM)
			break;
		if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
			if (areq_ctx->plaintext_authenticate_only)
				areq_ctx->is_single_pass = false;
			break;
		}

		if (!IS_ALIGNED(assoclen, sizeof(u32)))
			areq_ctx->is_single_pass = false;

		if (ctx->cipher_mode == DRV_CIPHER_CTR &&
		    !IS_ALIGNED(cipherlen, sizeof(u32)))
			areq_ctx->is_single_pass = false;

		break;
	case S_DIN_to_DES:
		if (!IS_ALIGNED(cipherlen, DES_BLOCK_SIZE))
			goto data_size_err;
		if (!IS_ALIGNED(assoclen, DES_BLOCK_SIZE))
			areq_ctx->is_single_pass = false;
		break;
	default:
		dev_err(dev, "Unexpected flow mode (%d)\n", ctx->flow_mode);
		goto data_size_err;
	}

	return 0;

data_size_err:
	return -EINVAL;
}

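/*
 * Note: format_ccm_a0() encodes the associated-data length the way
 * RFC 3610 / NIST SP 800-38C prescribe: 2 bytes for lengths below
 * 2^16 - 2^8, otherwise a 0xFF 0xFE marker followed by a 4-byte length.
 */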
static unsigned int format_ccm_a0(u8 *pa0_buff, u32 header_size)
{
	unsigned int len = 0;

	if (header_size == 0)
		return 0;

	if (header_size < ((1UL << 16) - (1UL << 8))) {
		len = 2;

		pa0_buff[0] = (header_size >> 8) & 0xFF;
		pa0_buff[1] = header_size & 0xFF;
	} else {
		len = 6;

		pa0_buff[0] = 0xFF;
		pa0_buff[1] = 0xFE;
		pa0_buff[2] = (header_size >> 24) & 0xFF;
		pa0_buff[3] = (header_size >> 16) & 0xFF;
		pa0_buff[4] = (header_size >> 8) & 0xFF;
		pa0_buff[5] = header_size & 0xFF;
	}

	return len;
}

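/*
 * Note: set_msg_len() mirrors the helper in crypto/ccm.c - it writes the
 * message length big-endian into the last L bytes of the B0 block and
 * fails if the length does not fit in L bytes.
 */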
static int set_msg_len(u8 *block, unsigned int msglen, unsigned int csize)
{
	__be32 data;

	memset(block, 0, csize);
	block += csize;

	if (csize >= 4)
		csize = 4;
	else if (msglen > (1 << (8 * csize)))
		return -EOVERFLOW;

	data = cpu_to_be32(msglen);
	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);

	return 0;
}

static int cc_ccm(struct aead_request *req, struct cc_hw_desc desc[],
		  unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;
	unsigned int cipher_flow_mode;
	dma_addr_t mac_result;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		cipher_flow_mode = AES_to_HASH_and_DOUT;
		mac_result = req_ctx->mac_buf_dma_addr;
	} else { /* Encrypt */
		cipher_flow_mode = AES_and_HASH;
		mac_result = req_ctx->icv_dma_addr;
	}

	/* load key */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load ctr state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI,
		     req_ctx->gen_ctx.iv_dma_addr, AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* load MAC key */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ((ctx->enc_keylen == 24) ? CC_AES_KEY_SIZE_MAX :
		      ctx->enc_keylen), NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load MAC state */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DESC_DIRECTION_ENCRYPT_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* process assoc data */
	if (req->assoclen > 0) {
		cc_set_assoc_desc(req, DIN_HASH, desc, &idx);
	} else {
		hw_desc_init(&desc[idx]);
		set_din_type(&desc[idx], DMA_DLLI,
			     sg_dma_address(&req_ctx->ccm_adata_sg),
			     AES_BLOCK_SIZE + req_ctx->ccm_hdr_size, NS_BIT);
		set_flow_mode(&desc[idx], DIN_HASH);
		idx++;
	}

	/* process the cipher */
	if (req_ctx->cryptlen)
		cc_proc_cipher_desc(req, cipher_flow_mode, desc, &idx);

	/* Read temporal MAC */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CBC_MAC);
	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, ctx->authsize,
		      NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_cipher_config0(&desc[idx], HASH_DIGEST_RESULT_LITTLE_ENDIAN);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load AES-CTR state (for last MAC calculation)*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_CTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->ccm_iv0_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* encrypt the "T" value and store MAC in mac_state */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     ctx->authsize, NS_BIT);
	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	*seq_size = idx;
	return 0;
}

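/*
 * Note: config_ccm_adata() builds the CCM control blocks defined by
 * RFC 3610: B0 (flags | nonce | message length), A0 (the CTR block with
 * counter 0, used to encrypt the MAC) and the initial data counter block.
 * The B0 flags byte packs Adata (bit 6), M' = (authsize - 2) / 2 and L'.
 */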
static int config_ccm_adata(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct device *dev = drvdata_to_dev(ctx->drvdata);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	//unsigned int size_of_a = 0, rem_a_size = 0;
	unsigned int lp = req->iv[0];
	/* Note: The code assumes that req->iv[0] already contains the value
	 * of L' of RFC 3610
	 */
	unsigned int l = lp + 1; /* This is L' of RFC 3610. */
	unsigned int m = ctx->authsize; /* This is M' of RFC 3610. */
	u8 *b0 = req_ctx->ccm_config + CCM_B0_OFFSET;
	u8 *a0 = req_ctx->ccm_config + CCM_A0_OFFSET;
	u8 *ctr_count_0 = req_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET;
	unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
				 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
				req->cryptlen :
				(req->cryptlen - ctx->authsize);
	int rc;

	memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);
	memset(req_ctx->ccm_config, 0, AES_BLOCK_SIZE * 3);

	/* taken from crypto/ccm.c */
	/* 2 <= L <= 8, so 1 <= L' <= 7. */
	if (l < 2 || l > 8) {
		dev_err(dev, "illegal iv value %X\n", req->iv[0]);
		return -EINVAL;
	}
	memcpy(b0, req->iv, AES_BLOCK_SIZE);

	/* format control info per RFC 3610 and
	 * NIST Special Publication 800-38C
	 */
	*b0 |= (8 * ((m - 2) / 2));
	if (req->assoclen > 0)
		*b0 |= 64; /* Enable bit 6 if Adata exists. */

	rc = set_msg_len(b0 + 16 - l, cryptlen, l); /* Write L'. */
	if (rc) {
		dev_err(dev, "message len overflow detected");
		return rc;
	}
	/* END of "taken from crypto/ccm.c" */

	/* l(a) - size of associated data. */
	req_ctx->ccm_hdr_size = format_ccm_a0(a0, req->assoclen);

	memset(req->iv + 15 - req->iv[0], 0, req->iv[0] + 1);
	req->iv[15] = 1;

	memcpy(ctr_count_0, req->iv, AES_BLOCK_SIZE);
	ctr_count_0[15] = 0;

	return 0;
}

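/*
 * Note: for RFC 4309 (CCM over ESP) the 11-byte nonce is the 3-byte salt
 * taken from the key concatenated with the 8-byte per-request IV; the first
 * IV byte is set to 3 so the message-length field always spans 4 bytes.
 */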
static void cc_proc_rfc4309_ccm(struct aead_request *req)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

	/* L' */
	memset(areq_ctx->ctr_iv, 0, AES_BLOCK_SIZE);
	/* For RFC 4309, always use 4 bytes for message length
	 * (at most 2^32-1 bytes).
	 */
	areq_ctx->ctr_iv[0] = 3;

	/* In RFC 4309 there is an 11-bytes nonce+IV part,
	 * that we build here.
	 */
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_NONCE_OFFSET, ctx->ctr_nonce,
	       CCM_BLOCK_NONCE_SIZE);
	memcpy(areq_ctx->ctr_iv + CCM_BLOCK_IV_OFFSET, req->iv,
	       CCM_BLOCK_IV_SIZE);
	req->iv = areq_ctx->ctr_iv;
	req->assoclen -= CCM_BLOCK_IV_SIZE;
}

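/*
 * Note: cc_set_ghash_desc() derives the GHASH subkey H = AES-K(0^128) with
 * the cipher engine, then loads H and an all-zero initial state into the
 * hash engine's GHASH mode before any data is processed.
 */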
static void cc_set_ghash_desc(struct aead_request *req,
			      struct cc_hw_desc desc[], unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_ECB);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* process one zero block to generate hkey */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_dlli(&desc[idx], req_ctx->hkey_dma_addr, AES_BLOCK_SIZE,
		      NS_BIT, 0);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* Load GHASH subkey */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->hkey_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Configure Hash Engine to work with GHASH.
	 * Since it was not possible to extend HASH submodes to add GHASH,
	 * The following command is necessary in order to
	 * select GHASH (according to HW designers)
	 */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_do(&desc[idx], 1); //1=AES_SK RKEK
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	idx++;

	/* Load GHASH initial STATE (which is 0). (for any hash there is an
	 * initial state)
	 */
	hw_desc_init(&desc[idx]);
	set_din_const(&desc[idx], 0x0, AES_BLOCK_SIZE);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	set_flow_mode(&desc[idx], S_DIN_to_HASH);
	set_aes_not_hash_mode(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_cipher_config1(&desc[idx], HASH_PADDING_ENABLED);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE0);
	idx++;

	*seq_size = idx;
}

static void cc_set_gctr_desc(struct aead_request *req, struct cc_hw_desc desc[],
			     unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int idx = *seq_size;

	/* load key to AES*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_din_type(&desc[idx], DMA_DLLI, ctx->enckey_dma_addr,
		     ctx->enc_keylen, NS_BIT);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_setup_mode(&desc[idx], SETUP_LOAD_KEY0);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	if (req_ctx->cryptlen && !req_ctx->plaintext_authenticate_only) {
		/* load AES/CTR initial CTR value inc by 2*/
		hw_desc_init(&desc[idx]);
		set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
		set_key_size_aes(&desc[idx], ctx->enc_keylen);
		set_din_type(&desc[idx], DMA_DLLI,
			     req_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE,
			     NS_BIT);
		set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
		set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
		set_flow_mode(&desc[idx], S_DIN_to_AES);
		idx++;
	}

	*seq_size = idx;
}

static void cc_proc_gcm_result(struct aead_request *req,
			       struct cc_hw_desc desc[],
			       unsigned int *seq_size)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	dma_addr_t mac_result;
	unsigned int idx = *seq_size;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		mac_result = req_ctx->mac_buf_dma_addr;
	} else { /* Encrypt */
		mac_result = req_ctx->icv_dma_addr;
	}

	/* process(ghash) gcm_block_len */
	hw_desc_init(&desc[idx]);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_block_len_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_flow_mode(&desc[idx], DIN_HASH);
	idx++;

	/* Store GHASH state after GHASH(Associated Data + Cipher +LenBlock) */
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_HASH_HW_GHASH);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_dlli(&desc[idx], req_ctx->mac_buf_dma_addr, AES_BLOCK_SIZE,
		      NS_BIT, 0);
	set_setup_mode(&desc[idx], SETUP_WRITE_STATE0);
	set_flow_mode(&desc[idx], S_HASH_to_DOUT);
	set_aes_not_hash_mode(&desc[idx]);
	idx++;

	/* load AES/CTR initial CTR value inc by 1*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_key_size_aes(&desc[idx], ctx->enc_keylen);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->gcm_iv_inc1_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_cipher_config0(&desc[idx], DRV_CRYPTO_DIRECTION_ENCRYPT);
	set_setup_mode(&desc[idx], SETUP_LOAD_STATE1);
	set_flow_mode(&desc[idx], S_DIN_to_AES);
	idx++;

	/* Memory Barrier */
	hw_desc_init(&desc[idx]);
	set_din_no_dma(&desc[idx], 0, 0xfffff0);
	set_dout_no_dma(&desc[idx], 0, 0, 1);
	idx++;

	/* process GCTR on stored GHASH and store MAC in mac_state*/
	hw_desc_init(&desc[idx]);
	set_cipher_mode(&desc[idx], DRV_CIPHER_GCTR);
	set_din_type(&desc[idx], DMA_DLLI, req_ctx->mac_buf_dma_addr,
		     AES_BLOCK_SIZE, NS_BIT);
	set_dout_dlli(&desc[idx], mac_result, ctx->authsize, NS_BIT, 1);
	set_queue_last_ind(ctx->drvdata, &desc[idx]);
	set_flow_mode(&desc[idx], DIN_AES_DOUT);
	idx++;

	*seq_size = idx;
}

static int cc_gcm(struct aead_request *req, struct cc_hw_desc desc[],
		  unsigned int *seq_size)
{
	struct aead_req_ctx *req_ctx = aead_request_ctx(req);
	unsigned int cipher_flow_mode;

	if (req_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) {
		cipher_flow_mode = AES_and_HASH;
	} else { /* Encrypt */
		cipher_flow_mode = AES_to_HASH_and_DOUT;
	}

	//in RFC4543 no data to encrypt. just copy data from src to dest.
	if (req_ctx->plaintext_authenticate_only) {
		cc_proc_cipher_desc(req, BYPASS, desc, seq_size);
		cc_set_ghash_desc(req, desc, seq_size);
		/* process(ghash) assoc data */
		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
		cc_set_gctr_desc(req, desc, seq_size);
		cc_proc_gcm_result(req, desc, seq_size);
		return 0;
	}

	// for gcm and rfc4106.
	cc_set_ghash_desc(req, desc, seq_size);
	/* process(ghash) assoc data */
	if (req->assoclen > 0)
		cc_set_assoc_desc(req, DIN_HASH, desc, seq_size);
	cc_set_gctr_desc(req, desc, seq_size);
	/* process(gctr+ghash) */
	if (req_ctx->cryptlen)
		cc_proc_cipher_desc(req, cipher_flow_mode, desc, seq_size);
	cc_proc_gcm_result(req, desc, seq_size);

	return 0;
}

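/*
 * Note: config_gcm_context() prepares the two counter blocks used later:
 * J0 + 1 (gcm_iv_inc1) for encrypting the final tag and J0 + 2
 * (gcm_iv_inc2) for the first data block, plus the GHASH length block
 * holding len(A) and len(C) in bits.
 */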

static int config_gcm_context(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *req_ctx = aead_request_ctx(req);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        unsigned int cryptlen = (req_ctx->gen_ctx.op_type ==
                                 DRV_CRYPTO_DIRECTION_ENCRYPT) ?
                                req->cryptlen :
                                (req->cryptlen - ctx->authsize);
        __be32 counter = cpu_to_be32(2);

        dev_dbg(dev, "%s() cryptlen = %d, req->assoclen = %d ctx->authsize = %d\n",
                __func__, cryptlen, req->assoclen, ctx->authsize);

        memset(req_ctx->hkey, 0, AES_BLOCK_SIZE);

        memset(req_ctx->mac_buf, 0, AES_BLOCK_SIZE);

        memcpy(req->iv + 12, &counter, 4);
        memcpy(req_ctx->gcm_iv_inc2, req->iv, 16);

        counter = cpu_to_be32(1);
        memcpy(req->iv + 12, &counter, 4);
        memcpy(req_ctx->gcm_iv_inc1, req->iv, 16);

        if (!req_ctx->plaintext_authenticate_only) {
                __be64 temp64;

                temp64 = cpu_to_be64(req->assoclen * 8);
                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
                temp64 = cpu_to_be64(cryptlen * 8);
                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
        } else {
                /* rfc4543 => all data (AAD, IV, plaintext) is treated as
                 * additional authenticated data, i.e. nothing is encrypted.
                 */
                __be64 temp64;

                temp64 = cpu_to_be64((req->assoclen + GCM_BLOCK_RFC4_IV_SIZE +
                                      cryptlen) * 8);
                memcpy(&req_ctx->gcm_len_block.len_a, &temp64, sizeof(temp64));
                temp64 = 0;
                memcpy(&req_ctx->gcm_len_block.len_c, &temp64, 8);
        }

        return 0;
}
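
/*
 * Note on the two counter blocks prepared above: gcm_iv_inc1 holds the
 * initial counter block J0 (IV || 0x00000001), used only to encrypt the
 * final GHASH value into the tag, while gcm_iv_inc2 holds J0 + 1
 * (IV || 0x00000002), which seeds the counter stream used for the
 * payload itself.
 */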

static void cc_proc_rfc4_gcm(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);

        memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_NONCE_OFFSET,
               ctx->ctr_nonce, GCM_BLOCK_RFC4_NONCE_SIZE);
        memcpy(areq_ctx->ctr_iv + GCM_BLOCK_RFC4_IV_OFFSET, req->iv,
               GCM_BLOCK_RFC4_IV_SIZE);
        req->iv = areq_ctx->ctr_iv;
        req->assoclen -= GCM_BLOCK_RFC4_IV_SIZE;
}
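
/*
 * RFC 4106/4543 nonce layout: the 12-byte GCM nonce is the 4-byte salt
 * taken from the tail of the key (ctx->ctr_nonce) followed by the
 * 8-byte explicit IV carried in the request, i.e.
 *
 *      ctr_iv = salt[4] || explicit_iv[8]
 *
 * The explicit IV is also transmitted as part of the associated data,
 * which is why assoclen is reduced by GCM_BLOCK_RFC4_IV_SIZE here.
 */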

static int cc_proc_aead(struct aead_request *req,
                        enum drv_crypto_direction direct)
{
        int rc = 0;
        int seq_len = 0;
        struct cc_hw_desc desc[MAX_AEAD_PROCESS_SEQ];
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct cc_crypto_req cc_req = {};

        dev_dbg(dev, "%s context=%p req=%p iv=%p src=%p src_ofs=%d dst=%p dst_ofs=%d cryptolen=%d\n",
                ((direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? "Enc" : "Dec"),
                ctx, req, req->iv, sg_virt(req->src), req->src->offset,
                sg_virt(req->dst), req->dst->offset, req->cryptlen);

        /* STAT_PHASE_0: Init and sanity checks */

        /* Check data length according to mode */
        if (validate_data_size(ctx, direct, req)) {
                dev_err(dev, "Unsupported crypt/assoc len %d/%d.\n",
                        req->cryptlen, req->assoclen);
                crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_BLOCK_LEN);
                rc = -EINVAL;
                goto exit;
        }

        /* Setup request structure */
        cc_req.user_cb = (void *)cc_aead_complete;
        cc_req.user_arg = (void *)req;

        /* Setup request context */
        areq_ctx->gen_ctx.op_type = direct;
        areq_ctx->req_authsize = ctx->authsize;
        areq_ctx->cipher_mode = ctx->cipher_mode;

        /* STAT_PHASE_1: Map buffers */

        if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                /* Build CTR IV - Copy nonce from last 4 bytes in
                 * CTR key to first 4 bytes in CTR IV
                 */
                memcpy(areq_ctx->ctr_iv, ctx->ctr_nonce,
                       CTR_RFC3686_NONCE_SIZE);
                if (!areq_ctx->backup_giv) /* user-provided (not generated) IV */
                        memcpy(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE,
                               req->iv, CTR_RFC3686_IV_SIZE);
                /* Initialize counter portion of counter block */
                *(__be32 *)(areq_ctx->ctr_iv + CTR_RFC3686_NONCE_SIZE +
                            CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

                /* Replace with counter iv */
                req->iv = areq_ctx->ctr_iv;
                areq_ctx->hw_iv_size = CTR_RFC3686_BLOCK_SIZE;
        } else if ((ctx->cipher_mode == DRV_CIPHER_CCM) ||
                   (ctx->cipher_mode == DRV_CIPHER_GCTR)) {
                areq_ctx->hw_iv_size = AES_BLOCK_SIZE;
                if (areq_ctx->ctr_iv != req->iv) {
                        memcpy(areq_ctx->ctr_iv, req->iv,
                               crypto_aead_ivsize(tfm));
                        req->iv = areq_ctx->ctr_iv;
                }
        } else {
                areq_ctx->hw_iv_size = crypto_aead_ivsize(tfm);
        }

        if (ctx->cipher_mode == DRV_CIPHER_CCM) {
                rc = config_ccm_adata(req);
                if (rc) {
                        dev_dbg(dev, "config_ccm_adata() returned with a failure %d!",
                                rc);
                        goto exit;
                }
        } else {
                areq_ctx->ccm_hdr_size = ccm_header_size_null;
        }

        if (ctx->cipher_mode == DRV_CIPHER_GCTR) {
                rc = config_gcm_context(req);
                if (rc) {
                        dev_dbg(dev, "config_gcm_context() returned with a failure %d!",
                                rc);
                        goto exit;
                }
        }

        rc = cc_map_aead_request(ctx->drvdata, req);
        if (rc) {
                dev_err(dev, "map_request() failed\n");
                goto exit;
        }

        /* do we need to generate IV? */
        if (areq_ctx->backup_giv) {
                /* set the DMA mapped IV address */
                if (ctx->cipher_mode == DRV_CIPHER_CTR) {
                        cc_req.ivgen_dma_addr[0] =
                                areq_ctx->gen_ctx.iv_dma_addr +
                                CTR_RFC3686_NONCE_SIZE;
                        cc_req.ivgen_dma_addr_len = 1;
                } else if (ctx->cipher_mode == DRV_CIPHER_CCM) {
                        /* In CCM the IV needs to exist both inside B0 and
                         * inside the counter. It is also copied to
                         * iv_dma_addr for other reasons (like returning it
                         * to the user). So, three (identical) IV outputs
                         * are used.
                         */
                        cc_req.ivgen_dma_addr[0] =
                                areq_ctx->gen_ctx.iv_dma_addr +
                                CCM_BLOCK_IV_OFFSET;
                        cc_req.ivgen_dma_addr[1] =
                                sg_dma_address(&areq_ctx->ccm_adata_sg) +
                                CCM_B0_OFFSET + CCM_BLOCK_IV_OFFSET;
                        cc_req.ivgen_dma_addr[2] =
                                sg_dma_address(&areq_ctx->ccm_adata_sg) +
                                CCM_CTR_COUNT_0_OFFSET + CCM_BLOCK_IV_OFFSET;
                        cc_req.ivgen_dma_addr_len = 3;
                } else {
                        cc_req.ivgen_dma_addr[0] =
                                areq_ctx->gen_ctx.iv_dma_addr;
                        cc_req.ivgen_dma_addr_len = 1;
                }

                /* set the IV size (8/16 B long) */
                cc_req.ivgen_size = crypto_aead_ivsize(tfm);
        }

        /* STAT_PHASE_2: Create sequence */

        /* Load MLLI tables to SRAM if necessary */
        cc_mlli_to_sram(req, desc, &seq_len);

        /* TODO: move seq len by reference */
        switch (ctx->auth_mode) {
        case DRV_HASH_SHA1:
        case DRV_HASH_SHA256:
                cc_hmac_authenc(req, desc, &seq_len);
                break;
        case DRV_HASH_XCBC_MAC:
                cc_xcbc_authenc(req, desc, &seq_len);
                break;
        case DRV_HASH_NULL:
                if (ctx->cipher_mode == DRV_CIPHER_CCM)
                        cc_ccm(req, desc, &seq_len);
                if (ctx->cipher_mode == DRV_CIPHER_GCTR)
                        cc_gcm(req, desc, &seq_len);
                break;
        default:
                dev_err(dev, "Unsupported authenc (%d)\n", ctx->auth_mode);
                cc_unmap_aead_request(dev, req);
                rc = -ENOTSUPP;
                goto exit;
        }

        /* STAT_PHASE_3: Lock HW and push sequence */

        rc = cc_send_request(ctx->drvdata, &cc_req, desc, seq_len, &req->base);

        if (rc != -EINPROGRESS && rc != -EBUSY) {
                dev_err(dev, "send_request() failed (rc=%d)\n", rc);
                cc_unmap_aead_request(dev, req);
        }

exit:
        return rc;
}
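
/*
 * cc_send_request() queues the descriptor sequence for asynchronous
 * completion, so -EINPROGRESS (or -EBUSY when the request is
 * backlogged) is the expected "success" return value here; the final
 * status is delivered later through cc_aead_complete(). Any other
 * return value is a hard failure and the AEAD request mapping is torn
 * down immediately.
 */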

static int cc_aead_encrypt(struct aead_request *req)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc;

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;
        areq_ctx->is_gcm4543 = false;

        areq_ctx->plaintext_authenticate_only = false;

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;

        return rc;
}
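
/*
 * req->iv is saved and restored around cc_proc_aead() because the
 * driver may redirect req->iv to its per-request ctr_iv buffer; on a
 * non-asynchronous return the caller's original IV pointer must be put
 * back before returning.
 */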

static int cc_rfc4309_ccm_encrypt(struct aead_request *req)
{
        /* Very similar to cc_aead_encrypt() above. */

        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        int rc = -EINVAL;

        if (!valid_assoclen(req)) {
                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
                goto out;
        }

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;
        areq_ctx->is_gcm4543 = true;

        cc_proc_rfc4309_ccm(req);

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;
out:
        return rc;
}

static int cc_aead_decrypt(struct aead_request *req)
{
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc;

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;
        areq_ctx->is_gcm4543 = false;

        areq_ctx->plaintext_authenticate_only = false;

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;

        return rc;
}

static int cc_rfc4309_ccm_decrypt(struct aead_request *req)
{
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc = -EINVAL;

        if (!valid_assoclen(req)) {
                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
                goto out;
        }

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;

        areq_ctx->is_gcm4543 = true;
        cc_proc_rfc4309_ccm(req);

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;

out:
        return rc;
}

static int cc_rfc4106_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);

        if (keylen < 4)
                return -EINVAL;

        keylen -= 4;
        memcpy(ctx->ctr_nonce, key + keylen, 4);

        return cc_aead_setkey(tfm, key, keylen);
}

static int cc_rfc4543_gcm_setkey(struct crypto_aead *tfm, const u8 *key,
                                 unsigned int keylen)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "%s() keylen %d, key %p\n", __func__, keylen, key);

        if (keylen < 4)
                return -EINVAL;

        keylen -= 4;
        memcpy(ctx->ctr_nonce, key + keylen, 4);

        return cc_aead_setkey(tfm, key, keylen);
}
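
/*
 * For both rfc4106 and rfc4543 the key material passed to setkey is
 * the AES key followed by a 4-byte salt (per RFC 4106). The salt is
 * stripped off here and kept as ctx->ctr_nonce for the implicit part
 * of the nonce; only the remaining bytes are used as the actual AES
 * key.
 */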

static int cc_gcm_setauthsize(struct crypto_aead *authenc,
                              unsigned int authsize)
{
        /* GCM allows ICV sizes of 4, 8 and 12..16 bytes */
        switch (authsize) {
        case 4:
        case 8:
        case 12 ... 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_rfc4106_gcm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "authsize %d\n", authsize);

        switch (authsize) {
        case 8:
        case 12:
        case 16:
                break;
        default:
                return -EINVAL;
        }

        return cc_aead_setauthsize(authenc, authsize);
}

static int cc_rfc4543_gcm_setauthsize(struct crypto_aead *authenc,
                                      unsigned int authsize)
{
        struct cc_aead_ctx *ctx = crypto_aead_ctx(authenc);
        struct device *dev = drvdata_to_dev(ctx->drvdata);

        dev_dbg(dev, "authsize %d\n", authsize);

        if (authsize != 16)
                return -EINVAL;

        return cc_aead_setauthsize(authenc, authsize);
}
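
/*
 * RFC 4543 (GMAC) always carries a full 16-byte ICV and RFC 4106 only
 * allows 8, 12 or 16 bytes, which is what the two checks above
 * enforce; plain gcm(aes) additionally accepts the truncated tag sizes
 * handled by cc_gcm_setauthsize().
 */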

static int cc_rfc4106_gcm_encrypt(struct aead_request *req)
{
        /* Very similar to cc_aead_encrypt() above. */

        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc = -EINVAL;

        if (!valid_assoclen(req)) {
                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
                goto out;
        }

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;

        areq_ctx->plaintext_authenticate_only = false;

        cc_proc_rfc4_gcm(req);
        areq_ctx->is_gcm4543 = true;

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;
out:
        return rc;
}

static int cc_rfc4543_gcm_encrypt(struct aead_request *req)
{
        /* Very similar to cc_aead_encrypt() above. */

        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc;

        // plaintext is not encrypted with rfc4543
        areq_ctx->plaintext_authenticate_only = true;

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;

        cc_proc_rfc4_gcm(req);
        areq_ctx->is_gcm4543 = true;

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_ENCRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;

        return rc;
}

static int cc_rfc4106_gcm_decrypt(struct aead_request *req)
{
        /* Very similar to cc_aead_decrypt() above. */

        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cc_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct device *dev = drvdata_to_dev(ctx->drvdata);
        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc = -EINVAL;

        if (!valid_assoclen(req)) {
                dev_err(dev, "invalid Assoclen:%u\n", req->assoclen);
                goto out;
        }

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;

        areq_ctx->plaintext_authenticate_only = false;

        cc_proc_rfc4_gcm(req);
        areq_ctx->is_gcm4543 = true;

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;
out:
        return rc;
}

static int cc_rfc4543_gcm_decrypt(struct aead_request *req)
{
        /* Very similar to cc_aead_decrypt() above. */

        struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
        int rc;

        // plaintext is not decrypted with rfc4543
        areq_ctx->plaintext_authenticate_only = true;

        /* No generated IV required */
        areq_ctx->backup_iv = req->iv;
        areq_ctx->backup_giv = NULL;

        cc_proc_rfc4_gcm(req);
        areq_ctx->is_gcm4543 = true;

        rc = cc_proc_aead(req, DRV_CRYPTO_DIRECTION_DECRYPT);
        if (rc != -EINPROGRESS && rc != -EBUSY)
                req->iv = areq_ctx->backup_iv;

        return rc;
}

static struct cc_alg_template aead_algs[] = {
        {
                .name = "authenc(hmac(sha1),cbc(aes))",
                .driver_name = "authenc-hmac-sha1-cbc-aes-ccree",
                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA1,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "authenc(hmac(sha1),cbc(des3_ede))",
                .driver_name = "authenc-hmac-sha1-cbc-des3-ccree",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_DES,
                .auth_mode = DRV_HASH_SHA1,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "authenc(hmac(sha256),cbc(aes))",
                .driver_name = "authenc-hmac-sha256-cbc-aes-ccree",
                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA256,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "authenc(hmac(sha256),cbc(des3_ede))",
                .driver_name = "authenc-hmac-sha256-cbc-des3-ccree",
                .blocksize = DES3_EDE_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = DES3_EDE_BLOCK_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_DES,
                .auth_mode = DRV_HASH_SHA256,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "authenc(xcbc(aes),cbc(aes))",
                .driver_name = "authenc-xcbc-aes-cbc-aes-ccree",
                .blocksize = AES_BLOCK_SIZE,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CBC,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_XCBC_MAC,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "authenc(hmac(sha1),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha1-rfc3686-ctr-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA1_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA1,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "authenc(hmac(sha256),rfc3686(ctr(aes)))",
                .driver_name = "authenc-hmac-sha256-rfc3686-ctr-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = SHA256_DIGEST_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_SHA256,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "authenc(xcbc(aes),rfc3686(ctr(aes)))",
                .driver_name = "authenc-xcbc-aes-rfc3686-ctr-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_aead_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = CTR_RFC3686_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_XCBC_MAC,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "ccm(aes)",
                .driver_name = "ccm-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_ccm_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = AES_BLOCK_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CCM,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "rfc4309(ccm(aes))",
                .driver_name = "rfc4309-ccm-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_rfc4309_ccm_setkey,
                        .setauthsize = cc_rfc4309_ccm_setauthsize,
                        .encrypt = cc_rfc4309_ccm_encrypt,
                        .decrypt = cc_rfc4309_ccm_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = CCM_BLOCK_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_CCM,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "gcm(aes)",
                .driver_name = "gcm-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_aead_setkey,
                        .setauthsize = cc_gcm_setauthsize,
                        .encrypt = cc_aead_encrypt,
                        .decrypt = cc_aead_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = 12,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_GCTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "rfc4106(gcm(aes))",
                .driver_name = "rfc4106-gcm-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_rfc4106_gcm_setkey,
                        .setauthsize = cc_rfc4106_gcm_setauthsize,
                        .encrypt = cc_rfc4106_gcm_encrypt,
                        .decrypt = cc_rfc4106_gcm_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_GCTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
                .min_hw_rev = CC_HW_REV_630,
        },
        {
                .name = "rfc4543(gcm(aes))",
                .driver_name = "rfc4543-gcm-aes-ccree",
                .blocksize = 1,
                .type = CRYPTO_ALG_TYPE_AEAD,
                .template_aead = {
                        .setkey = cc_rfc4543_gcm_setkey,
                        .setauthsize = cc_rfc4543_gcm_setauthsize,
                        .encrypt = cc_rfc4543_gcm_encrypt,
                        .decrypt = cc_rfc4543_gcm_decrypt,
                        .init = cc_aead_init,
                        .exit = cc_aead_exit,
                        .ivsize = GCM_BLOCK_RFC4_IV_SIZE,
                        .maxauthsize = AES_BLOCK_SIZE,
                },
                .cipher_mode = DRV_CIPHER_GCTR,
                .flow_mode = S_DIN_to_AES,
                .auth_mode = DRV_HASH_NULL,
                .min_hw_rev = CC_HW_REV_630,
        },
};
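
/*
 * For reference, an in-kernel user reaches these implementations through
 * the generic AEAD API rather than calling the driver directly. A rough
 * sketch (error handling, tag-space allocation and async completion
 * handling omitted; buf, key, keylen, iv, assoclen and cryptlen are
 * caller-provided) might look like:
 *
 *	struct crypto_aead *tfm;
 *	struct aead_request *areq;
 *	struct scatterlist sg;
 *
 *	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
 *	crypto_aead_setkey(tfm, key, keylen);
 *	crypto_aead_setauthsize(tfm, 16);
 *	areq = aead_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, buf, assoclen + cryptlen + 16);
 *	aead_request_set_ad(areq, assoclen);
 *	aead_request_set_crypt(areq, &sg, &sg, cryptlen, iv);
 *	crypto_aead_encrypt(areq);	// typically returns -EINPROGRESS
 */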

static struct cc_crypto_alg *cc_create_aead_alg(struct cc_alg_template *tmpl,
						struct device *dev)
{
        struct cc_crypto_alg *t_alg;
        struct aead_alg *alg;

        t_alg = kzalloc(sizeof(*t_alg), GFP_KERNEL);
        if (!t_alg)
                return ERR_PTR(-ENOMEM);

        alg = &tmpl->template_aead;

        snprintf(alg->base.cra_name, CRYPTO_MAX_ALG_NAME, "%s", tmpl->name);
        snprintf(alg->base.cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
                 tmpl->driver_name);
        alg->base.cra_module = THIS_MODULE;
        alg->base.cra_priority = CC_CRA_PRIO;

        alg->base.cra_ctxsize = sizeof(struct cc_aead_ctx);
        alg->base.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
                              tmpl->type;
        alg->init = cc_aead_init;
        alg->exit = cc_aead_exit;

        t_alg->aead_alg = *alg;

        t_alg->cipher_mode = tmpl->cipher_mode;
        t_alg->flow_mode = tmpl->flow_mode;
        t_alg->auth_mode = tmpl->auth_mode;

        return t_alg;
}

int cc_aead_free(struct cc_drvdata *drvdata)
{
        struct cc_crypto_alg *t_alg, *n;
        struct cc_aead_handle *aead_handle =
                (struct cc_aead_handle *)drvdata->aead_handle;

        if (aead_handle) {
                /* Remove registered algs */
                list_for_each_entry_safe(t_alg, n, &aead_handle->aead_list,
                                         entry) {
                        crypto_unregister_aead(&t_alg->aead_alg);
                        list_del(&t_alg->entry);
                        kfree(t_alg);
                }
                kfree(aead_handle);
                drvdata->aead_handle = NULL;
        }

        return 0;
}

int cc_aead_alloc(struct cc_drvdata *drvdata)
{
        struct cc_aead_handle *aead_handle;
        struct cc_crypto_alg *t_alg;
        int rc = -ENOMEM;
        int alg;
        struct device *dev = drvdata_to_dev(drvdata);

        aead_handle = kmalloc(sizeof(*aead_handle), GFP_KERNEL);
        if (!aead_handle) {
                rc = -ENOMEM;
                goto fail0;
        }

        INIT_LIST_HEAD(&aead_handle->aead_list);
        drvdata->aead_handle = aead_handle;

        aead_handle->sram_workspace_addr = cc_sram_alloc(drvdata,
                                                         MAX_HMAC_DIGEST_SIZE);

        if (aead_handle->sram_workspace_addr == NULL_SRAM_ADDR) {
                dev_err(dev, "SRAM pool exhausted\n");
                rc = -ENOMEM;
                goto fail1;
        }

        /* Linux crypto */
        for (alg = 0; alg < ARRAY_SIZE(aead_algs); alg++) {
                if (aead_algs[alg].min_hw_rev > drvdata->hw_rev)
                        continue;

                t_alg = cc_create_aead_alg(&aead_algs[alg], dev);
                if (IS_ERR(t_alg)) {
                        rc = PTR_ERR(t_alg);
                        dev_err(dev, "%s alg allocation failed\n",
                                aead_algs[alg].driver_name);
                        goto fail1;
                }
                t_alg->drvdata = drvdata;
                rc = crypto_register_aead(&t_alg->aead_alg);
                if (rc) {
                        dev_err(dev, "%s alg registration failed\n",
                                t_alg->aead_alg.base.cra_driver_name);
                        goto fail2;
                } else {
                        list_add_tail(&t_alg->entry, &aead_handle->aead_list);
                        dev_dbg(dev, "Registered %s\n",
                                t_alg->aead_alg.base.cra_driver_name);
                }
        }

        return 0;

fail2:
        kfree(t_alg);
fail1:
        cc_aead_free(drvdata);
fail0:
        return rc;
}