// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

#include <linux/kernel.h>
#include <linux/string.h>

char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
	"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };

char *aead_alg_name[] = { "ccm(aes)", "gcm(aes)", "authenc" };
/* Assumes SPU-M messages are in big endian */
void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
{
	u8 *ptr = buf;
	struct SPUHEADER *spuh = (struct SPUHEADER *)buf;
	unsigned int hash_key_len = 0;
	unsigned int hash_state_len = 0;
	unsigned int cipher_key_len = 0;
	unsigned int iv_len;
	u32 pflags;
	u32 cflags;
	u32 ecf;
	u32 cipher_alg;
	u32 cipher_mode;
	u32 cipher_type;
	u32 hash_alg;
	u32 hash_mode;
	u32 hash_type;
	u32 sctx_size;		/* SCTX length in words */
	u32 sctx_pl_len;	/* SCTX payload length in bytes */

	packet_log("SPU Message header %p len: %u\n", buf, buf_len);
	/* ========== Decode MH ========== */
	packet_log(" MH 0x%08x\n", be32_to_cpup((__be32 *)ptr));
	if (spuh->mh.flags & MH_SCTX_PRES)
		packet_log(" SCTX present\n");
	if (spuh->mh.flags & MH_BDESC_PRES)
		packet_log(" BDESC present\n");
	if (spuh->mh.flags & MH_MFM_PRES)
		packet_log(" MFM present\n");
	if (spuh->mh.flags & MH_BD_PRES)
		packet_log(" BD present\n");
	if (spuh->mh.flags & MH_HASH_PRES)
		packet_log(" HASH present\n");
	if (spuh->mh.flags & MH_SUPDT_PRES)
		packet_log(" SUPDT present\n");
	packet_log(" Opcode 0x%02x\n", spuh->mh.op_code);

	ptr += sizeof(spuh->mh) + sizeof(spuh->emh);	/* skip emh. unused */
	/* ========== Decode SCTX ========== */
	if (spuh->mh.flags & MH_SCTX_PRES) {
		pflags = be32_to_cpu(spuh->sa.proto_flags);
		packet_log(" SCTX[0] 0x%08x\n", pflags);
		sctx_size = pflags & SCTX_SIZE;
		packet_log(" Size %u words\n", sctx_size);

		cflags = be32_to_cpu(spuh->sa.cipher_flags);
		packet_log(" SCTX[1] 0x%08x\n", cflags);
		packet_log(" Inbound:%lu (1:decrypt/vrfy 0:encrypt/auth)\n",
			   (cflags & CIPHER_INBOUND) >> CIPHER_INBOUND_SHIFT);
		packet_log(" Order:%lu (1:AuthFirst 0:EncFirst)\n",
			   (cflags & CIPHER_ORDER) >> CIPHER_ORDER_SHIFT);
		packet_log(" ICV_IS_512:%lx\n",
			   (cflags & ICV_IS_512) >> ICV_IS_512_SHIFT);
		cipher_alg = (cflags & CIPHER_ALG) >> CIPHER_ALG_SHIFT;
		cipher_mode = (cflags & CIPHER_MODE) >> CIPHER_MODE_SHIFT;
		cipher_type = (cflags & CIPHER_TYPE) >> CIPHER_TYPE_SHIFT;
		packet_log(" Crypto Alg:%u Mode:%u Type:%u\n",
			   cipher_alg, cipher_mode, cipher_type);
		hash_alg = (cflags & HASH_ALG) >> HASH_ALG_SHIFT;
		hash_mode = (cflags & HASH_MODE) >> HASH_MODE_SHIFT;
		hash_type = (cflags & HASH_TYPE) >> HASH_TYPE_SHIFT;
		packet_log(" Hash Alg:%x Mode:%x Type:%x\n",
			   hash_alg, hash_mode, hash_type);
		packet_log(" UPDT_Offset:%u\n", cflags & UPDT_OFST);

		ecf = be32_to_cpu(spuh->sa.ecf);
		packet_log(" SCTX[2] 0x%08x\n", ecf);
		packet_log(" WriteICV:%lu CheckICV:%lu ICV_SIZE:%u ",
			   (ecf & INSERT_ICV) >> INSERT_ICV_SHIFT,
			   (ecf & CHECK_ICV) >> CHECK_ICV_SHIFT,
			   (ecf & ICV_SIZE) >> ICV_SIZE_SHIFT);
		packet_log("BD_SUPPRESS:%lu\n",
			   (ecf & BD_SUPPRESS) >> BD_SUPPRESS_SHIFT);
		packet_log(" SCTX_IV:%lu ExplicitIV:%lu GenIV:%lu ",
			   (ecf & SCTX_IV) >> SCTX_IV_SHIFT,
			   (ecf & EXPLICIT_IV) >> EXPLICIT_IV_SHIFT,
			   (ecf & GEN_IV) >> GEN_IV_SHIFT);
		packet_log("IV_OV_OFST:%lu EXP_IV_SIZE:%u\n",
			   (ecf & IV_OFFSET) >> IV_OFFSET_SHIFT,
			   ecf & EXP_IV_SIZE);

		ptr += sizeof(struct SCTX);
		if (hash_alg && hash_mode) {
			switch (hash_alg) {
			case HASH_ALG_SHA224:
			case HASH_ALG_SHA256:
			case HASH_ALG_SHA384:
			case HASH_ALG_SHA512:
			}
			packet_log(" Auth Key Type:%s Length:%u Bytes\n",
				   name, hash_key_len);
			packet_dump(" KEY: ", ptr, hash_key_len);
		} else if ((hash_alg == HASH_ALG_AES) &&
			   (hash_mode == HASH_MODE_XCBC)) {
			switch (cipher_type) {
			case CIPHER_TYPE_AES128:
				name = "AES128-XCBC";
			case CIPHER_TYPE_AES192:
				name = "AES192-XCBC";
			case CIPHER_TYPE_AES256:
				name = "AES256-XCBC";
			}
			packet_log(" Auth Key Type:%s Length:%u Bytes\n",
				   name, hash_key_len);
			packet_dump(" KEY: ", ptr, hash_key_len);
		}

		if (hash_alg && (hash_mode == HASH_MODE_NONE) &&
		    (hash_type == HASH_TYPE_UPDT)) {
			switch (hash_alg) {
			case HASH_ALG_SHA224:
			case HASH_ALG_SHA256:
			case HASH_ALG_SHA384:
			case HASH_ALG_SHA512:
			}
			packet_log(" Auth State Type:%s Length:%u Bytes\n",
				   name, hash_state_len);
			packet_dump(" State: ", ptr, hash_state_len);
			ptr += hash_state_len;
		}

		switch (cipher_alg) {
		case CIPHER_ALG_3DES:
		case CIPHER_ALG_AES:
			switch (cipher_type) {
			case CIPHER_TYPE_AES128:
			case CIPHER_TYPE_AES192:
			case CIPHER_TYPE_AES256:
			}
		case CIPHER_ALG_NONE:
		}
		packet_log(" Cipher Key Type:%s Length:%u Bytes\n",
			   name, cipher_key_len);

		/* XTS has two keys */
		if (cipher_mode == CIPHER_MODE_XTS) {
			packet_dump(" KEY2: ", ptr, cipher_key_len);
			ptr += cipher_key_len;
			packet_dump(" KEY1: ", ptr, cipher_key_len);
			ptr += cipher_key_len;
		} else {
			packet_dump(" KEY: ", ptr, cipher_key_len);
			ptr += cipher_key_len;
		}

		sctx_pl_len = sctx_size * sizeof(u32) - sizeof(struct SCTX);
		iv_len = sctx_pl_len -
			 (hash_key_len + hash_state_len +
			  cipher_key_len);
		packet_log(" IV Length:%u Bytes\n", iv_len);
		packet_dump(" IV: ", ptr, iv_len);
	}
	/* ========== Decode BDESC ========== */
	if (spuh->mh.flags & MH_BDESC_PRES) {
		struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;

		packet_log(" BDESC[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
		packet_log(" OffsetMAC:%u LengthMAC:%u\n",
			   be16_to_cpu(bdesc->offset_mac),
			   be16_to_cpu(bdesc->length_mac));

		packet_log(" BDESC[1] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
		packet_log(" OffsetCrypto:%u LengthCrypto:%u\n",
			   be16_to_cpu(bdesc->offset_crypto),
			   be16_to_cpu(bdesc->length_crypto));

		packet_log(" BDESC[2] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
		packet_log(" OffsetICV:%u OffsetIV:%u\n",
			   be16_to_cpu(bdesc->offset_icv),
			   be16_to_cpu(bdesc->offset_iv));
	}

	/* ========== Decode BD ========== */
	if (spuh->mh.flags & MH_BD_PRES) {
		struct BD_HEADER *bd = (struct BD_HEADER *)ptr;

		packet_log(" BD[0] 0x%08x\n", be32_to_cpup((__be32 *)ptr));
		packet_log(" Size:%ubytes PrevLength:%u\n",
			   be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));
	}

	/* Double check sanity */
	if (buf + buf_len != ptr) {
		packet_log(" Packet parsed incorrectly. ");
		packet_log("buf:%p buf_len:%u buf+buf_len:%p ptr:%p\n",
			   buf, buf_len, buf + buf_len, ptr);
	}
}
/**
 * spum_ns2_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     unsigned int blocksize)
{
	u32 max_payload = SPUM_NS2_MAX_PAYLOAD;
	u32 excess;

	/* In XTS on SPU-M, we'll need to insert tweak before input data */
	if (cipher_mode == CIPHER_MODE_XTS)
		max_payload -= SPU_XTS_TWEAK_SIZE;

	excess = max_payload % blocksize;

	return max_payload - excess;
}
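/*
 * Worked example with illustrative numbers (SPUM_NS2_MAX_PAYLOAD itself is
 * defined in the SPU-M headers): if the adjusted max_payload were 1000 bytes
 * and the cipher block size 16, excess = 1000 % 16 = 8, so 992 bytes would
 * be returned - the largest block-size multiple that fits in one message.
 */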
/**
 * spum_nsp_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     unsigned int blocksize)
{
	u32 max_payload = SPUM_NSP_MAX_PAYLOAD;
	u32 excess;

	/* In XTS on SPU-M, we'll need to insert tweak before input data */
	if (cipher_mode == CIPHER_MODE_XTS)
		max_payload -= SPU_XTS_TWEAK_SIZE;

	excess = max_payload % blocksize;

	return max_payload - excess;
}
/**
 * spum_payload_length() - Given a SPU-M message header, extract the payload
 * length.
 * @spu_hdr: Start of SPU header
 *
 * Assumes just MH, EMH, BD (no SCTX, BDESC). Works for response frames.
 *
 * Return: payload length in bytes
 */
u32 spum_payload_length(u8 *spu_hdr)
{
	struct BD_HEADER *bd;
	u32 pl_len;

	/* Find BD header. skip MH, EMH */
	bd = (struct BD_HEADER *)(spu_hdr + 8);
	pl_len = be16_to_cpu(bd->size);

	return pl_len;
}
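/*
 * Note on the fixed offset above: the MH and EMH fields are presumably one
 * 32-bit word each (spum_dump_msg_hdr() skips them with
 * sizeof(spuh->mh) + sizeof(spuh->emh)), so the BD header of a response
 * frame starts 8 bytes into the message.
 */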
/**
 * spum_response_hdr_len() - Given the length of the hash key and encryption
 * key, determine the expected length of a SPU response header.
 * @auth_key_len: authentication key length (bytes)
 * @enc_key_len: encryption key length (bytes)
 * @is_hash: true if response message is for a hash operation
 *
 * Return: length of SPU response header (bytes)
 */
u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
{
	if (is_hash)
		return SPU_HASH_RESP_HDR_LEN;
	else
		return SPU_RESP_HDR_LEN;
}
/**
 * spum_hash_pad_len() - Calculate the length of hash padding required to extend
 * data to a full block size.
 * @hash_alg: hash algorithm
 * @hash_mode: hash mode
 * @chunksize: length of data, in bytes
 * @hash_block_size: size of a block of data for hash algorithm
 *
 * Reserve space for 1 byte (0x80) start of pad and the total length as u64
 *
 * Return: length of hash pad in bytes
 */
u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
		      u32 chunksize, u16 hash_block_size)
{
	unsigned int length_len;
	unsigned int used_space_last_block;
	int hash_pad_len;

	/* AES-XCBC hash requires just padding to next block boundary */
	if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) {
		used_space_last_block = chunksize % hash_block_size;
		hash_pad_len = hash_block_size - used_space_last_block;
		if (hash_pad_len >= hash_block_size)
			hash_pad_len -= hash_block_size;
	} else {
		used_space_last_block = chunksize % hash_block_size + 1;
		if ((hash_alg == HASH_ALG_SHA384) ||
		    (hash_alg == HASH_ALG_SHA512))
			length_len = 2 * sizeof(u64);
		else
			length_len = sizeof(u64);

		used_space_last_block += length_len;
		hash_pad_len = hash_block_size - used_space_last_block;
		if (hash_pad_len < 0)
			hash_pad_len += hash_block_size;

		hash_pad_len += 1 + length_len;
	}
	return hash_pad_len;
}
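/*
 * Example: for SHA-256 (64-byte block, 8-byte length field) and
 * chunksize = 100, the last block needs 100 % 64 + 1 + 8 = 45 bytes for
 * data, terminator, and length, so hash_pad_len = (64 - 45) + 1 + 8 = 28 and
 * the padded data occupies exactly two 64-byte blocks (100 + 28 = 128).
 */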
/**
 * spum_gcm_ccm_pad_len() - Determine the required length of GCM or CCM padding.
 * @cipher_mode: Algo type
 * @data_size: Length of plaintext (bytes)
 *
 * Return: Length of padding, in bytes
 */
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
			 unsigned int data_size)
{
	u32 pad_len = 0;
	u32 m1 = SPU_GCM_CCM_ALIGN - 1;

	if ((cipher_mode == CIPHER_MODE_GCM) ||
	    (cipher_mode == CIPHER_MODE_CCM))
		pad_len = ((data_size + m1) & ~m1) - data_size;

	return pad_len;
}
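/*
 * Example: with the 16-byte GCM/CCM alignment (m1 = 15) and data_size = 10,
 * pad_len = ((10 + 15) & ~15) - 10 = 6, rounding the data up to the next
 * 16-byte boundary. Non-GCM/CCM modes get no padding.
 */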
/**
 * spum_assoc_resp_len() - Determine the size of the receive buffer required to
 * catch associated data.
 * @cipher_mode: cipher mode
 * @assoc_len: length of associated data (bytes)
 * @iv_len: length of IV (bytes)
 * @is_encrypt: true if encrypting. false if decrypting.
 *
 * Return: length of associated data in response message (bytes)
 */
u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
			unsigned int assoc_len, unsigned int iv_len,
			bool is_encrypt)
{
	u32 buflen = assoc_len;
	u32 pad;

	if (cipher_mode == CIPHER_MODE_GCM) {
		/* AAD needs to be padded in responses too */
		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen);
		buflen += pad;
	}
	if (cipher_mode == CIPHER_MODE_CCM) {
		/*
		 * AAD needs to be padded in responses too
		 * for CCM, len + 2 needs to be 128-bit aligned.
		 */
		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen + 2);
		buflen += pad;
	}

	return buflen;
}
/**
 * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
 * in a SPU request after the AAD and before the payload.
 * @cipher_mode: cipher mode
 * @iv_len: initialization vector length in bytes
 *
 * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
 * to include the IV as a separate field in the SPU request msg.
 *
 * Return: Length of AEAD IV in bytes
 */
u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
{
	return 0;
}
/**
 * spum_hash_type() - Determine the type of hash operation.
 * @src_sent: The number of bytes in the current request that have already
 * been sent to the SPU to be hashed.
 *
 * We do not use HASH_TYPE_FULL for requests that fit in a single SPU message.
 * Using FULL causes failures (such as when the string to be hashed is empty).
 * For similar reasons, we never use HASH_TYPE_FIN. Instead, submit messages
 * as INIT or UPDT and do the hash padding in sw.
 */
enum hash_type spum_hash_type(u32 src_sent)
{
	return src_sent ? HASH_TYPE_UPDT : HASH_TYPE_INIT;
}
/**
 * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
 * return.
 * @alg_digest_size: Number of bytes in the final digest for the given algo
 * @alg: The hash algorithm
 * @htype: Type of hash operation (init, update, full, etc)
 *
 * When doing incremental hashing for an algorithm with a truncated hash
 * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
 * a partial result for the next chunk.
 */
u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
		     enum hash_type htype)
{
	u32 digestsize = alg_digest_size;

	/* SPU returns complete digest when doing incremental hash and truncated
	 * hash algorithm.
	 */
	if ((htype == HASH_TYPE_INIT) || (htype == HASH_TYPE_UPDT)) {
		if (alg == HASH_ALG_SHA224)
			digestsize = SHA256_DIGEST_SIZE;
		else if (alg == HASH_ALG_SHA384)
			digestsize = SHA512_DIGEST_SIZE;
	}
	return digestsize;
}
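/*
 * For example, an incremental (INIT or UPDT) SHA-224 chunk yields the full
 * 32-byte SHA-256-sized state from the SPU, even though the final truncated
 * SHA-224 digest handed back to the caller is only 28 bytes.
 */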
/**
 * spum_create_request() - Build a SPU request message header, up to and
 * including the BD header. Construct the message starting at spu_hdr. Caller
 * should allocate this buffer in DMA-able memory at least SPU_HEADER_ALLOC_LEN
 * bytes long.
 * @spu_hdr: Start of buffer where SPU request header is to be written
 * @req_opts: SPU request message options
 * @cipher_parms: Parameters related to cipher algorithm
 * @hash_parms: Parameters related to hash algorithm
 * @aead_parms: Parameters related to AEAD operation
 * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
 *	       not include length of AAD.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u32 spum_create_request(u8 *spu_hdr,
			struct spu_request_opts *req_opts,
			struct spu_cipher_parms *cipher_parms,
			struct spu_hash_parms *hash_parms,
			struct spu_aead_parms *aead_parms,
			unsigned int data_size)
{
	struct SPUHEADER *spuh;
	struct BDESC_HEADER *bdesc;
	struct BD_HEADER *bd;

	u8 *ptr;
	u32 protocol_bits = 0;
	u32 cipher_bits = 0;
	u32 ecf_bits = 0;
	u8 sctx_words = 0;
	unsigned int buf_len = 0;

	/* size of the cipher payload */
	unsigned int cipher_len = hash_parms->prebuf_len + data_size +
				  hash_parms->pad_len;

	/* offset of prebuf or data from end of BD header */
	unsigned int cipher_offset = aead_parms->assoc_size +
				     aead_parms->iv_len + aead_parms->aad_pad_len;

	/* total size of the DB data (without STAT word padding) */
	unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
						     aead_parms->iv_len,
						     hash_parms->prebuf_len,
						     data_size,
						     aead_parms->aad_pad_len,
						     aead_parms->data_pad_len,
						     hash_parms->pad_len);

	unsigned int auth_offset = 0;
	unsigned int offset_iv = 0;

	/* size/offset of the auth payload */
	unsigned int auth_len;

	auth_len = real_db_size;

	if (req_opts->is_aead && req_opts->is_inbound)
		cipher_len -= hash_parms->digestsize;

	if (req_opts->is_aead && req_opts->is_inbound)
		auth_len -= hash_parms->digestsize;

	if ((hash_parms->alg == HASH_ALG_AES) &&
	    (hash_parms->mode == HASH_MODE_XCBC)) {
		auth_len -= hash_parms->pad_len;
		cipher_len -= hash_parms->pad_len;
	}
	flow_log("%s()\n", __func__);
	flow_log(" in:%u authFirst:%u\n",
		 req_opts->is_inbound, req_opts->auth_first);
	flow_log(" %s. cipher alg:%u mode:%u type %u\n",
		 spu_alg_name(cipher_parms->alg, cipher_parms->mode),
		 cipher_parms->alg, cipher_parms->mode, cipher_parms->type);
	flow_log(" key: %d\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
	flow_log(" iv: %d\n", cipher_parms->iv_len);
	flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
	flow_log(" auth alg:%u mode:%u type %u\n",
		 hash_parms->alg, hash_parms->mode, hash_parms->type);
	flow_log(" digestsize: %u\n", hash_parms->digestsize);
	flow_log(" authkey: %d\n", hash_parms->key_len);
	flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len);
	flow_log(" assoc_size:%u\n", aead_parms->assoc_size);
	flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len);
	flow_log(" data_size:%u\n", data_size);
	flow_log(" hash_pad_len:%u\n", hash_parms->pad_len);
	flow_log(" real_db_size:%u\n", real_db_size);
	flow_log(" auth_offset:%u auth_len:%u cipher_offset:%u cipher_len:%u\n",
		 auth_offset, auth_len, cipher_offset, cipher_len);
	flow_log(" aead_iv: %u\n", aead_parms->iv_len);
	/* starting out: zero the header (plus some) */
	ptr = spu_hdr;
	memset(ptr, 0, sizeof(struct SPUHEADER));

	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)ptr;
	ptr += sizeof(struct SPUHEADER);
	buf_len += sizeof(struct SPUHEADER);

	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);

	/* Format sctx word 0 (protocol_bits) */
	sctx_words = 3;	/* size in words */

	/* Format sctx word 1 (cipher_bits) */
	if (req_opts->is_inbound)
		cipher_bits |= CIPHER_INBOUND;
	if (req_opts->auth_first)
		cipher_bits |= CIPHER_ORDER;

	/* Set the crypto parameters in the cipher.flags */
	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

	/* Set the auth parameters in the cipher.flags */
	cipher_bits |= hash_parms->alg << HASH_ALG_SHIFT;
	cipher_bits |= hash_parms->mode << HASH_MODE_SHIFT;
	cipher_bits |= hash_parms->type << HASH_TYPE_SHIFT;
	/*
	 * Format sctx extensions if required, and update main fields if
	 * required.
	 */
	if (hash_parms->alg) {
		/* Write the authentication key material if present */
		if (hash_parms->key_len) {
			memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
			ptr += hash_parms->key_len;
			buf_len += hash_parms->key_len;
			sctx_words += hash_parms->key_len / 4;
		}

		if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
		    (cipher_parms->mode == CIPHER_MODE_CCM))
			/* unpadded length */
			offset_iv = aead_parms->assoc_size;

		/* if GCM/CCM we need to write ICV into the payload */
		if (!req_opts->is_inbound) {
			if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
			    (cipher_parms->mode == CIPHER_MODE_CCM))
				ecf_bits |= 1 << INSERT_ICV_SHIFT;
		} else {
			ecf_bits |= CHECK_ICV;
		}

		/* Inform the SPU of the ICV size (in words) */
		if (hash_parms->digestsize == 64)
			cipher_bits |= ICV_IS_512;
		else
			ecf_bits |=
			    (hash_parms->digestsize / 4) << ICV_SIZE_SHIFT;
	}

	if (req_opts->bd_suppress)
		ecf_bits |= BD_SUPPRESS;
	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg) {
		if (cipher_parms->key_len) {
			memcpy(ptr, cipher_parms->key_buf,
			       cipher_parms->key_len);
			ptr += cipher_parms->key_len;
			buf_len += cipher_parms->key_len;
			sctx_words += cipher_parms->key_len / 4;
		}

		/*
		 * if encrypting then set IV size, use SCTX IV unless no IV
		 * is given here.
		 */
		if (cipher_parms->iv_buf && cipher_parms->iv_len) {
			/* cipher iv provided so put it in here */
			memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);

			ptr += cipher_parms->iv_len;
			buf_len += cipher_parms->iv_len;
			sctx_words += cipher_parms->iv_len / 4;
		}
	}

	/*
	 * RFC4543 (GMAC/ESP) requires data to be sent as part of AAD
	 * so we need to override the BDESC parameters.
	 */
	if (req_opts->is_rfc4543) {
		if (req_opts->is_inbound)
			data_size -= hash_parms->digestsize;
		offset_iv = aead_parms->assoc_size + data_size;
		cipher_offset = offset_iv;
		auth_len = cipher_offset + aead_parms->data_pad_len;
	}
	/* write in the total sctx length now that we know it */
	protocol_bits |= sctx_words;

	/* Endian adjust the SCTX */
	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
	spuh->sa.ecf = cpu_to_be32(ecf_bits);

	/* === create the BDESC section === */
	bdesc = (struct BDESC_HEADER *)ptr;

	bdesc->offset_mac = cpu_to_be16(auth_offset);
	bdesc->length_mac = cpu_to_be16(auth_len);
	bdesc->offset_crypto = cpu_to_be16(cipher_offset);
	bdesc->length_crypto = cpu_to_be16(cipher_len);

	/*
	 * CCM in SPU-M requires that ICV not be in same 32-bit word as data or
	 * padding. So account for padding as necessary.
	 */
	if (cipher_parms->mode == CIPHER_MODE_CCM)
		auth_len += spum_wordalign_padlen(auth_len);

	bdesc->offset_icv = cpu_to_be16(auth_len);
	bdesc->offset_iv = cpu_to_be16(offset_iv);

	ptr += sizeof(struct BDESC_HEADER);
	buf_len += sizeof(struct BDESC_HEADER);

	/* === no MFM section === */

	/* === create the BD section === */

	/* add the BD header */
	bd = (struct BD_HEADER *)ptr;
	bd->size = cpu_to_be16(real_db_size);

	ptr += sizeof(struct BD_HEADER);
	buf_len += sizeof(struct BD_HEADER);

	packet_dump(" SPU request header: ", spu_hdr, buf_len);

	return buf_len;
}
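/*
 * Rough layout of the request header built above, as a reading aid (field
 * sizes depend on the SCTX contents): the fixed SPUHEADER (MH, EMH, and the
 * three SCTX words), then the optional authentication key, cipher key, and
 * SCTX IV, then the BDESC header, and finally the BD header. buf_len tracks
 * exactly these contributions, which is why it is returned as the header
 * length.
 */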
/**
 * spum_cipher_req_init() - Build a SPU request message header, up to and
 * including the BD header.
 * @spu_hdr: Start of SPU request header (MH)
 * @cipher_parms: Parameters that describe the cipher request
 *
 * Construct the message starting at spu_hdr. Caller should allocate this buffer
 * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
{
	struct SPUHEADER *spuh;
	u32 protocol_bits = 0;
	u32 cipher_bits = 0;
	u32 ecf_bits = 0;
	u8 sctx_words = 0;
	u8 *ptr = spu_hdr;

	flow_log("%s()\n", __func__);
	flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
		 cipher_parms->mode, cipher_parms->type);
	flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len);
	flow_log(" key: %d\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
	/* starting out: zero the header (plus some) */
	memset(spu_hdr, 0, sizeof(struct SPUHEADER));
	ptr += sizeof(struct SPUHEADER);

	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)spu_hdr;

	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);

	/* Format sctx word 0 (protocol_bits) */
	sctx_words = 3;	/* size in words */

	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg) {
		if (cipher_parms->key_len) {
			ptr += cipher_parms->key_len;
			sctx_words += cipher_parms->key_len / 4;
		}

		/*
		 * if encrypting then set IV size, use SCTX IV unless no IV
		 * is given here.
		 */
		if (cipher_parms->iv_len) {
			ptr += cipher_parms->iv_len;
			sctx_words += cipher_parms->iv_len / 4;
		}
	}

	/* Set the crypto parameters in the cipher.flags */
	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg && cipher_parms->key_len)
		memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);

	/* write in the total sctx length now that we know it */
	protocol_bits |= sctx_words;

	/* Endian adjust the SCTX */
	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);

	/* Endian adjust the SCTX */
	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
	spuh->sa.ecf = cpu_to_be32(ecf_bits);

	packet_dump(" SPU request header: ", spu_hdr,
		    sizeof(struct SPUHEADER));

	return sizeof(struct SPUHEADER) + cipher_parms->key_len +
		cipher_parms->iv_len + sizeof(struct BDESC_HEADER) +
		sizeof(struct BD_HEADER);
}
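/*
 * The value returned above is the full request header length that the caller
 * later passes back to spum_cipher_req_finish() as spu_req_hdr_len; that
 * routine subtracts sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER)
 * from it to locate the BDESC it must fill in.
 */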
/**
 * spum_cipher_req_finish() - Finish building a SPU request message header for a
 * block cipher request. Assumes much of the header was already filled in at
 * setkey() time in spum_cipher_req_init().
 * @spu_hdr: Start of the request message header (MH field)
 * @spu_req_hdr_len: Length in bytes of the SPU request header
 * @is_inbound: 0 encrypt, 1 decrypt
 * @cipher_parms: Parameters describing cipher operation to be performed
 * @data_size: Length of the data in the BD field
 *
 * Assumes much of the header was already filled in at setkey() time in
 * spum_cipher_req_init().
 * spum_cipher_req_init() fills in the encryption key.
 */
void spum_cipher_req_finish(u8 *spu_hdr,
			    u16 spu_req_hdr_len,
			    unsigned int is_inbound,
			    struct spu_cipher_parms *cipher_parms,
			    unsigned int data_size)
{
	struct SPUHEADER *spuh;
	struct BDESC_HEADER *bdesc;
	struct BD_HEADER *bd;
	u8 *bdesc_ptr = spu_hdr + spu_req_hdr_len -
			(sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER));
	u32 cipher_bits;

	flow_log("%s()\n", __func__);
	flow_log(" in: %u\n", is_inbound);
	flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
		 cipher_parms->type);

	/*
	 * In XTS mode, API puts "i" parameter (block tweak) in IV. For
	 * SPU-M, should be in start of the BD; tx_sg_create() copies it there.
	 * IV in SPU msg for SPU-M should be 0, since that's the "j" parameter
	 * (block ctr within larger data unit) - given we can send entire disk
	 * block (<= 4KB) in 1 SPU msg, don't need to use this parameter.
	 */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		memset(cipher_parms->iv_buf, 0, cipher_parms->iv_len);

	flow_log(" iv len: %d\n", cipher_parms->iv_len);
	flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
	flow_log(" data_size: %u\n", data_size);
	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)spu_hdr;

	/* cipher_bits was initialized at setkey time */
	cipher_bits = be32_to_cpu(spuh->sa.cipher_flags);

	/* Format sctx word 1 (cipher_bits) */
	if (is_inbound)
		cipher_bits |= CIPHER_INBOUND;
	else
		cipher_bits &= ~CIPHER_INBOUND;

	if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
		/* cipher iv provided so put it in here */
		memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
		       cipher_parms->iv_len);

	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);

	/* === create the BDESC section === */
	bdesc = (struct BDESC_HEADER *)bdesc_ptr;
	bdesc->offset_mac = 0;
	bdesc->length_mac = 0;
	bdesc->offset_crypto = 0;

	/* XTS mode, data_size needs to include tweak parameter */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		bdesc->length_crypto = cpu_to_be16(data_size +
						   SPU_XTS_TWEAK_SIZE);
	else
		bdesc->length_crypto = cpu_to_be16(data_size);

	bdesc->offset_icv = 0;
	bdesc->offset_iv = 0;

	/* === no MFM section === */

	/* === create the BD section === */
	/* add the BD header */
	bd = (struct BD_HEADER *)(bdesc_ptr + sizeof(struct BDESC_HEADER));
	bd->size = cpu_to_be16(data_size);

	/* XTS mode, data_size needs to include tweak parameter */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		bd->size = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE);
	else
		bd->size = cpu_to_be16(data_size);

	bd->prev_length = 0;

	packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
}
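/*
 * Note that the cipher IV written above lands immediately before the BDESC
 * header, i.e. in the SCTX IV slot that spum_cipher_req_init() accounted for
 * when it sized the header (sizeof(struct SPUHEADER) + key_len + iv_len
 * precede the BDESC and BD headers).
 */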
/**
 * spum_request_pad() - Create pad bytes at the end of the data.
 * @pad_start: Start of buffer where pad bytes are to be written
 * @gcm_ccm_padding: length of GCM/CCM padding, in bytes
 * @hash_pad_len: Number of bytes of padding to extend data to a full block
 * @auth_alg: authentication algorithm
 * @auth_mode: authentication mode
 * @total_sent: length inserted at end of hash pad
 * @status_padding: Number of bytes of padding to align STATUS word
 *
 * There may be three forms of pad:
 *  1. GCM/CCM pad - for GCM/CCM mode ciphers, pad to 16-byte alignment
 *  2. hash pad - pad to a block length, with 0x80 data terminator and
 *     total message length at the end
 *  3. STAT pad - to ensure the STAT field is 4-byte aligned
 */
void spum_request_pad(u8 *pad_start,
		      u32 gcm_ccm_padding,
		      u32 hash_pad_len,
		      enum hash_alg auth_alg,
		      enum hash_mode auth_mode,
		      unsigned int total_sent, u32 status_padding)
{
	u8 *ptr = pad_start;

	/* fix data alignment for GCM/CCM */
	if (gcm_ccm_padding > 0) {
		flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
			 gcm_ccm_padding);
		memset(ptr, 0, gcm_ccm_padding);
		ptr += gcm_ccm_padding;
	}

	if (hash_pad_len > 0) {
		/* clear the padding section */
		memset(ptr, 0, hash_pad_len);

		if ((auth_alg == HASH_ALG_AES) &&
		    (auth_mode == HASH_MODE_XCBC)) {
			/* AES/XCBC just requires padding to be 0s */
			ptr += hash_pad_len;
		} else {
			/* terminate the data */
			*ptr = 0x80;
			ptr += (hash_pad_len - sizeof(u64));

			/* add the size at the end as required per alg */
			if (auth_alg == HASH_ALG_MD5)
				*(__le64 *)ptr = cpu_to_le64(total_sent * 8ull);
			else /* SHA1, SHA2-224, SHA2-256 */
				*(__be64 *)ptr = cpu_to_be64(total_sent * 8ull);
			ptr += sizeof(u64);
		}
	}

	/* pad to a 4-byte alignment for STAT */
	if (status_padding > 0) {
		flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
			 status_padding);

		memset(ptr, 0, status_padding);
		ptr += status_padding;
	}
}
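/*
 * Example layout for a 100-byte SHA-256 chunk with no GCM/CCM padding:
 * spum_hash_pad_len() returns 28, so the hash pad written here is the 0x80
 * terminator, 19 zero bytes, and the 8-byte big-endian bit count
 * (total_sent * 8), followed by whatever STAT alignment padding is needed.
 */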
/**
 * spum_xts_tweak_in_payload() - Indicate that SPUM DOES place the XTS tweak
 * field in the packet payload (rather than using IV)
 *
 * Return: 1
 */
u8 spum_xts_tweak_in_payload(void)
{
	return 1;
}
/**
 * spum_tx_status_len() - Return the length of the STATUS field in a SPU
 * request message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_tx_status_len(void)
{
	return SPU_TX_STATUS_LEN;
}
/**
 * spum_rx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_rx_status_len(void)
{
	return SPU_RX_STATUS_LEN;
}
/**
 * spum_status_process() - Process the status from a SPU response message.
 * @statp: start of STATUS word
 *
 * Return:
 *   0 - if status is good and response should be processed
 *   !0 - status indicates an error and response is invalid
 */
int spum_status_process(u8 *statp)
{
	u32 status;

	status = __be32_to_cpu(*(__be32 *)statp);
	flow_log("SPU response STATUS %#08x\n", status);
	if (status & SPU_STATUS_ERROR_FLAG) {
		pr_err("%s() Warning: Error result from SPU: %#08x\n",
		       __func__, status);
		if (status & SPU_STATUS_INVALID_ICV)
			return SPU_INVALID_ICV;
		return -EBADMSG;
	}
	return 0;
}
/**
 * spum_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
 * @digestsize: Digest size of this request
 * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len
 * @assoclen: Length of AAD data
 * @chunksize: length of input data to be sent in this req
 * @is_encrypt: true if this is an output/encrypt operation
 * @is_esp: true if this is an ESP / RFC4309 operation
 */
void spum_ccm_update_iv(unsigned int digestsize,
			struct spu_cipher_parms *cipher_parms,
			unsigned int assoclen,
			unsigned int chunksize,
			bool is_encrypt,
			bool is_esp)
{
	u8 L;		/* L from CCM algorithm, length of plaintext data */
	u8 mprime;	/* M' from CCM algo, (M - 2) / 2, where M=authsize */
	u8 adata;

	if (cipher_parms->iv_len != CCM_AES_IV_SIZE) {
		pr_err("%s(): Invalid IV len %d for CCM mode, should be %d\n",
		       __func__, cipher_parms->iv_len, CCM_AES_IV_SIZE);
		return;
	}

	/*
	 * IV needs to be formatted as follows:
	 *
	 * |          Byte 0               | Bytes 1 - N | Bytes (N+1) - 15 |
	 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | Bits 7 - 0  |    Bits 7 - 0    |
	 * | 0 |Ad?|(M - 2) / 2|   L - 1   |    Nonce    | Plaintext Length |
	 *
	 * Ad? = 1 if AAD present, 0 if not present
	 * M = size of auth field, 8, 12, or 16 bytes (SPU-M) -or-
	 *     4, 6, 8, 10, 12, 14, 16 bytes (SPU2)
	 * L = Size of Plaintext Length field; Nonce size = 15 - L
	 *
	 * It appears that the crypto API already expects the L-1 portion
	 * to be set in the first byte of the IV, which implicitly determines
	 * the nonce size, and also fills in the nonce. But the other bits
	 * in byte 0 as well as the plaintext length need to be filled in.
	 *
	 * In rfc4309/esp mode, L is not already in the supplied IV and
	 * we need to fill it in, as well as move the IV data to be after
	 * the salt.
	 */
	if (is_esp) {
		L = CCM_ESP_L_VALUE;	/* RFC4309 has fixed L */
	} else {
		/* L' = L - 1, so the size of the Plaintext Length field is L' + 1 */
		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
		     CCM_B0_L_PRIME_SHIFT) + 1;
	}

	mprime = (digestsize - 2) >> 1;	/* M' = (M - 2) / 2 */
	adata = (assoclen > 0);		/* adata = 1 if any associated data */

	cipher_parms->iv_buf[0] = (adata << CCM_B0_ADATA_SHIFT) |
				  (mprime << CCM_B0_M_PRIME_SHIFT) |
				  ((L - 1) << CCM_B0_L_PRIME_SHIFT);

	/* Nonce is already filled in by crypto API, and is 15 - L bytes */

	/* Don't include digest in plaintext size when decrypting */
	if (!is_encrypt)
		chunksize -= digestsize;

	/* Fill in length of plaintext, formatted to be L bytes long */
	format_value_ccm(chunksize, &cipher_parms->iv_buf[15 - L + 1], L);
}
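/*
 * Example byte-0 computation using the bit layout documented above: for a
 * 16-byte digest (mprime = 7), non-empty AAD (adata = 1) and L = 4, the
 * flags byte becomes (1 << 6) | (7 << 3) | 3 = 0x7b, and the last L = 4
 * bytes of the IV carry the plaintext length.
 */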
/**
 * spum_wordalign_padlen() - Given the length of a data field, determine the
 * padding required to align the data following this field on a 4-byte boundary.
 * @data_size: length of data field in bytes
 *
 * Return: length of status field padding, in bytes
 */
u32 spum_wordalign_padlen(u32 data_size)
{
	return ((data_size + 3) & ~3) - data_size;
}
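/*
 * Example: data_size = 10 gives ((10 + 3) & ~3) - 10 = 2 pad bytes, so the
 * field that follows starts on the next 4-byte boundary; a data_size that is
 * already a multiple of 4 gets no padding.
 */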