// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

#include <linux/kernel.h>
#include <linux/string.h>

#include "util.h"
#include "spu.h"
#include "spum.h"
#include "cipher.h"
char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
        "sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };

char *aead_alg_name[] = { "ccm(aes)", "gcm(aes)", "authenc" };
/* Assumes SPU-M messages are in big endian */
void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
{
        u8 *ptr = buf;
        struct SPUHEADER *spuh = (struct SPUHEADER *)buf;
        unsigned int hash_key_len = 0;
        unsigned int hash_state_len = 0;
        unsigned int cipher_key_len = 0;
        unsigned int iv_len;
        u32 pflags;
        u32 cflags;
        u32 ecf;
        u32 cipher_alg;
        u32 cipher_mode;
        u32 cipher_type;
        u32 hash_alg;
        u32 hash_mode;
        u32 hash_type;
        u32 sctx_size;    /* SCTX length in words */
        u32 sctx_pl_len;  /* SCTX payload length in bytes */

        packet_log("SPU Message header %p len: %u\n", buf, buf_len);
        /* ========== Decode MH ========== */
        packet_log(" MH 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
        if (spuh->mh.flags & MH_SCTX_PRES)
                packet_log("  SCTX present\n");
        if (spuh->mh.flags & MH_BDESC_PRES)
                packet_log("  BDESC present\n");
        if (spuh->mh.flags & MH_MFM_PRES)
                packet_log("  MFM present\n");
        if (spuh->mh.flags & MH_BD_PRES)
                packet_log("  BD present\n");
        if (spuh->mh.flags & MH_HASH_PRES)
                packet_log("  HASH present\n");
        if (spuh->mh.flags & MH_SUPDT_PRES)
                packet_log("  SUPDT present\n");
        packet_log(" Opcode 0x%02x\n", spuh->mh.op_code);

        ptr += sizeof(spuh->mh) + sizeof(spuh->emh);  /* skip emh. unused */
        /* ========== Decode SCTX ========== */
        if (spuh->mh.flags & MH_SCTX_PRES) {
                pflags = be32_to_cpu(spuh->sa.proto_flags);
                packet_log(" SCTX[0] 0x%08x\n", pflags);
                sctx_size = pflags & SCTX_SIZE;
                packet_log("  Size %u words\n", sctx_size);

                cflags = be32_to_cpu(spuh->sa.cipher_flags);
                packet_log(" SCTX[1] 0x%08x\n", cflags);
                packet_log("  Inbound:%lu (1:decrypt/vrfy 0:encrypt/auth)\n",
                           (cflags & CIPHER_INBOUND) >> CIPHER_INBOUND_SHIFT);
                packet_log("  Order:%lu (1:AuthFirst 0:EncFirst)\n",
                           (cflags & CIPHER_ORDER) >> CIPHER_ORDER_SHIFT);
                packet_log("  ICV_IS_512:%lx\n",
                           (cflags & ICV_IS_512) >> ICV_IS_512_SHIFT);
                cipher_alg = (cflags & CIPHER_ALG) >> CIPHER_ALG_SHIFT;
                cipher_mode = (cflags & CIPHER_MODE) >> CIPHER_MODE_SHIFT;
                cipher_type = (cflags & CIPHER_TYPE) >> CIPHER_TYPE_SHIFT;
                packet_log("  Crypto Alg:%u Mode:%u Type:%u\n",
                           cipher_alg, cipher_mode, cipher_type);
                hash_alg = (cflags & HASH_ALG) >> HASH_ALG_SHIFT;
                hash_mode = (cflags & HASH_MODE) >> HASH_MODE_SHIFT;
                hash_type = (cflags & HASH_TYPE) >> HASH_TYPE_SHIFT;
                packet_log("  Hash Alg:%x Mode:%x Type:%x\n",
                           hash_alg, hash_mode, hash_type);
                packet_log("  UPDT_Offset:%u\n", cflags & UPDT_OFST);

                ecf = be32_to_cpu(spuh->sa.ecf);
                packet_log(" SCTX[2] 0x%08x\n", ecf);
                packet_log("  WriteICV:%lu CheckICV:%lu ICV_SIZE:%u ",
                           (ecf & INSERT_ICV) >> INSERT_ICV_SHIFT,
                           (ecf & CHECK_ICV) >> CHECK_ICV_SHIFT,
                           (ecf & ICV_SIZE) >> ICV_SIZE_SHIFT);
                packet_log("BD_SUPPRESS:%lu\n",
                           (ecf & BD_SUPPRESS) >> BD_SUPPRESS_SHIFT);
                packet_log("  SCTX_IV:%lu ExplicitIV:%lu GenIV:%lu ",
                           (ecf & SCTX_IV) >> SCTX_IV_SHIFT,
                           (ecf & EXPLICIT_IV) >> EXPLICIT_IV_SHIFT,
                           (ecf & GEN_IV) >> GEN_IV_SHIFT);
100 packet_log("IV_OV_OFST:%lu EXP_IV_SIZE:%u\n",
101 (ecf
& IV_OFFSET
) >> IV_OFFSET_SHIFT
,
104 ptr
+= sizeof(struct SCTX
);
106 if (hash_alg
&& hash_mode
) {
118 case HASH_ALG_SHA224
:
122 case HASH_ALG_SHA256
:
126 case HASH_ALG_SHA384
:
130 case HASH_ALG_SHA512
:
142 packet_log(" Auth Key Type:%s Length:%u Bytes\n",
144 packet_dump(" KEY: ", ptr
, hash_key_len
);
146 } else if ((hash_alg
== HASH_ALG_AES
) &&
147 (hash_mode
== HASH_MODE_XCBC
)) {
150 switch (cipher_type
) {
151 case CIPHER_TYPE_AES128
:
153 name
= "AES128-XCBC";
155 case CIPHER_TYPE_AES192
:
157 name
= "AES192-XCBC";
159 case CIPHER_TYPE_AES256
:
161 name
= "AES256-XCBC";
164 packet_log(" Auth Key Type:%s Length:%u Bytes\n",
166 packet_dump(" KEY: ", ptr
, hash_key_len
);
170 if (hash_alg
&& (hash_mode
== HASH_MODE_NONE
) &&
171 (hash_type
== HASH_TYPE_UPDT
)) {
183 case HASH_ALG_SHA224
:
187 case HASH_ALG_SHA256
:
191 case HASH_ALG_SHA384
:
195 case HASH_ALG_SHA512
:
207 packet_log(" Auth State Type:%s Length:%u Bytes\n",
208 name
, hash_state_len
);
209 packet_dump(" State: ", ptr
, hash_state_len
);
210 ptr
+= hash_state_len
;
                char *name = "NONE";

                switch (cipher_alg) {
                case CIPHER_ALG_3DES:
                        break;
                case CIPHER_ALG_RC4:
                        cipher_key_len = 260;
                        break;
                case CIPHER_ALG_AES:
                        switch (cipher_type) {
                        case CIPHER_TYPE_AES128:
                        case CIPHER_TYPE_AES192:
                        case CIPHER_TYPE_AES256:
                                break;
                        }
                        break;
                case CIPHER_ALG_NONE:
                        break;
                }

                packet_log(" Cipher Key Type:%s Length:%u Bytes\n",
                           name, cipher_key_len);

                /* XTS has two keys */
                if (cipher_mode == CIPHER_MODE_XTS) {
                        packet_dump(" KEY2: ", ptr, cipher_key_len);
                        ptr += cipher_key_len;
                        packet_dump(" KEY1: ", ptr, cipher_key_len);
                        ptr += cipher_key_len;
                } else {
                        packet_dump(" KEY: ", ptr, cipher_key_len);
                        ptr += cipher_key_len;
                }

                sctx_pl_len = sctx_size * sizeof(u32) - sizeof(struct SCTX);
                iv_len = sctx_pl_len -
                         (hash_key_len + hash_state_len + cipher_key_len);
                packet_log(" IV Length:%u Bytes\n", iv_len);
                packet_dump(" IV: ", ptr, iv_len);
                ptr += iv_len;
        }
        /* ========== Decode BDESC ========== */
        if (spuh->mh.flags & MH_BDESC_PRES) {
                struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;

                packet_log(" BDESC[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
                packet_log("  OffsetMAC:%u LengthMAC:%u\n",
                           be16_to_cpu(bdesc->offset_mac),
                           be16_to_cpu(bdesc->length_mac));
                ptr += sizeof(u32);

                packet_log(" BDESC[1] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
                packet_log("  OffsetCrypto:%u LengthCrypto:%u\n",
                           be16_to_cpu(bdesc->offset_crypto),
                           be16_to_cpu(bdesc->length_crypto));
                ptr += sizeof(u32);

                packet_log(" BDESC[2] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
                packet_log("  OffsetICV:%u OffsetIV:%u\n",
                           be16_to_cpu(bdesc->offset_icv),
                           be16_to_cpu(bdesc->offset_iv));
                ptr += sizeof(u32);
        }

        /* ========== Decode BD ========== */
        if (spuh->mh.flags & MH_BD_PRES) {
                struct BD_HEADER *bd = (struct BD_HEADER *)ptr;

                packet_log(" BD[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
                packet_log("  Size:%ubytes PrevLength:%u\n",
                           be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));
                ptr += sizeof(u32);
        }

        /* Double check sanity */
        if (buf + buf_len != ptr) {
                packet_log(" Packet parsed incorrectly. ");
                packet_log("buf:%p buf_len:%u buf+buf_len:%p ptr:%p\n",
                           buf, buf_len, buf + buf_len, ptr);
        }
}
/**
 * spum_ns2_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
                             enum spu_cipher_mode cipher_mode,
                             unsigned int blocksize)
{
        u32 max_payload = SPUM_NS2_MAX_PAYLOAD;
        u32 excess;

        /* In XTS on SPU-M, we'll need to insert tweak before input data */
        if (cipher_mode == CIPHER_MODE_XTS)
                max_payload -= SPU_XTS_TWEAK_SIZE;

        excess = max_payload % blocksize;

        return max_payload - excess;
}
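/*
 * Worked example (comment only; the starting limit below is an assumed number,
 * not the real SPUM_NS2_MAX_PAYLOAD): with a 64-byte hash block size and an
 * assumed limit of 10000 bytes, excess = 10000 % 64 = 16, so the function
 * would return 9984, the largest multiple of the block size that fits. For
 * XTS, the limit is first reduced by SPU_XTS_TWEAK_SIZE before the same
 * rounding is applied.
 */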
/**
 * spum_nsp_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
                             enum spu_cipher_mode cipher_mode,
                             unsigned int blocksize)
{
        u32 max_payload = SPUM_NSP_MAX_PAYLOAD;
        u32 excess;

        /* In XTS on SPU-M, we'll need to insert tweak before input data */
        if (cipher_mode == CIPHER_MODE_XTS)
                max_payload -= SPU_XTS_TWEAK_SIZE;

        excess = max_payload % blocksize;

        return max_payload - excess;
}
/**
 * spum_payload_length() - Given a SPU-M message header, extract the payload
 * length.
 * @spu_hdr: Start of SPU header
 *
 * Assumes just MH, EMH, BD (no SCTX, BDESC). Works for response frames.
 *
 * Return: payload length in bytes
 */
u32 spum_payload_length(u8 *spu_hdr)
{
        struct BD_HEADER *bd;
        u32 pl_len;

        /* Find BD header. skip MH, EMH */
        bd = (struct BD_HEADER *)(spu_hdr + 8);
        pl_len = be16_to_cpu(bd->size);

        return pl_len;
}
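/*
 * Reader's note (illustrative only): in a response frame laid out as
 * | MH | EMH | BD | data... |, the "+ 8" above skips the two leading 32-bit
 * words (MH and EMH) so that bd points at the BD header. A response carrying
 * 1500 payload bytes would have bd->size == cpu_to_be16(1500), and this
 * helper would return 1500.
 */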
/**
 * spum_response_hdr_len() - Given the length of the hash key and encryption
 * key, determine the expected length of a SPU response header.
 * @auth_key_len: authentication key length (bytes)
 * @enc_key_len: encryption key length (bytes)
 * @is_hash: true if response message is for a hash operation
 *
 * Return: length of SPU response header (bytes)
 */
u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
{
        if (is_hash)
                return SPU_HASH_RESP_HDR_LEN;
        else
                return SPU_RESP_HDR_LEN;
}
/**
 * spum_hash_pad_len() - Calculate the length of hash padding required to extend
 * data to a full block size.
 * @hash_alg: hash algorithm
 * @hash_mode: hash mode
 * @chunksize: length of data, in bytes
 * @hash_block_size: size of a block of data for hash algorithm
 *
 * Reserve space for 1 byte (0x80) start of pad and the total length as u64
 *
 * Return: length of hash pad in bytes
 */
u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
                      u32 chunksize, u16 hash_block_size)
{
        unsigned int length_len;
        unsigned int used_space_last_block;
        int hash_pad_len;

        /* AES-XCBC hash requires just padding to next block boundary */
        if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) {
                used_space_last_block = chunksize % hash_block_size;
                hash_pad_len = hash_block_size - used_space_last_block;
                if (hash_pad_len >= hash_block_size)
                        hash_pad_len -= hash_block_size;
        } else {
                used_space_last_block = chunksize % hash_block_size + 1;
                if ((hash_alg == HASH_ALG_SHA384) ||
                    (hash_alg == HASH_ALG_SHA512))
                        length_len = 2 * sizeof(u64);
                else
                        length_len = sizeof(u64);

                used_space_last_block += length_len;
                hash_pad_len = hash_block_size - used_space_last_block;
                if (hash_pad_len < 0)
                        hash_pad_len += hash_block_size;

                hash_pad_len += 1 + length_len;
        }
        return hash_pad_len;
}
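/*
 * Worked example (comment only): SHA-256 with hash_block_size = 64 and
 * chunksize = 100. used_space_last_block = 100 % 64 + 1 = 37; adding the
 * 8-byte length field gives 45, so hash_pad_len = 64 - 45 = 19 zero bytes,
 * then += 1 + 8 for the 0x80 terminator and the length, giving 28. The
 * padded stream is 100 + 28 = 128 bytes, exactly two hash blocks.
 */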
/**
 * spum_gcm_ccm_pad_len() - Determine the required length of GCM or CCM padding.
 * @cipher_mode: Algo type
 * @data_size: Length of plaintext (bytes)
 *
 * Return: Length of padding, in bytes
 */
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
                         unsigned int data_size)
{
        u32 pad_len = 0;
        u32 m1 = SPU_GCM_CCM_ALIGN - 1;

        if ((cipher_mode == CIPHER_MODE_GCM) ||
            (cipher_mode == CIPHER_MODE_CCM))
                pad_len = ((data_size + m1) & ~m1) - data_size;

        return pad_len;
}
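/*
 * Worked example (comment only, assuming SPU_GCM_CCM_ALIGN is the 16-byte AES
 * block size): for GCM with data_size = 30, m1 = 15 and
 * ((30 + 15) & ~15) - 30 = 32 - 30 = 2 pad bytes. A data_size that is already
 * a multiple of 16 yields 0.
 */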
/**
 * spum_assoc_resp_len() - Determine the size of the receive buffer required to
 * catch associated data.
 * @cipher_mode: cipher mode
 * @assoc_len: length of associated data (bytes)
 * @iv_len: length of IV (bytes)
 * @is_encrypt: true if encrypting. false if decrypting.
 *
 * Return: length of associated data in response message (bytes)
 */
u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
                        unsigned int assoc_len, unsigned int iv_len,
                        bool is_encrypt)
{
        u32 buflen = 0;
        u32 pad;

        if (assoc_len)
                buflen = assoc_len;

        if (cipher_mode == CIPHER_MODE_GCM) {
                /* AAD needs to be padded in responses too */
                pad = spum_gcm_ccm_pad_len(cipher_mode, buflen);
                buflen += pad;
        }
        if (cipher_mode == CIPHER_MODE_CCM) {
                /*
                 * AAD needs to be padded in responses too
                 * for CCM, len + 2 needs to be 128-bit aligned.
                 */
                pad = spum_gcm_ccm_pad_len(cipher_mode, buflen + 2);
                buflen += pad;
        }

        return buflen;
}
/**
 * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
 * in a SPU request after the AAD and before the payload.
 * @cipher_mode: cipher mode
 * @iv_len: initialization vector length in bytes
 *
 * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
 * to include the IV as a separate field in the SPU request msg.
 *
 * Return: Length of AEAD IV in bytes
 */
u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
{
        return 0;
}
/**
 * spum_hash_type() - Determine the type of hash operation.
 * @src_sent: The number of bytes in the current request that have already
 *            been sent to the SPU to be hashed.
 *
 * We do not use HASH_TYPE_FULL for requests that fit in a single SPU message.
 * Using FULL causes failures (such as when the string to be hashed is empty).
 * For similar reasons, we never use HASH_TYPE_FIN. Instead, submit messages
 * as INIT or UPDT and do the hash padding in sw.
 */
enum hash_type spum_hash_type(u32 src_sent)
{
        return src_sent ? HASH_TYPE_UPDT : HASH_TYPE_INIT;
}
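/*
 * Usage sketch (hypothetical caller, comment only): if a 200-byte hash
 * request is split into two 100-byte chunks, the first chunk is submitted
 * with spum_hash_type(0) == HASH_TYPE_INIT and the second with
 * spum_hash_type(100) == HASH_TYPE_UPDT; FULL and FIN are never requested,
 * per the note above, and the final padding is done in software.
 */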
/**
 * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
 * return.
 * @alg_digest_size: Number of bytes in the final digest for the given algo
 * @alg: The hash algorithm
 * @htype: Type of hash operation (init, update, full, etc)
 *
 * When doing incremental hashing for an algorithm with a truncated hash
 * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
 * a partial result for the next chunk.
 */
u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
                     enum hash_type htype)
{
        u32 digestsize = alg_digest_size;

        /* SPU returns complete digest when doing incremental hash and truncated
         * hash algo.
         */
        if ((htype == HASH_TYPE_INIT) || (htype == HASH_TYPE_UPDT)) {
                if (alg == HASH_ALG_SHA224)
                        digestsize = SHA256_DIGEST_SIZE;
                else if (alg == HASH_ALG_SHA384)
                        digestsize = SHA512_DIGEST_SIZE;
        }
        return digestsize;
}
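/*
 * Example (comment only): for incremental SHA-224 (htype INIT or UPDT) the
 * SPU is expected to return SHA256_DIGEST_SIZE (32) bytes even though the
 * caller passed alg_digest_size = 28, so the untruncated state can be fed
 * back for the next chunk; for other alg/htype combinations the value passed
 * in is returned unchanged.
 */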
/**
 * spum_create_request() - Build a SPU request message header, up to and
 * including the BD header. Construct the message starting at spu_hdr. Caller
 * should allocate this buffer in DMA-able memory at least SPU_HEADER_ALLOC_LEN
 * bytes long.
 * @spu_hdr: Start of buffer where SPU request header is to be written
 * @req_opts: SPU request message options
 * @cipher_parms: Parameters related to cipher algorithm
 * @hash_parms: Parameters related to hash algorithm
 * @aead_parms: Parameters related to AEAD operation
 * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
 *             not include length of AAD.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u32 spum_create_request(u8 *spu_hdr,
                        struct spu_request_opts *req_opts,
                        struct spu_cipher_parms *cipher_parms,
                        struct spu_hash_parms *hash_parms,
                        struct spu_aead_parms *aead_parms,
                        unsigned int data_size)
{
        struct SPUHEADER *spuh;
        struct BDESC_HEADER *bdesc;
        struct BD_HEADER *bd;
        u8 *ptr;
        u32 protocol_bits = 0;
        u32 cipher_bits = 0;
        u32 ecf_bits = 0;
        u8 sctx_words = 0;
        unsigned int buf_len = 0;

        /* size of the cipher payload */
        unsigned int cipher_len = hash_parms->prebuf_len + data_size +
                                  hash_parms->pad_len;

        /* offset of prebuf or data from end of BD header */
        unsigned int cipher_offset = aead_parms->assoc_size +
                        aead_parms->iv_len + aead_parms->aad_pad_len;

        /* total size of the DB data (without STAT word padding) */
        unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
                                                     aead_parms->iv_len,
                                                     hash_parms->prebuf_len,
                                                     data_size,
                                                     aead_parms->aad_pad_len,
                                                     aead_parms->data_pad_len,
                                                     hash_parms->pad_len);

        unsigned int auth_offset = 0;
        unsigned int offset_iv = 0;

        /* size/offset of the auth payload */
        unsigned int auth_len;

        auth_len = real_db_size;
        if (req_opts->is_aead && req_opts->is_inbound)
                cipher_len -= hash_parms->digestsize;

        if (req_opts->is_aead && req_opts->is_inbound)
                auth_len -= hash_parms->digestsize;

        if ((hash_parms->alg == HASH_ALG_AES) &&
            (hash_parms->mode == HASH_MODE_XCBC)) {
                auth_len -= hash_parms->pad_len;
                cipher_len -= hash_parms->pad_len;
        }
647 flow_log("%s()\n", __func__
);
648 flow_log(" in:%u authFirst:%u\n",
649 req_opts
->is_inbound
, req_opts
->auth_first
);
650 flow_log(" %s. cipher alg:%u mode:%u type %u\n",
651 spu_alg_name(cipher_parms
->alg
, cipher_parms
->mode
),
652 cipher_parms
->alg
, cipher_parms
->mode
, cipher_parms
->type
);
653 flow_log(" key: %d\n", cipher_parms
->key_len
);
654 flow_dump(" key: ", cipher_parms
->key_buf
, cipher_parms
->key_len
);
655 flow_log(" iv: %d\n", cipher_parms
->iv_len
);
656 flow_dump(" iv: ", cipher_parms
->iv_buf
, cipher_parms
->iv_len
);
657 flow_log(" auth alg:%u mode:%u type %u\n",
658 hash_parms
->alg
, hash_parms
->mode
, hash_parms
->type
);
659 flow_log(" digestsize: %u\n", hash_parms
->digestsize
);
660 flow_log(" authkey: %d\n", hash_parms
->key_len
);
661 flow_dump(" authkey: ", hash_parms
->key_buf
, hash_parms
->key_len
);
662 flow_log(" assoc_size:%u\n", aead_parms
->assoc_size
);
663 flow_log(" prebuf_len:%u\n", hash_parms
->prebuf_len
);
664 flow_log(" data_size:%u\n", data_size
);
665 flow_log(" hash_pad_len:%u\n", hash_parms
->pad_len
);
666 flow_log(" real_db_size:%u\n", real_db_size
);
667 flow_log(" auth_offset:%u auth_len:%u cipher_offset:%u cipher_len:%u\n",
668 auth_offset
, auth_len
, cipher_offset
, cipher_len
);
669 flow_log(" aead_iv: %u\n", aead_parms
->iv_len
);
        /* starting out: zero the header (plus some) */
        ptr = spu_hdr;
        memset(ptr, 0, sizeof(struct SPUHEADER));

        /* format master header word */
        /* Do not set the next bit even though the datasheet says to */
        spuh = (struct SPUHEADER *)ptr;
        ptr += sizeof(struct SPUHEADER);
        buf_len += sizeof(struct SPUHEADER);

        spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
        spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);
        /* Format sctx word 0 (protocol_bits) */
        sctx_words = 3;  /* size in words */

        /* Format sctx word 1 (cipher_bits) */
        if (req_opts->is_inbound)
                cipher_bits |= CIPHER_INBOUND;
        if (req_opts->auth_first)
                cipher_bits |= CIPHER_ORDER;

        /* Set the crypto parameters in the cipher.flags */
        cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
        cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
        cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

        /* Set the auth parameters in the cipher.flags */
        cipher_bits |= hash_parms->alg << HASH_ALG_SHIFT;
        cipher_bits |= hash_parms->mode << HASH_MODE_SHIFT;
        cipher_bits |= hash_parms->type << HASH_TYPE_SHIFT;
        /*
         * Format sctx extensions if required, and update main fields if
         * required.
         */
        if (hash_parms->alg) {
                /* Write the authentication key material if present */
                if (hash_parms->key_len) {
                        memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
                        ptr += hash_parms->key_len;
                        buf_len += hash_parms->key_len;
                        sctx_words += hash_parms->key_len / 4;
                }

                if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
                    (cipher_parms->mode == CIPHER_MODE_CCM))
                        /* unpadded length */
                        offset_iv = aead_parms->assoc_size;
                /* if GCM/CCM we need to write ICV into the payload */
                if (!req_opts->is_inbound) {
                        if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
                            (cipher_parms->mode == CIPHER_MODE_CCM))
                                ecf_bits |= 1 << INSERT_ICV_SHIFT;
                } else {
                        ecf_bits |= CHECK_ICV;
                }

                /* Inform the SPU of the ICV size (in words) */
                if (hash_parms->digestsize == 64)
                        cipher_bits |= ICV_IS_512;
                else
                        ecf_bits |= (hash_parms->digestsize / 4) <<
                                    ICV_SIZE_SHIFT;
        }

        if (req_opts->bd_suppress)
                ecf_bits |= BD_SUPPRESS;
        /* copy the encryption keys in the SAD entry */
        if (cipher_parms->alg) {
                if (cipher_parms->key_len) {
                        memcpy(ptr, cipher_parms->key_buf,
                               cipher_parms->key_len);
                        ptr += cipher_parms->key_len;
                        buf_len += cipher_parms->key_len;
                        sctx_words += cipher_parms->key_len / 4;
                }

                /*
                 * if encrypting then set IV size, use SCTX IV unless no IV
                 * given here
                 */
                if (cipher_parms->iv_buf && cipher_parms->iv_len) {
                        /* cipher iv provided so put it in here */
                        memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);

                        ptr += cipher_parms->iv_len;
                        buf_len += cipher_parms->iv_len;
                        sctx_words += cipher_parms->iv_len / 4;
                }
        }
        /*
         * RFC4543 (GMAC/ESP) requires data to be sent as part of AAD
         * so we need to override the BDESC parameters.
         */
        if (req_opts->is_rfc4543) {
                if (req_opts->is_inbound)
                        data_size -= hash_parms->digestsize;
                offset_iv = aead_parms->assoc_size + data_size;
                cipher_offset = offset_iv;
                auth_len = cipher_offset + aead_parms->data_pad_len;
        }
        /* write in the total sctx length now that we know it */
        protocol_bits |= sctx_words;

        /* Endian adjust the SCTX */
        spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
        spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
        spuh->sa.ecf = cpu_to_be32(ecf_bits);
        /* === create the BDESC section === */
        bdesc = (struct BDESC_HEADER *)ptr;

        bdesc->offset_mac = cpu_to_be16(auth_offset);
        bdesc->length_mac = cpu_to_be16(auth_len);
        bdesc->offset_crypto = cpu_to_be16(cipher_offset);
        bdesc->length_crypto = cpu_to_be16(cipher_len);

        /*
         * CCM in SPU-M requires that ICV not be in same 32-bit word as data or
         * padding. So account for padding as necessary.
         */
        if (cipher_parms->mode == CIPHER_MODE_CCM)
                auth_len += spum_wordalign_padlen(auth_len);

        bdesc->offset_icv = cpu_to_be16(auth_len);
        bdesc->offset_iv = cpu_to_be16(offset_iv);

        ptr += sizeof(struct BDESC_HEADER);
        buf_len += sizeof(struct BDESC_HEADER);
        /* === no MFM section === */

        /* === create the BD section === */
        /* add the BD header */
        bd = (struct BD_HEADER *)ptr;
        bd->size = cpu_to_be16(real_db_size);
        bd->prev_length = 0;

        ptr += sizeof(struct BD_HEADER);
        buf_len += sizeof(struct BD_HEADER);

        packet_dump(" SPU request header: ", spu_hdr, buf_len);

        return buf_len;
}
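/*
 * Informal sketch of the header emitted above (reference only):
 *
 *   | MH | EMH | SCTX words 0-2 | auth key | cipher key | IV | BDESC | BD |
 *
 * buf_len, and therefore the returned header length, grows only by the
 * optional fields actually written, and sctx_words tracks the SCTX size that
 * is folded into protocol_bits.
 */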
/**
 * spum_cipher_req_init() - Build a SPU request message header, up to and
 * including the BD header.
 * @spu_hdr: Start of SPU request header (MH)
 * @cipher_parms: Parameters that describe the cipher request
 *
 * Construct the message starting at spu_hdr. Caller should allocate this buffer
 * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
{
        struct SPUHEADER *spuh;
        u32 protocol_bits = 0;
        u32 cipher_bits = 0;
        u32 ecf_bits = 0;
        u8 sctx_words = 0;
        u8 *ptr = spu_hdr;
847 flow_log("%s()\n", __func__
);
848 flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms
->alg
,
849 cipher_parms
->mode
, cipher_parms
->type
);
850 flow_log(" cipher_iv_len: %u\n", cipher_parms
->iv_len
);
851 flow_log(" key: %d\n", cipher_parms
->key_len
);
852 flow_dump(" key: ", cipher_parms
->key_buf
, cipher_parms
->key_len
);
        /* starting out: zero the header (plus some) */
        memset(spu_hdr, 0, sizeof(struct SPUHEADER));
        ptr += sizeof(struct SPUHEADER);

        /* format master header word */
        /* Do not set the next bit even though the datasheet says to */
        spuh = (struct SPUHEADER *)spu_hdr;

        spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
        spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);
        /* Format sctx word 0 (protocol_bits) */
        sctx_words = 3;  /* size in words */

        /* copy the encryption keys in the SAD entry */
        if (cipher_parms->alg) {
                if (cipher_parms->key_len) {
                        ptr += cipher_parms->key_len;
                        sctx_words += cipher_parms->key_len / 4;
                }

                /*
                 * if encrypting then set IV size, use SCTX IV unless no IV
                 * given here
                 */
                if (cipher_parms->iv_len) {
                        ptr += cipher_parms->iv_len;
                        sctx_words += cipher_parms->iv_len / 4;
                }
        }
        /* Set the crypto parameters in the cipher.flags */
        cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
        cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
        cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

        /* copy the encryption keys in the SAD entry */
        if (cipher_parms->alg && cipher_parms->key_len)
                memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);

        /* write in the total sctx length now that we know it */
        protocol_bits |= sctx_words;
        /* Endian adjust the SCTX */
        spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
        spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
        spuh->sa.ecf = cpu_to_be32(ecf_bits);

        packet_dump(" SPU request header: ", spu_hdr,
                    sizeof(struct SPUHEADER));

        return sizeof(struct SPUHEADER) + cipher_parms->key_len +
                cipher_parms->iv_len + sizeof(struct BDESC_HEADER) +
                sizeof(struct BD_HEADER);
}
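/*
 * Worked example (comment only, assuming the fixed SPU-M structures are
 * 20 bytes of SPUHEADER (MH + EMH + three SCTX words), 12 bytes of
 * BDESC_HEADER and 4 bytes of BD_HEADER, matching the word counts dumped by
 * spum_dump_msg_hdr() above): an AES-128-CBC context with a 16-byte key and
 * 16-byte IV returns 20 + 16 + 16 + 12 + 4 = 68 bytes.
 */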
/**
 * spum_cipher_req_finish() - Finish building a SPU request message header for a
 * block cipher request. Assumes much of the header was already filled in at
 * setkey() time in spum_cipher_req_init().
 * @spu_hdr: Start of the request message header (MH field)
 * @spu_req_hdr_len: Length in bytes of the SPU request header
 * @is_inbound: 0 encrypt, 1 decrypt
 * @cipher_parms: Parameters describing cipher operation to be performed
 * @update_key: If true, rewrite the cipher key in SCTX
 * @data_size: Length of the data in the BD field
 *
 * spum_cipher_req_init() fills in the encryption key. For RC4, when submitting
 * a request for a non-first chunk, we use the 260-byte SUPDT field from the
 * previous response as the key. update_key is true for this case. Unused in all
 * other cases.
 */
void spum_cipher_req_finish(u8 *spu_hdr,
                            u16 spu_req_hdr_len,
                            unsigned int is_inbound,
                            struct spu_cipher_parms *cipher_parms,
                            bool update_key,
                            unsigned int data_size)
{
        struct SPUHEADER *spuh;
        struct BDESC_HEADER *bdesc;
        struct BD_HEADER *bd;
        u8 *bdesc_ptr = spu_hdr + spu_req_hdr_len -
                        (sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER));
        u32 cipher_bits;
947 flow_log("%s()\n", __func__
);
948 flow_log(" in: %u\n", is_inbound
);
949 flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms
->alg
,
952 flow_log(" cipher key len: %u\n", cipher_parms
->key_len
);
953 flow_dump(" key: ", cipher_parms
->key_buf
,
954 cipher_parms
->key_len
);
        /*
         * In XTS mode, API puts "i" parameter (block tweak) in IV. For
         * SPU-M, should be in start of the BD; tx_sg_create() copies it there.
         * IV in SPU msg for SPU-M should be 0, since that's the "j" parameter
         * (block ctr within larger data unit) - given we can send entire disk
         * block (<= 4KB) in 1 SPU msg, don't need to use this parameter.
         */
        if (cipher_parms->mode == CIPHER_MODE_XTS)
                memset(cipher_parms->iv_buf, 0, cipher_parms->iv_len);

        flow_log(" iv len: %d\n", cipher_parms->iv_len);
        flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
        flow_log(" data_size: %u\n", data_size);
        /* format master header word */
        /* Do not set the next bit even though the datasheet says to */
        spuh = (struct SPUHEADER *)spu_hdr;

        /* cipher_bits was initialized at setkey time */
        cipher_bits = be32_to_cpu(spuh->sa.cipher_flags);

        /* Format sctx word 1 (cipher_bits) */
        if (is_inbound)
                cipher_bits |= CIPHER_INBOUND;
        else
                cipher_bits &= ~CIPHER_INBOUND;
        /* update encryption key for RC4 on non-first chunk */
        if (update_key) {
                spuh->sa.cipher_flags |=
                        cipher_parms->type << CIPHER_TYPE_SHIFT;
                memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
        }
        if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
                /* cipher iv provided so put it in here */
                memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
                       cipher_parms->iv_len);

        spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
        /* === create the BDESC section === */
        bdesc = (struct BDESC_HEADER *)bdesc_ptr;
        bdesc->offset_mac = 0;
        bdesc->length_mac = 0;
        bdesc->offset_crypto = 0;

        /* XTS mode, data_size needs to include tweak parameter */
        if (cipher_parms->mode == CIPHER_MODE_XTS)
                bdesc->length_crypto = cpu_to_be16(data_size +
                                                   SPU_XTS_TWEAK_SIZE);
        else
                bdesc->length_crypto = cpu_to_be16(data_size);

        bdesc->offset_icv = 0;
        bdesc->offset_iv = 0;
        /* === no MFM section === */

        /* === create the BD section === */
        /* add the BD header */
        bd = (struct BD_HEADER *)(bdesc_ptr + sizeof(struct BDESC_HEADER));
        bd->size = cpu_to_be16(data_size);

        /* XTS mode, data_size needs to include tweak parameter */
        if (cipher_parms->mode == CIPHER_MODE_XTS)
                bd->size = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE);
        else
                bd->size = cpu_to_be16(data_size);

        bd->prev_length = 0;

        packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
}
/**
 * spum_request_pad() - Create pad bytes at the end of the data.
 * @pad_start: Start of buffer where pad bytes are to be written
 * @gcm_ccm_padding: length of GCM/CCM padding, in bytes
 * @hash_pad_len: Number of bytes of padding extend data to full block
 * @auth_alg: authentication algorithm
 * @auth_mode: authentication mode
 * @total_sent: length inserted at end of hash pad
 * @status_padding: Number of bytes of padding to align STATUS word
 *
 * There may be three forms of pad:
 *  1. GCM/CCM pad - for GCM/CCM mode ciphers, pad to 16-byte alignment
 *  2. hash pad - pad to a block length, with 0x80 data terminator and
 *     total length (in bits) at the end
 *  3. STAT pad - to ensure the STAT field is 4-byte aligned
 */
void spum_request_pad(u8 *pad_start,
                      u32 gcm_ccm_padding,
                      u32 hash_pad_len,
                      enum hash_alg auth_alg,
                      enum hash_mode auth_mode,
                      unsigned int total_sent, u32 status_padding)
{
        u8 *ptr = pad_start;

        /* fix data alignment for GCM/CCM */
        if (gcm_ccm_padding > 0) {
                flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
                         gcm_ccm_padding);
                memset(ptr, 0, gcm_ccm_padding);
                ptr += gcm_ccm_padding;
        }

        if (hash_pad_len > 0) {
                /* clear the padding section */
                memset(ptr, 0, hash_pad_len);

                if ((auth_alg == HASH_ALG_AES) &&
                    (auth_mode == HASH_MODE_XCBC)) {
                        /* AES/XCBC just requires padding to be 0s */
                        ptr += hash_pad_len;
                } else {
                        /* terminate the data */
                        *ptr = 0x80;
                        ptr += (hash_pad_len - sizeof(u64));

                        /* add the size at the end as required per alg */
                        if (auth_alg == HASH_ALG_MD5)
                                *(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
                        else /* SHA1, SHA2-224, SHA2-256 */
                                *(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
                        ptr += sizeof(u64);
                }
        }

        /* pad to a 4byte alignment for STAT */
        if (status_padding > 0) {
                flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
                         status_padding);

                memset(ptr, 0, status_padding);
                ptr += status_padding;
        }
}
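/*
 * Layout sketch of the pad region written above (comment only):
 *
 *   | GCM/CCM zero pad | 0x80, zeros..., total length in bits | STAT pad |
 *
 * For an MD5/SHA chunk the middle section is hash_pad_len bytes: one 0x80
 * byte, zero fill, then (u64)total_sent * 8 in the byte order the algorithm
 * expects (little endian for MD5, big endian otherwise). AES-XCBC uses zeros
 * only.
 */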
/**
 * spum_xts_tweak_in_payload() - Indicate that SPUM DOES place the XTS tweak
 * field in the packet payload (rather than using IV)
 */
u8 spum_xts_tweak_in_payload(void)
{
        return 1;
}

/**
 * spum_tx_status_len() - Return the length of the STATUS field in a SPU
 * request message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_tx_status_len(void)
{
        return SPU_TX_STATUS_LEN;
}

/**
 * spum_rx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_rx_status_len(void)
{
        return SPU_RX_STATUS_LEN;
}
/**
 * spum_status_process() - Process the status from a SPU response message.
 * @statp: start of STATUS word
 *
 * Return:
 *   0 - if status is good and response should be processed
 *   !0 - status indicates an error and response is invalid
 */
int spum_status_process(u8 *statp)
{
        u32 status;

        status = __be32_to_cpu(*(__be32 *)statp);
        flow_log("SPU response STATUS %#08x\n", status);
        if (status & SPU_STATUS_ERROR_FLAG) {
                pr_err("%s() Warning: Error result from SPU: %#08x\n",
                       __func__, status);
                if (status & SPU_STATUS_INVALID_ICV)
                        return SPU_INVALID_ICV;
                return -EBADMSG;
        }
        return 0;
}
/**
 * spum_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
 * @digestsize: Digest size of this request
 * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len
 * @assoclen: Length of AAD data
 * @chunksize: length of input data to be sent in this req
 * @is_encrypt: true if this is an output/encrypt operation
 * @is_esp: true if this is an ESP / RFC4309 operation
 */
void spum_ccm_update_iv(unsigned int digestsize,
                        struct spu_cipher_parms *cipher_parms,
                        unsigned int assoclen,
                        unsigned int chunksize,
                        bool is_encrypt,
                        bool is_esp)
{
        u8 L;       /* L from CCM algorithm, length of plaintext data */
        u8 mprime;  /* M' from CCM algo, (M - 2) / 2, where M=authsize */
        u8 adata;

        if (cipher_parms->iv_len != CCM_AES_IV_SIZE) {
                pr_err("%s(): Invalid IV len %d for CCM mode, should be %d\n",
                       __func__, cipher_parms->iv_len, CCM_AES_IV_SIZE);
                return;
        }

        /*
         * IV needs to be formatted as follows:
         *
         * |          Byte 0               | Bytes 1 - N | Bytes (N+1) - 15 |
         * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | Bits 7 - 0  |    Bits 7 - 0    |
         * | 0 |Ad?|(M - 2) / 2|   L - 1   |    Nonce    | Plaintext Length |
         *
         * Ad? = 1 if AAD present, 0 if not present
         * M = size of auth field, 8, 12, or 16 bytes (SPU-M) -or-
         *     4, 6, 8, 10, 12, 14, 16 bytes (SPU2)
         * L = Size of Plaintext Length field; Nonce size = 15 - L
         *
         * It appears that the crypto API already expects the L-1 portion
         * to be set in the first byte of the IV, which implicitly determines
         * the nonce size, and also fills in the nonce. But the other bits
         * in byte 0 as well as the plaintext length need to be filled in.
         *
         * In rfc4309/esp mode, L is not already in the supplied IV and
         * we need to fill it in, as well as move the IV data to be after
         * the salt.
         */
        if (is_esp) {
                L = CCM_ESP_L_VALUE;  /* RFC4309 has fixed L */
        } else {
                /* L' = plaintext length - 1 so Plaintext length is L' + 1 */
                L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
                     CCM_B0_L_PRIME_SHIFT) + 1;
        }

        mprime = (digestsize - 2) >> 1;  /* M' = (M - 2) / 2 */
        adata = (assoclen > 0);          /* adata = 1 if any associated data */

        cipher_parms->iv_buf[0] = (adata << CCM_B0_ADATA_SHIFT) |
                                  (mprime << CCM_B0_M_PRIME_SHIFT) |
                                  ((L - 1) << CCM_B0_L_PRIME_SHIFT);

        /* Nonce is already filled in by crypto API, and is 15 - L bytes */

        /* Don't include digest in plaintext size when decrypting */
        if (!is_encrypt)
                chunksize -= digestsize;

        /* Fill in length of plaintext, formatted to be L bytes long */
        format_value_ccm(chunksize, &cipher_parms->iv_buf[15 - L + 1], L);
}
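/*
 * Worked example (comment only, assuming the standard CCM flags-byte layout:
 * Adata in bit 6, M' in bits 5-3, L' in bits 2-0): for a 16-byte digest
 * (M = 16, so M' = 7), non-empty AAD (adata = 1) and L = 4, byte 0 becomes
 * 0x40 | 0x38 | 0x03 = 0x7b, and the plaintext length is then written into
 * the last L = 4 bytes of the IV by format_value_ccm().
 */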
/**
 * spum_wordalign_padlen() - Given the length of a data field, determine the
 * padding required to align the data following this field on a 4-byte boundary.
 * @data_size: length of data field in bytes
 *
 * Return: length of status field padding, in bytes
 */
u32 spum_wordalign_padlen(u32 data_size)
{
        return ((data_size + 3) & ~3) - data_size;
}