// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2016 Broadcom
 */

#include <linux/kernel.h>
#include <linux/string.h>

char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
	"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };

char *aead_alg_name[] = { "ccm(aes)", "gcm(aes)", "authenc" };

/* Assumes SPU-M messages are in big endian */
void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
{
	u8 *ptr = buf;
	struct SPUHEADER *spuh = (struct SPUHEADER *)buf;
	unsigned int hash_key_len = 0;
	unsigned int hash_state_len = 0;
	unsigned int cipher_key_len = 0;
	unsigned int iv_len;
	u32 pflags;
	u32 cflags;
	u32 ecf;
	u32 cipher_alg;
	u32 cipher_mode;
	u32 cipher_type;
	u32 hash_alg;
	u32 hash_mode;
	u32 hash_type;
	u32 sctx_size;		/* SCTX length in words */
	u32 sctx_pl_len;	/* SCTX payload length in bytes */

	packet_log("SPU Message header %p len: %u\n", buf, buf_len);

	/* ========== Decode MH ========== */
	packet_log(" MH 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
	if (spuh->mh.flags & MH_SCTX_PRES)
		packet_log(" SCTX present\n");
	if (spuh->mh.flags & MH_BDESC_PRES)
		packet_log(" BDESC present\n");
	if (spuh->mh.flags & MH_MFM_PRES)
		packet_log(" MFM present\n");
	if (spuh->mh.flags & MH_BD_PRES)
		packet_log(" BD present\n");
	if (spuh->mh.flags & MH_HASH_PRES)
		packet_log(" HASH present\n");
	if (spuh->mh.flags & MH_SUPDT_PRES)
		packet_log(" SUPDT present\n");
	packet_log(" Opcode 0x%02x\n", spuh->mh.op_code);

	ptr += sizeof(spuh->mh) + sizeof(spuh->emh);	/* skip emh. unused */

	/* ========== Decode SCTX ========== */
	if (spuh->mh.flags & MH_SCTX_PRES) {
		pflags = be32_to_cpu(spuh->sa.proto_flags);
		packet_log(" SCTX[0] 0x%08x\n", pflags);
		sctx_size = pflags & SCTX_SIZE;
		packet_log(" Size %u words\n", sctx_size);

		cflags = be32_to_cpu(spuh->sa.cipher_flags);
		packet_log(" SCTX[1] 0x%08x\n", cflags);
		packet_log(" Inbound:%lu (1:decrypt/vrfy 0:encrypt/auth)\n",
			   (cflags & CIPHER_INBOUND) >> CIPHER_INBOUND_SHIFT);
		packet_log(" Order:%lu (1:AuthFirst 0:EncFirst)\n",
			   (cflags & CIPHER_ORDER) >> CIPHER_ORDER_SHIFT);
		packet_log(" ICV_IS_512:%lx\n",
			   (cflags & ICV_IS_512) >> ICV_IS_512_SHIFT);
		cipher_alg = (cflags & CIPHER_ALG) >> CIPHER_ALG_SHIFT;
		cipher_mode = (cflags & CIPHER_MODE) >> CIPHER_MODE_SHIFT;
		cipher_type = (cflags & CIPHER_TYPE) >> CIPHER_TYPE_SHIFT;
		packet_log(" Crypto Alg:%u Mode:%u Type:%u\n",
			   cipher_alg, cipher_mode, cipher_type);
		hash_alg = (cflags & HASH_ALG) >> HASH_ALG_SHIFT;
		hash_mode = (cflags & HASH_MODE) >> HASH_MODE_SHIFT;
		hash_type = (cflags & HASH_TYPE) >> HASH_TYPE_SHIFT;
		packet_log(" Hash Alg:%x Mode:%x Type:%x\n",
			   hash_alg, hash_mode, hash_type);
		packet_log(" UPDT_Offset:%u\n", cflags & UPDT_OFST);

		ecf = be32_to_cpu(spuh->sa.ecf);
		packet_log(" SCTX[2] 0x%08x\n", ecf);
		packet_log(" WriteICV:%lu CheckICV:%lu ICV_SIZE:%u ",
			   (ecf & INSERT_ICV) >> INSERT_ICV_SHIFT,
			   (ecf & CHECK_ICV) >> CHECK_ICV_SHIFT,
			   (ecf & ICV_SIZE) >> ICV_SIZE_SHIFT);
		packet_log("BD_SUPPRESS:%lu\n",
			   (ecf & BD_SUPPRESS) >> BD_SUPPRESS_SHIFT);
		packet_log(" SCTX_IV:%lu ExplicitIV:%lu GenIV:%lu ",
			   (ecf & SCTX_IV) >> SCTX_IV_SHIFT,
			   (ecf & EXPLICIT_IV) >> EXPLICIT_IV_SHIFT,
			   (ecf & GEN_IV) >> GEN_IV_SHIFT);
100 packet_log("IV_OV_OFST:%lu EXP_IV_SIZE:%u\n",
101 (ecf
& IV_OFFSET
) >> IV_OFFSET_SHIFT
,
104 ptr
+= sizeof(struct SCTX
);

		if (hash_alg && hash_mode) {
			switch (hash_alg) {
			case HASH_ALG_SHA224:
			case HASH_ALG_SHA256:
			case HASH_ALG_SHA384:
			case HASH_ALG_SHA512:
				break;
			}

			packet_log(" Auth Key Type:%s Length:%u Bytes\n",
				   name, hash_key_len);
			packet_dump(" KEY: ", ptr, hash_key_len);
		} else if ((hash_alg == HASH_ALG_AES) &&
			   (hash_mode == HASH_MODE_XCBC)) {
			switch (cipher_type) {
			case CIPHER_TYPE_AES128:
				name = "AES128-XCBC";
				break;
			case CIPHER_TYPE_AES192:
				name = "AES192-XCBC";
				break;
			case CIPHER_TYPE_AES256:
				name = "AES256-XCBC";
				break;
			}

			packet_log(" Auth Key Type:%s Length:%u Bytes\n",
				   name, hash_key_len);
			packet_dump(" KEY: ", ptr, hash_key_len);
		}

		if (hash_alg && (hash_mode == HASH_MODE_NONE) &&
		    (hash_type == HASH_TYPE_UPDT)) {
			switch (hash_alg) {
			case HASH_ALG_SHA224:
			case HASH_ALG_SHA256:
			case HASH_ALG_SHA384:
			case HASH_ALG_SHA512:
				break;
			}

			packet_log(" Auth State Type:%s Length:%u Bytes\n",
				   name, hash_state_len);
			packet_dump(" State: ", ptr, hash_state_len);
			ptr += hash_state_len;
		}

		switch (cipher_alg) {
		case CIPHER_ALG_3DES:
		case CIPHER_ALG_AES:
			switch (cipher_type) {
			case CIPHER_TYPE_AES128:
			case CIPHER_TYPE_AES192:
			case CIPHER_TYPE_AES256:
				break;
			}
			break;
		case CIPHER_ALG_NONE:
			break;
		}

		packet_log(" Cipher Key Type:%s Length:%u Bytes\n",
			   name, cipher_key_len);

		/* XTS has two keys */
		if (cipher_mode == CIPHER_MODE_XTS) {
			packet_dump(" KEY2: ", ptr, cipher_key_len);
			ptr += cipher_key_len;
			packet_dump(" KEY1: ", ptr, cipher_key_len);
			ptr += cipher_key_len;
		} else {
			packet_dump(" KEY: ", ptr, cipher_key_len);
			ptr += cipher_key_len;
		}

		sctx_pl_len = sctx_size * sizeof(u32) - sizeof(struct SCTX);
		iv_len = sctx_pl_len -
			 (hash_key_len + hash_state_len + cipher_key_len);
		packet_log(" IV Length:%u Bytes\n", iv_len);
		packet_dump(" IV: ", ptr, iv_len);
	}

	/* ========== Decode BDESC ========== */
	if (spuh->mh.flags & MH_BDESC_PRES) {
		struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;

		packet_log(" BDESC[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" OffsetMAC:%u LengthMAC:%u\n",
			   be16_to_cpu(bdesc->offset_mac),
			   be16_to_cpu(bdesc->length_mac));
		ptr += sizeof(u32);

		packet_log(" BDESC[1] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" OffsetCrypto:%u LengthCrypto:%u\n",
			   be16_to_cpu(bdesc->offset_crypto),
			   be16_to_cpu(bdesc->length_crypto));
		ptr += sizeof(u32);

		packet_log(" BDESC[2] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" OffsetICV:%u OffsetIV:%u\n",
			   be16_to_cpu(bdesc->offset_icv),
			   be16_to_cpu(bdesc->offset_iv));
		ptr += sizeof(u32);
	}

	/* ========== Decode BD ========== */
	if (spuh->mh.flags & MH_BD_PRES) {
		struct BD_HEADER *bd = (struct BD_HEADER *)ptr;

		packet_log(" BD[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" Size:%ubytes PrevLength:%u\n",
			   be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));
	}

	/* Double check sanity */
	if (buf + buf_len != ptr) {
		packet_log(" Packet parsed incorrectly. ");
		packet_log("buf:%p buf_len:%u buf+buf_len:%p ptr:%p\n",
			   buf, buf_len, buf + buf_len, ptr);
	}
}

/**
 * spum_ns2_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload-sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     unsigned int blocksize)
{
	u32 max_payload = SPUM_NS2_MAX_PAYLOAD;
	u32 excess;

	/* In XTS on SPU-M, we'll need to insert tweak before input data */
	if (cipher_mode == CIPHER_MODE_XTS)
		max_payload -= SPU_XTS_TWEAK_SIZE;

	excess = max_payload % blocksize;

	return max_payload - excess;
}
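
/*
 * Illustrative example (the numbers below are hypothetical, not the real
 * hardware limits): if the XTS-adjusted maximum were 8184 bytes and blocksize
 * were 16, then excess = 8184 % 16 = 8 and the function would return 8176,
 * which divides evenly into 16-byte blocks.
 */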

/**
 * spum_nsp_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload-sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     unsigned int blocksize)
{
	u32 max_payload = SPUM_NSP_MAX_PAYLOAD;
	u32 excess;

	/* In XTS on SPU-M, we'll need to insert tweak before input data */
	if (cipher_mode == CIPHER_MODE_XTS)
		max_payload -= SPU_XTS_TWEAK_SIZE;

	excess = max_payload % blocksize;

	return max_payload - excess;
}

/**
 * spum_payload_length() - Given a SPU-M message header, extract the payload
 * length.
 * @spu_hdr: Start of SPU header
 *
 * Assumes just MH, EMH, BD (no SCTX, BDESC). Works for response frames.
 *
 * Return: payload length in bytes
 */
u32 spum_payload_length(u8 *spu_hdr)
{
	struct BD_HEADER *bd;
	u16 pl_len;

	/* Find BD header. skip MH, EMH */
	bd = (struct BD_HEADER *)(spu_hdr + 8);
	pl_len = be16_to_cpu(bd->size);

	return pl_len;
}

/**
 * spum_response_hdr_len() - Given the length of the hash key and encryption
 * key, determine the expected length of a SPU response header.
 * @auth_key_len: authentication key length (bytes)
 * @enc_key_len: encryption key length (bytes)
 * @is_hash: true if response message is for a hash operation
 *
 * Return: length of SPU response header (bytes)
 */
u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
{
	if (is_hash)
		return SPU_HASH_RESP_HDR_LEN;
	else
		return SPU_RESP_HDR_LEN;
}

/**
 * spum_hash_pad_len() - Calculate the length of hash padding required to extend
 * data to a full block size.
 * @hash_alg: hash algorithm
 * @hash_mode: hash mode
 * @chunksize: length of data, in bytes
 * @hash_block_size: size of a block of data for hash algorithm
 *
 * Reserve space for 1 byte (0x80) start of pad and the total length as u64
 *
 * Return: length of hash pad in bytes
 */
u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
		      u32 chunksize, u16 hash_block_size)
{
	unsigned int length_len;
	unsigned int used_space_last_block;
	int hash_pad_len;

	/* AES-XCBC hash requires just padding to next block boundary */
	if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) {
		used_space_last_block = chunksize % hash_block_size;
		hash_pad_len = hash_block_size - used_space_last_block;
		if (hash_pad_len >= hash_block_size)
			hash_pad_len -= hash_block_size;
	} else {
		used_space_last_block = chunksize % hash_block_size + 1;
		if ((hash_alg == HASH_ALG_SHA384) ||
		    (hash_alg == HASH_ALG_SHA512))
			length_len = 2 * sizeof(u64);
		else
			length_len = sizeof(u64);

		used_space_last_block += length_len;
		hash_pad_len = hash_block_size - used_space_last_block;
		if (hash_pad_len < 0)
			hash_pad_len += hash_block_size;

		hash_pad_len += 1 + length_len;
	}
	return hash_pad_len;
}
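
/*
 * Worked example (illustrative numbers): hashing 100 bytes with SHA-256,
 * whose block size is 64 bytes and whose length field is a single u64:
 * used_space_last_block = 100 % 64 + 1 = 37, plus 8 length bytes = 45, so
 * hash_pad_len = 64 - 45 = 19 and the final result is 19 + 1 + 8 = 28.
 * Check: 100 + 28 = 128, a multiple of the 64-byte block size.
 */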

/**
 * spum_gcm_ccm_pad_len() - Determine the required length of GCM or CCM padding.
 * @cipher_mode: Algo type
 * @data_size: Length of plaintext (bytes)
 *
 * Return: Length of padding, in bytes
 */
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
			 unsigned int data_size)
{
	u32 pad_len = 0;
	u32 m1 = SPU_GCM_CCM_ALIGN - 1;

	if ((cipher_mode == CIPHER_MODE_GCM) ||
	    (cipher_mode == CIPHER_MODE_CCM))
		pad_len = ((data_size + m1) & ~m1) - data_size;

	return pad_len;
}
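
/*
 * Worked example (assuming SPU_GCM_CCM_ALIGN is 16): for 20 bytes of data in
 * GCM mode, m1 = 15 and pad_len = ((20 + 15) & ~15) - 20 = 32 - 20 = 12.
 */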

/**
 * spum_assoc_resp_len() - Determine the size of the receive buffer required to
 * catch associated data.
 * @cipher_mode: cipher mode
 * @assoc_len: length of associated data (bytes)
 * @iv_len: length of IV (bytes)
 * @is_encrypt: true if encrypting. false if decrypting.
 *
 * Return: length of associated data in response message (bytes)
 */
u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
			unsigned int assoc_len, unsigned int iv_len,
			bool is_encrypt)
{
	u32 buflen = 0;
	u32 pad;

	if (assoc_len)
		buflen = assoc_len;

	if (cipher_mode == CIPHER_MODE_GCM) {
		/* AAD needs to be padded in responses too */
		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen);
		buflen += pad;
	}
	if (cipher_mode == CIPHER_MODE_CCM) {
		/*
		 * AAD needs to be padded in responses too;
		 * for CCM, len + 2 needs to be 128-bit aligned.
		 */
		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen + 2);
		buflen += pad;
	}

	return buflen;
}

/**
 * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
 * in a SPU request after the AAD and before the payload.
 * @cipher_mode: cipher mode
 * @iv_len: initialization vector length in bytes
 *
 * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
 * to include the IV as a separate field in the SPU request msg.
 *
 * Return: Length of AEAD IV in bytes
 */
u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
{
	return 0;
}

/**
 * spum_hash_type() - Determine the type of hash operation.
 * @src_sent: The number of bytes in the current request that have already
 * been sent to the SPU to be hashed.
 *
 * We do not use HASH_TYPE_FULL for requests that fit in a single SPU message.
 * Using FULL causes failures (such as when the string to be hashed is empty).
 * For similar reasons, we never use HASH_TYPE_FIN. Instead, submit messages
 * as INIT or UPDT and do the hash padding in software.
 */
enum hash_type spum_hash_type(u32 src_sent)
{
	return src_sent ? HASH_TYPE_UPDT : HASH_TYPE_INIT;
}

/**
 * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
 * return.
 * @alg_digest_size: Number of bytes in the final digest for the given algo
 * @alg: The hash algorithm
 * @htype: Type of hash operation (init, update, full, etc)
 *
 * When doing incremental hashing for an algorithm with a truncated hash
 * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
 * a partial result for the next chunk.
 *
 * Return: Digest size in bytes
 */
u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
		     enum hash_type htype)
{
	u32 digestsize = alg_digest_size;

	/*
	 * SPU returns complete digest when doing incremental hash with a
	 * truncated digest algorithm.
	 */
	if ((htype == HASH_TYPE_INIT) || (htype == HASH_TYPE_UPDT)) {
		if (alg == HASH_ALG_SHA224)
			digestsize = SHA256_DIGEST_SIZE;
		else if (alg == HASH_ALG_SHA384)
			digestsize = SHA512_DIGEST_SIZE;
	}
	return digestsize;
}
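
/*
 * For example, an incremental (INIT or UPDT) SHA-224 request reports
 * SHA256_DIGEST_SIZE (32 bytes) rather than the truncated 28-byte digest,
 * so the full internal state can be fed back for the next chunk.
 */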

/**
 * spum_create_request() - Build a SPU request message header, up to and
 * including the BD header. Construct the message starting at spu_hdr. Caller
 * should allocate this buffer in DMA-able memory at least SPU_HEADER_ALLOC_LEN
 * bytes long.
 * @spu_hdr: Start of buffer where SPU request header is to be written
 * @req_opts: SPU request message options
 * @cipher_parms: Parameters related to cipher algorithm
 * @hash_parms: Parameters related to hash algorithm
 * @aead_parms: Parameters related to AEAD operation
 * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
 *	       not include length of AAD.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u32 spum_create_request(u8 *spu_hdr,
			struct spu_request_opts *req_opts,
			struct spu_cipher_parms *cipher_parms,
			struct spu_hash_parms *hash_parms,
			struct spu_aead_parms *aead_parms,
			unsigned int data_size)
{
	struct SPUHEADER *spuh;
	struct BDESC_HEADER *bdesc;
	struct BD_HEADER *bd;
	u8 *ptr;
	u32 protocol_bits = 0;
	u32 cipher_bits = 0;
	u32 ecf_bits = 0;
	u8 sctx_words = 0;
	unsigned int buf_len = 0;

	/* size of the cipher payload */
	unsigned int cipher_len = hash_parms->prebuf_len + data_size +
				  hash_parms->pad_len;

	/* offset of prebuf or data from end of BD header */
	unsigned int cipher_offset = aead_parms->assoc_size +
			aead_parms->iv_len + aead_parms->aad_pad_len;

	/* total size of the DB data (without STAT word padding) */
	unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
						     aead_parms->iv_len,
						     hash_parms->prebuf_len,
						     data_size,
						     aead_parms->aad_pad_len,
						     aead_parms->data_pad_len,
						     hash_parms->pad_len);

	unsigned int auth_offset = 0;
	unsigned int offset_iv = 0;

	/* size/offset of the auth payload */
	unsigned int auth_len;

	auth_len = real_db_size;

	if (req_opts->is_aead && req_opts->is_inbound)
		cipher_len -= hash_parms->digestsize;

	if (req_opts->is_aead && req_opts->is_inbound)
		auth_len -= hash_parms->digestsize;

	if ((hash_parms->alg == HASH_ALG_AES) &&
	    (hash_parms->mode == HASH_MODE_XCBC)) {
		auth_len -= hash_parms->pad_len;
		cipher_len -= hash_parms->pad_len;
	}
643 flow_log("%s()\n", __func__
);
644 flow_log(" in:%u authFirst:%u\n",
645 req_opts
->is_inbound
, req_opts
->auth_first
);
646 flow_log(" %s. cipher alg:%u mode:%u type %u\n",
647 spu_alg_name(cipher_parms
->alg
, cipher_parms
->mode
),
648 cipher_parms
->alg
, cipher_parms
->mode
, cipher_parms
->type
);
649 flow_log(" key: %d\n", cipher_parms
->key_len
);
650 flow_dump(" key: ", cipher_parms
->key_buf
, cipher_parms
->key_len
);
651 flow_log(" iv: %d\n", cipher_parms
->iv_len
);
652 flow_dump(" iv: ", cipher_parms
->iv_buf
, cipher_parms
->iv_len
);
653 flow_log(" auth alg:%u mode:%u type %u\n",
654 hash_parms
->alg
, hash_parms
->mode
, hash_parms
->type
);
655 flow_log(" digestsize: %u\n", hash_parms
->digestsize
);
656 flow_log(" authkey: %d\n", hash_parms
->key_len
);
657 flow_dump(" authkey: ", hash_parms
->key_buf
, hash_parms
->key_len
);
658 flow_log(" assoc_size:%u\n", aead_parms
->assoc_size
);
659 flow_log(" prebuf_len:%u\n", hash_parms
->prebuf_len
);
660 flow_log(" data_size:%u\n", data_size
);
661 flow_log(" hash_pad_len:%u\n", hash_parms
->pad_len
);
662 flow_log(" real_db_size:%u\n", real_db_size
);
663 flow_log(" auth_offset:%u auth_len:%u cipher_offset:%u cipher_len:%u\n",
664 auth_offset
, auth_len
, cipher_offset
, cipher_len
);
665 flow_log(" aead_iv: %u\n", aead_parms
->iv_len
);

	/* starting out: zero the header (plus some) */
	ptr = spu_hdr;
	memset(ptr, 0, sizeof(struct SPUHEADER));

	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)ptr;
	ptr += sizeof(struct SPUHEADER);
	buf_len += sizeof(struct SPUHEADER);

	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);

	/* Format sctx word 0 (protocol_bits) */
	sctx_words = 3;	/* size in words */

	/* Format sctx word 1 (cipher_bits) */
	if (req_opts->is_inbound)
		cipher_bits |= CIPHER_INBOUND;
	if (req_opts->auth_first)
		cipher_bits |= CIPHER_ORDER;

	/* Set the crypto parameters in the cipher.flags */
	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

	/* Set the auth parameters in the cipher.flags */
	cipher_bits |= hash_parms->alg << HASH_ALG_SHIFT;
	cipher_bits |= hash_parms->mode << HASH_MODE_SHIFT;
	cipher_bits |= hash_parms->type << HASH_TYPE_SHIFT;

	/*
	 * Format sctx extensions if required, and update main fields if
	 * required.
	 */
	if (hash_parms->alg) {
		/* Write the authentication key material if present */
		if (hash_parms->key_len) {
			memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
			ptr += hash_parms->key_len;
			buf_len += hash_parms->key_len;
			sctx_words += hash_parms->key_len / 4;
		}

		if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
		    (cipher_parms->mode == CIPHER_MODE_CCM))
			/* unpadded length */
			offset_iv = aead_parms->assoc_size;

		/* if GCM/CCM we need to write ICV into the payload */
		if (!req_opts->is_inbound) {
			if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
			    (cipher_parms->mode == CIPHER_MODE_CCM))
				ecf_bits |= 1 << INSERT_ICV_SHIFT;
		} else {
			ecf_bits |= CHECK_ICV;
		}

		/* Inform the SPU of the ICV size (in words) */
		if (hash_parms->digestsize == 64)
			cipher_bits |= ICV_IS_512;
		else
			ecf_bits |= (hash_parms->digestsize / 4) << ICV_SIZE_SHIFT;
	}

	if (req_opts->bd_suppress)
		ecf_bits |= BD_SUPPRESS;

	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg) {
		if (cipher_parms->key_len) {
			memcpy(ptr, cipher_parms->key_buf,
			       cipher_parms->key_len);
			ptr += cipher_parms->key_len;
			buf_len += cipher_parms->key_len;
			sctx_words += cipher_parms->key_len / 4;
		}

		/* if encrypting then set IV size, use SCTX IV unless no IV given */
		if (cipher_parms->iv_buf && cipher_parms->iv_len) {
			/* cipher iv provided so put it in here */
			memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);

			ptr += cipher_parms->iv_len;
			buf_len += cipher_parms->iv_len;
			sctx_words += cipher_parms->iv_len / 4;
		}
	}

	/*
	 * RFC4543 (GMAC/ESP) requires data to be sent as part of AAD
	 * so we need to override the BDESC parameters.
	 */
	if (req_opts->is_rfc4543) {
		if (req_opts->is_inbound)
			data_size -= hash_parms->digestsize;
		offset_iv = aead_parms->assoc_size + data_size;
		cipher_offset = offset_iv;
		auth_len = cipher_offset + aead_parms->data_pad_len;
	}

	/* write in the total sctx length now that we know it */
	protocol_bits |= sctx_words;

	/* Endian adjust the SCTX */
	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
	spuh->sa.ecf = cpu_to_be32(ecf_bits);

	/* === create the BDESC section === */
	bdesc = (struct BDESC_HEADER *)ptr;

	bdesc->offset_mac = cpu_to_be16(auth_offset);
	bdesc->length_mac = cpu_to_be16(auth_len);
	bdesc->offset_crypto = cpu_to_be16(cipher_offset);
	bdesc->length_crypto = cpu_to_be16(cipher_len);

	/*
	 * CCM in SPU-M requires that ICV not be in same 32-bit word as data or
	 * padding. So account for padding as necessary.
	 */
	if (cipher_parms->mode == CIPHER_MODE_CCM)
		auth_len += spum_wordalign_padlen(auth_len);

	bdesc->offset_icv = cpu_to_be16(auth_len);
	bdesc->offset_iv = cpu_to_be16(offset_iv);

	ptr += sizeof(struct BDESC_HEADER);
	buf_len += sizeof(struct BDESC_HEADER);

	/* === no MFM section === */

	/* === create the BD section === */

	/* add the BD header */
	bd = (struct BD_HEADER *)ptr;
	bd->size = cpu_to_be16(real_db_size);

	ptr += sizeof(struct BD_HEADER);
	buf_len += sizeof(struct BD_HEADER);

	packet_dump(" SPU request header: ", spu_hdr, buf_len);

	return buf_len;
}

/**
 * spum_cipher_req_init() - Build a SPU request message header, up to and
 * including the BD header.
 * @spu_hdr: Start of SPU request header (MH)
 * @cipher_parms: Parameters that describe the cipher request
 *
 * Construct the message starting at spu_hdr. Caller should allocate this buffer
 * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
{
	struct SPUHEADER *spuh;
	u32 protocol_bits = 0;
	u32 cipher_bits = 0;
	u32 ecf_bits = 0;
	u8 sctx_words = 0;
	u8 *ptr = spu_hdr;

	flow_log("%s()\n", __func__);
	flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
		 cipher_parms->mode, cipher_parms->type);
	flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len);
	flow_log(" key: %d\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);

	/* starting out: zero the header (plus some) */
	memset(spu_hdr, 0, sizeof(struct SPUHEADER));
	ptr += sizeof(struct SPUHEADER);

	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)spu_hdr;

	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);

	/* Format sctx word 0 (protocol_bits) */
	sctx_words = 3;	/* size in words */

	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg) {
		if (cipher_parms->key_len) {
			ptr += cipher_parms->key_len;
			sctx_words += cipher_parms->key_len / 4;
		}

		/* if encrypting then set IV size, use SCTX IV unless no IV given */
		if (cipher_parms->iv_len) {
			ptr += cipher_parms->iv_len;
			sctx_words += cipher_parms->iv_len / 4;
		}
	}

	/* Set the crypto parameters in the cipher.flags */
	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg && cipher_parms->key_len)
		memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);

	/* write in the total sctx length now that we know it */
	protocol_bits |= sctx_words;

	/* Endian adjust the SCTX */
	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
	spuh->sa.ecf = cpu_to_be32(ecf_bits);

	packet_dump(" SPU request header: ", spu_hdr,
		    sizeof(struct SPUHEADER));

	return sizeof(struct SPUHEADER) + cipher_parms->key_len +
		cipher_parms->iv_len + sizeof(struct BDESC_HEADER) +
		sizeof(struct BD_HEADER);
}

/**
 * spum_cipher_req_finish() - Finish building a SPU request message header for a
 * block cipher request. Assumes much of the header was already filled in at
 * setkey() time in spum_cipher_req_init().
 * @spu_hdr: Start of the request message header (MH field)
 * @spu_req_hdr_len: Length in bytes of the SPU request header
 * @is_inbound: 0 encrypt, 1 decrypt
 * @cipher_parms: Parameters describing cipher operation to be performed
 * @data_size: Length of the data in the BD field
 *
 * Assumes much of the header was already filled in at setkey() time in
 * spum_cipher_req_init(). spum_cipher_req_init() fills in the encryption key.
 */
void spum_cipher_req_finish(u8 *spu_hdr,
			    u16 spu_req_hdr_len,
			    unsigned int is_inbound,
			    struct spu_cipher_parms *cipher_parms,
			    unsigned int data_size)
{
	struct SPUHEADER *spuh;
	struct BDESC_HEADER *bdesc;
	struct BD_HEADER *bd;
	u8 *bdesc_ptr = spu_hdr + spu_req_hdr_len -
		(sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER));
	u32 cipher_bits;

	flow_log("%s()\n", __func__);
	flow_log(" in: %u\n", is_inbound);
	flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
		 cipher_parms->type);

	/*
	 * In XTS mode, API puts "i" parameter (block tweak) in IV. For
	 * SPU-M, should be in start of the BD; tx_sg_create() copies it there.
	 * IV in SPU msg for SPU-M should be 0, since that's the "j" parameter
	 * (block ctr within larger data unit) - given we can send entire disk
	 * block (<= 4KB) in 1 SPU msg, don't need to use this parameter.
	 */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		memset(cipher_parms->iv_buf, 0, cipher_parms->iv_len);

	flow_log(" iv len: %d\n", cipher_parms->iv_len);
	flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
	flow_log(" data_size: %u\n", data_size);

	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)spu_hdr;

	/* cipher_bits was initialized at setkey time */
	cipher_bits = be32_to_cpu(spuh->sa.cipher_flags);

	/* Format sctx word 1 (cipher_bits) */
	if (is_inbound)
		cipher_bits |= CIPHER_INBOUND;
	else
		cipher_bits &= ~CIPHER_INBOUND;

	if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
		/* cipher iv provided so put it in here */
		memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
		       cipher_parms->iv_len);

	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);

	/* === create the BDESC section === */
	bdesc = (struct BDESC_HEADER *)bdesc_ptr;
	bdesc->offset_mac = 0;
	bdesc->length_mac = 0;
	bdesc->offset_crypto = 0;

	/* XTS mode, data_size needs to include tweak parameter */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		bdesc->length_crypto = cpu_to_be16(data_size +
						   SPU_XTS_TWEAK_SIZE);
	else
		bdesc->length_crypto = cpu_to_be16(data_size);

	bdesc->offset_icv = 0;
	bdesc->offset_iv = 0;

	/* === no MFM section === */

	/* === create the BD section === */
	/* add the BD header */
	bd = (struct BD_HEADER *)(bdesc_ptr + sizeof(struct BDESC_HEADER));
	bd->size = cpu_to_be16(data_size);

	/* XTS mode, data_size needs to include tweak parameter */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		bd->size = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE);
	else
		bd->size = cpu_to_be16(data_size);

	bd->prev_length = 0;

	packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
}

/**
 * spum_request_pad() - Create pad bytes at the end of the data.
 * @pad_start: Start of buffer where pad bytes are to be written
 * @gcm_ccm_padding: length of GCM/CCM padding, in bytes
 * @hash_pad_len: Number of bytes of padding to extend data to a full block
 * @auth_alg: authentication algorithm
 * @auth_mode: authentication mode
 * @total_sent: length inserted at end of hash pad
 * @status_padding: Number of bytes of padding to align STATUS word
 *
 * There may be three forms of pad:
 *  1. GCM/CCM pad - for GCM/CCM mode ciphers, pad to 16-byte alignment
 *  2. hash pad - pad to a block length, with 0x80 data terminator and
 *     the total length at the end
 *  3. STAT pad - to ensure the STAT field is 4-byte aligned
 */
void spum_request_pad(u8 *pad_start,
		      u32 gcm_ccm_padding,
		      u32 hash_pad_len,
		      enum hash_alg auth_alg,
		      enum hash_mode auth_mode,
		      unsigned int total_sent, u32 status_padding)
{
	u8 *ptr = pad_start;

	/* fix data alignment for GCM/CCM */
	if (gcm_ccm_padding > 0) {
		flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
			 gcm_ccm_padding);
		memset(ptr, 0, gcm_ccm_padding);
		ptr += gcm_ccm_padding;
	}

	if (hash_pad_len > 0) {
		/* clear the padding section */
		memset(ptr, 0, hash_pad_len);

		if ((auth_alg == HASH_ALG_AES) &&
		    (auth_mode == HASH_MODE_XCBC)) {
			/* AES/XCBC just requires padding to be 0s */
			ptr += hash_pad_len;
		} else {
			/* terminate the data */
			*ptr = 0x80;
			ptr += (hash_pad_len - sizeof(u64));

			/* add the size at the end as required per alg */
			if (auth_alg == HASH_ALG_MD5)
				*(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
			else	/* SHA1, SHA2-224, SHA2-256 */
				*(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
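			/*
			 * For example, if total_sent were 64 bytes, the value
			 * written here would be 512 (the length in bits),
			 * little-endian for MD5 and big-endian for the SHAs.
			 */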
		}
	}

	/* pad to a 4-byte alignment for STAT */
	if (status_padding > 0) {
		flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
			 status_padding);

		memset(ptr, 0, status_padding);
		ptr += status_padding;
	}
}

/**
 * spum_xts_tweak_in_payload() - Indicate that SPUM DOES place the XTS tweak
 * field in the packet payload (rather than using IV)
 */
u8 spum_xts_tweak_in_payload(void)
{
	return 1;
}

/**
 * spum_tx_status_len() - Return the length of the STATUS field in a SPU
 * request message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_tx_status_len(void)
{
	return SPU_TX_STATUS_LEN;
}

/**
 * spum_rx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_rx_status_len(void)
{
	return SPU_RX_STATUS_LEN;
}

/**
 * spum_status_process() - Process the status from a SPU response message.
 * @statp: start of STATUS word
 *
 * Return:
 * 0 - if status is good and response should be processed
 * !0 - status indicates an error and response is invalid
 */
int spum_status_process(u8 *statp)
{
	u32 status;

	status = __be32_to_cpu(*(__be32 *)statp);
	flow_log("SPU response STATUS %#08x\n", status);
	if (status & SPU_STATUS_ERROR_FLAG) {
		pr_err("%s() Warning: Error result from SPU: %#08x\n",
		       __func__, status);
		if (status & SPU_STATUS_INVALID_ICV)
			return SPU_INVALID_ICV;
		return -EBADMSG;
	}
	return 0;
}

/**
 * spum_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
 * @digestsize: Digest size of this request
 * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len
 * @assoclen: Length of AAD data
 * @chunksize: length of input data to be sent in this req
 * @is_encrypt: true if this is an output/encrypt operation
 * @is_esp: true if this is an ESP / RFC4309 operation
 */
void spum_ccm_update_iv(unsigned int digestsize,
			struct spu_cipher_parms *cipher_parms,
			unsigned int assoclen,
			unsigned int chunksize,
			bool is_encrypt,
			bool is_esp)
{
	u8 L;		/* L from CCM algorithm, length of plaintext data */
	u8 mprime;	/* M' from CCM algo, (M - 2) / 2, where M=authsize */
	u8 adata;

	if (cipher_parms->iv_len != CCM_AES_IV_SIZE) {
		pr_err("%s(): Invalid IV len %d for CCM mode, should be %d\n",
		       __func__, cipher_parms->iv_len, CCM_AES_IV_SIZE);
		return;
	}

	/*
	 * IV needs to be formatted as follows:
	 *
	 * |          Byte 0               | Bytes 1 - N | Bytes (N+1) - 15 |
	 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | Bits 7 - 0  |    Bits 7 - 0    |
	 * | 0 |Ad?|(M - 2) / 2|   L - 1   |    Nonce    | Plaintext Length |
	 *
	 * Ad? = 1 if AAD present, 0 if not present
	 * M = size of auth field, 8, 12, or 16 bytes (SPU-M) -or-
	 *     4, 6, 8, 10, 12, 14, 16 bytes (SPU2)
	 * L = Size of Plaintext Length field; Nonce size = 15 - L
	 *
	 * It appears that the crypto API already expects the L-1 portion
	 * to be set in the first byte of the IV, which implicitly determines
	 * the nonce size, and also fills in the nonce. But the other bits
	 * in byte 0 as well as the plaintext length need to be filled in.
	 *
	 * In rfc4309/esp mode, L is not already in the supplied IV and
	 * we need to fill it in, as well as move the IV data to be after
	 * the salt.
	 */
	if (is_esp) {
		L = CCM_ESP_L_VALUE;	/* RFC4309 has fixed L */
	} else {
		/* L' = plaintext length - 1 so Plaintext length is L' + 1 */
		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
		     CCM_B0_L_PRIME_SHIFT) + 1;
	}

	mprime = (digestsize - 2) >> 1;	/* M' = (M - 2) / 2 */
	adata = (assoclen > 0);		/* adata = 1 if any associated data */

	cipher_parms->iv_buf[0] = (adata << CCM_B0_ADATA_SHIFT) |
				  (mprime << CCM_B0_M_PRIME_SHIFT) |
				  ((L - 1) << CCM_B0_L_PRIME_SHIFT);
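
	/*
	 * Worked example (illustrative): with AAD present, a 16-byte digest
	 * (mprime = 7) and L = 4, byte 0 becomes
	 * (1 << 6) | (7 << 3) | 3 = 0x7b, matching the flags layout above.
	 */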

	/* Nonce is already filled in by crypto API, and is 15 - L bytes */

	/* Don't include digest in plaintext size when decrypting */
	if (!is_encrypt)
		chunksize -= digestsize;

	/* Fill in length of plaintext, formatted to be L bytes long */
	format_value_ccm(chunksize, &cipher_parms->iv_buf[15 - L + 1], L);
}

/**
 * spum_wordalign_padlen() - Given the length of a data field, determine the
 * padding required to align the data following this field on a 4-byte boundary.
 * @data_size: length of data field in bytes
 *
 * Return: length of status field padding, in bytes
 */
u32 spum_wordalign_padlen(u32 data_size)
{
	return ((data_size + 3) & ~3) - data_size;
}
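
/*
 * For example, a 13-byte field needs ((13 + 3) & ~3) - 13 = 16 - 13 = 3 pad
 * bytes, while a field that is already a multiple of 4 bytes needs none.
 */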