/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

#include <linux/kernel.h>
#include <linux/string.h>
/* This array is based on the hash algo type supported in spu.h */
char *tag_to_hash_idx[] = { "none", "md5", "sha1", "sha224", "sha256" };

char *hash_alg_name[] = { "None", "md5", "sha1", "sha224", "sha256", "aes",
	"sha384", "sha512", "sha3_224", "sha3_256", "sha3_384", "sha3_512" };

char *aead_alg_name[] = { "ccm(aes)", "gcm(aes)", "authenc" };
/* Assumes SPU-M messages are in big endian */
void spum_dump_msg_hdr(u8 *buf, unsigned int buf_len)
{
	u8 *ptr = buf;
	struct SPUHEADER *spuh = (struct SPUHEADER *)buf;
	unsigned int hash_key_len = 0;
	unsigned int hash_state_len = 0;
	unsigned int cipher_key_len = 0;
	unsigned int iv_len;
	u32 pflags;
	u32 cflags;
	u32 ecf;
	u32 cipher_alg;
	u32 cipher_mode;
	u32 cipher_type;
	u32 hash_alg;
	u32 hash_mode;
	u32 hash_type;
	u32 sctx_size;		/* SCTX length in words */
	u32 sctx_pl_len;	/* SCTX payload length in bytes */

	packet_log("SPU Message header %p len: %u\n", buf, buf_len);
	/* ========== Decode MH ========== */
	packet_log(" MH 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
	if (spuh->mh.flags & MH_SCTX_PRES)
		packet_log(" SCTX present\n");
	if (spuh->mh.flags & MH_BDESC_PRES)
		packet_log(" BDESC present\n");
	if (spuh->mh.flags & MH_MFM_PRES)
		packet_log(" MFM present\n");
	if (spuh->mh.flags & MH_BD_PRES)
		packet_log(" BD present\n");
	if (spuh->mh.flags & MH_HASH_PRES)
		packet_log(" HASH present\n");
	if (spuh->mh.flags & MH_SUPDT_PRES)
		packet_log(" SUPDT present\n");
	packet_log(" Opcode 0x%02x\n", spuh->mh.op_code);

	ptr += sizeof(spuh->mh) + sizeof(spuh->emh);	/* skip emh. unused */
	/* ========== Decode SCTX ========== */
	if (spuh->mh.flags & MH_SCTX_PRES) {
		pflags = be32_to_cpu(spuh->sa.proto_flags);
		packet_log(" SCTX[0] 0x%08x\n", pflags);
		sctx_size = pflags & SCTX_SIZE;
		packet_log(" Size %u words\n", sctx_size);

		cflags = be32_to_cpu(spuh->sa.cipher_flags);
		packet_log(" SCTX[1] 0x%08x\n", cflags);
		packet_log(" Inbound:%lu (1:decrypt/vrfy 0:encrypt/auth)\n",
			   (cflags & CIPHER_INBOUND) >> CIPHER_INBOUND_SHIFT);
		packet_log(" Order:%lu (1:AuthFirst 0:EncFirst)\n",
			   (cflags & CIPHER_ORDER) >> CIPHER_ORDER_SHIFT);
		packet_log(" ICV_IS_512:%lx\n",
			   (cflags & ICV_IS_512) >> ICV_IS_512_SHIFT);
		cipher_alg = (cflags & CIPHER_ALG) >> CIPHER_ALG_SHIFT;
		cipher_mode = (cflags & CIPHER_MODE) >> CIPHER_MODE_SHIFT;
		cipher_type = (cflags & CIPHER_TYPE) >> CIPHER_TYPE_SHIFT;
		packet_log(" Crypto Alg:%u Mode:%u Type:%u\n",
			   cipher_alg, cipher_mode, cipher_type);
		hash_alg = (cflags & HASH_ALG) >> HASH_ALG_SHIFT;
		hash_mode = (cflags & HASH_MODE) >> HASH_MODE_SHIFT;
		hash_type = (cflags & HASH_TYPE) >> HASH_TYPE_SHIFT;
		packet_log(" Hash Alg:%x Mode:%x Type:%x\n",
			   hash_alg, hash_mode, hash_type);
		packet_log(" UPDT_Offset:%u\n", cflags & UPDT_OFST);

		ecf = be32_to_cpu(spuh->sa.ecf);
		packet_log(" SCTX[2] 0x%08x\n", ecf);
		packet_log(" WriteICV:%lu CheckICV:%lu ICV_SIZE:%u ",
			   (ecf & INSERT_ICV) >> INSERT_ICV_SHIFT,
			   (ecf & CHECK_ICV) >> CHECK_ICV_SHIFT,
			   (ecf & ICV_SIZE) >> ICV_SIZE_SHIFT);
		packet_log("BD_SUPPRESS:%lu\n",
			   (ecf & BD_SUPPRESS) >> BD_SUPPRESS_SHIFT);
		packet_log(" SCTX_IV:%lu ExplicitIV:%lu GenIV:%lu ",
			   (ecf & SCTX_IV) >> SCTX_IV_SHIFT,
			   (ecf & EXPLICIT_IV) >> EXPLICIT_IV_SHIFT,
			   (ecf & GEN_IV) >> GEN_IV_SHIFT);
		packet_log("IV_OV_OFST:%lu EXP_IV_SIZE:%u\n",
			   (ecf & IV_OFFSET) >> IV_OFFSET_SHIFT,
			   ecf & EXP_IV_SIZE);

		ptr += sizeof(struct SCTX);
		if (hash_alg && hash_mode) {
			char *name = "NONE";

			switch (hash_alg) {
			case HASH_ALG_SHA224:
				hash_key_len = 28;
				name = "SHA224";
				break;
			case HASH_ALG_SHA256:
				hash_key_len = 32;
				name = "SHA256";
				break;
			case HASH_ALG_SHA384:
				hash_key_len = 48;
				name = "SHA384";
				break;
			case HASH_ALG_SHA512:
				hash_key_len = 64;
				name = "SHA512";
				break;
			default:
				break;
			}

			packet_log(" Auth Key Type:%s Length:%u Bytes\n",
				   name, hash_key_len);
			packet_dump(" KEY: ", ptr, hash_key_len);
			ptr += hash_key_len;
		} else if ((hash_alg == HASH_ALG_AES) &&
			   (hash_mode == HASH_MODE_XCBC)) {
			char *name = "NONE";

			switch (cipher_type) {
			case CIPHER_TYPE_AES128:
				hash_key_len = 16;
				name = "AES128-XCBC";
				break;
			case CIPHER_TYPE_AES192:
				hash_key_len = 24;
				name = "AES192-XCBC";
				break;
			case CIPHER_TYPE_AES256:
				hash_key_len = 32;
				name = "AES256-XCBC";
				break;
			default:
				break;
			}

			packet_log(" Auth Key Type:%s Length:%u Bytes\n",
				   name, hash_key_len);
			packet_dump(" KEY: ", ptr, hash_key_len);
			ptr += hash_key_len;
		}
		if (hash_alg && (hash_mode == HASH_MODE_NONE) &&
		    (hash_type == HASH_TYPE_UPDT)) {
			char *name = "NONE";

			switch (hash_alg) {
			case HASH_ALG_SHA224:
				hash_state_len = 32;
				name = "SHA224";
				break;
			case HASH_ALG_SHA256:
				hash_state_len = 32;
				name = "SHA256";
				break;
			case HASH_ALG_SHA384:
				hash_state_len = 64;
				name = "SHA384";
				break;
			case HASH_ALG_SHA512:
				hash_state_len = 64;
				name = "SHA512";
				break;
			default:
				break;
			}

			packet_log(" Auth State Type:%s Length:%u Bytes\n",
				   name, hash_state_len);
			packet_dump(" State: ", ptr, hash_state_len);
			ptr += hash_state_len;
		}
		if (cipher_alg) {
			char *name = "NONE";

			switch (cipher_alg) {
			case CIPHER_ALG_3DES:
				cipher_key_len = 24;
				name = "3DES";
				break;
			case CIPHER_ALG_RC4:
				cipher_key_len = 260;
				name = "ARC4";
				break;
			case CIPHER_ALG_AES:
				switch (cipher_type) {
				case CIPHER_TYPE_AES128:
					cipher_key_len = 16;
					name = "AES128";
					break;
				case CIPHER_TYPE_AES192:
					cipher_key_len = 24;
					name = "AES192";
					break;
				case CIPHER_TYPE_AES256:
					cipher_key_len = 32;
					name = "AES256";
					break;
				default:
					break;
				}
				break;
			case CIPHER_ALG_NONE:
			default:
				break;
			}

			packet_log(" Cipher Key Type:%s Length:%u Bytes\n",
				   name, cipher_key_len);

			/* XTS has two keys */
			if (cipher_mode == CIPHER_MODE_XTS) {
				packet_dump(" KEY2: ", ptr, cipher_key_len);
				ptr += cipher_key_len;
				packet_dump(" KEY1: ", ptr, cipher_key_len);
				ptr += cipher_key_len;

				cipher_key_len *= 2;
			} else {
				packet_dump(" KEY: ", ptr, cipher_key_len);
				ptr += cipher_key_len;
			}
		}

		sctx_pl_len = sctx_size * sizeof(u32) -
			      sizeof(struct SCTX);
		iv_len = sctx_pl_len -
			 (hash_key_len + hash_state_len + cipher_key_len);
		packet_log(" IV Length:%u Bytes\n", iv_len);
		packet_dump(" IV: ", ptr, iv_len);
		ptr += iv_len;
	}
	/* ========== Decode BDESC ========== */
	if (spuh->mh.flags & MH_BDESC_PRES) {
		struct BDESC_HEADER *bdesc = (struct BDESC_HEADER *)ptr;

		packet_log(" BDESC[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" OffsetMAC:%u LengthMAC:%u\n",
			   be16_to_cpu(bdesc->offset_mac),
			   be16_to_cpu(bdesc->length_mac));
		ptr += sizeof(u32);

		packet_log(" BDESC[1] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" OffsetCrypto:%u LengthCrypto:%u\n",
			   be16_to_cpu(bdesc->offset_crypto),
			   be16_to_cpu(bdesc->length_crypto));
		ptr += sizeof(u32);

		packet_log(" BDESC[2] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" OffsetICV:%u OffsetIV:%u\n",
			   be16_to_cpu(bdesc->offset_icv),
			   be16_to_cpu(bdesc->offset_iv));
		ptr += sizeof(u32);
	}
	/* ========== Decode BD ========== */
	if (spuh->mh.flags & MH_BD_PRES) {
		struct BD_HEADER *bd = (struct BD_HEADER *)ptr;

		packet_log(" BD[0] 0x%08x\n", be32_to_cpu(*((u32 *)ptr)));
		packet_log(" Size:%ubytes PrevLength:%u\n",
			   be16_to_cpu(bd->size), be16_to_cpu(bd->prev_length));

		ptr += sizeof(struct BD_HEADER);
	}

	/* Double check sanity */
	if (buf + buf_len != ptr) {
		packet_log(" Packet parsed incorrectly. ");
		packet_log("buf:%p buf_len:%u buf+buf_len:%p ptr:%p\n",
			   buf, buf_len, buf + buf_len, ptr);
	}
}
/**
 * spum_ns2_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_ns2_ctx_max_payload(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     unsigned int blocksize)
{
	u32 max_payload = SPUM_NS2_MAX_PAYLOAD;
	u32 excess;

	/* In XTS on SPU-M, we'll need to insert tweak before input data */
	if (cipher_mode == CIPHER_MODE_XTS)
		max_payload -= SPU_XTS_TWEAK_SIZE;

	excess = max_payload % blocksize;

	return max_payload - excess;
}
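
/*
 * Worked example for spum_ns2_ctx_max_payload() (illustrative only): if
 * SPUM_NS2_MAX_PAYLOAD were 65532 bytes and blocksize is 16 (AES), then
 * excess = 65532 % 16 = 12 and the function returns 65520, a whole number of
 * AES blocks, so an oversized request can be split into block-aligned chunks.
 */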
/**
 * spum_nsp_ctx_max_payload() - Determine the max length of the payload for a
 * SPU message for a given cipher and hash alg context.
 * @cipher_alg: The cipher algorithm
 * @cipher_mode: The cipher mode
 * @blocksize: The size of a block of data for this algo
 *
 * The max payload must be a multiple of the blocksize so that if a request is
 * too large to fit in a single SPU message, the request can be broken into
 * max_payload sized chunks. Each chunk must be a multiple of blocksize.
 *
 * Return: Max payload length in bytes
 */
u32 spum_nsp_ctx_max_payload(enum spu_cipher_alg cipher_alg,
			     enum spu_cipher_mode cipher_mode,
			     unsigned int blocksize)
{
	u32 max_payload = SPUM_NSP_MAX_PAYLOAD;
	u32 excess;

	/* In XTS on SPU-M, we'll need to insert tweak before input data */
	if (cipher_mode == CIPHER_MODE_XTS)
		max_payload -= SPU_XTS_TWEAK_SIZE;

	excess = max_payload % blocksize;

	return max_payload - excess;
}
/**
 * spum_payload_length() - Given a SPU-M message header, extract the payload
 * length.
 * @spu_hdr: Start of SPU header
 *
 * Assumes just MH, EMH, BD (no SCTX, BDESC). Works for response frames.
 *
 * Return: payload length in bytes
 */
u32 spum_payload_length(u8 *spu_hdr)
{
	struct BD_HEADER *bd;
	u32 pl_len;

	/* Find BD header. skip MH, EMH */
	bd = (struct BD_HEADER *)(spu_hdr + 8);
	pl_len = be16_to_cpu(bd->size);

	return pl_len;
}
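
/*
 * Illustrative note for spum_payload_length() (assumed from the +8 offset
 * above): the response starts with a 4-byte MH and a 4-byte EMH, so the BD
 * header sits at byte 8 and its big-endian size field gives the payload
 * length. A size field reading 0x0200, for example, reports a 512-byte
 * payload.
 */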
/**
 * spum_response_hdr_len() - Given the length of the hash key and encryption
 * key, determine the expected length of a SPU response header.
 * @auth_key_len: authentication key length (bytes)
 * @enc_key_len: encryption key length (bytes)
 * @is_hash: true if response message is for a hash operation
 *
 * Return: length of SPU response header (bytes)
 */
u16 spum_response_hdr_len(u16 auth_key_len, u16 enc_key_len, bool is_hash)
{
	if (is_hash)
		return SPU_HASH_RESP_HDR_LEN;
	else
		return SPU_RESP_HDR_LEN;
}
/**
 * spum_hash_pad_len() - Calculate the length of hash padding required to extend
 * data to a full block size.
 * @hash_alg: hash algorithm
 * @hash_mode: hash mode
 * @chunksize: length of data, in bytes
 * @hash_block_size: size of a block of data for hash algorithm
 *
 * Reserve space for 1 byte (0x80) start of pad and the total length as u64
 *
 * Return: length of hash pad in bytes
 */
u16 spum_hash_pad_len(enum hash_alg hash_alg, enum hash_mode hash_mode,
		      u32 chunksize, u16 hash_block_size)
{
	unsigned int length_len;
	unsigned int used_space_last_block;
	int hash_pad_len;

	/* AES-XCBC hash requires just padding to next block boundary */
	if ((hash_alg == HASH_ALG_AES) && (hash_mode == HASH_MODE_XCBC)) {
		used_space_last_block = chunksize % hash_block_size;
		hash_pad_len = hash_block_size - used_space_last_block;
		if (hash_pad_len >= hash_block_size)
			hash_pad_len -= hash_block_size;
		return hash_pad_len;
	}

	used_space_last_block = chunksize % hash_block_size + 1;
	if ((hash_alg == HASH_ALG_SHA384) || (hash_alg == HASH_ALG_SHA512))
		length_len = 2 * sizeof(u64);
	else
		length_len = sizeof(u64);

	used_space_last_block += length_len;
	hash_pad_len = hash_block_size - used_space_last_block;
	if (hash_pad_len < 0)
		hash_pad_len += hash_block_size;
	hash_pad_len += 1 + length_len;

	return hash_pad_len;
}
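
/*
 * Worked example for spum_hash_pad_len() (illustrative only): a 70-byte chunk
 * hashed with SHA-256 (64-byte block, 8-byte length field) gives
 * used_space_last_block = 70 % 64 + 1 + 8 = 15, hash_pad_len = 64 - 15 = 49,
 * then 49 + 1 + 8 = 58 pad bytes. The last block then holds 6 data bytes, the
 * 0x80 terminator, 49 zero bytes and the 8-byte bit length: exactly 64 bytes.
 */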
/**
 * spum_gcm_ccm_pad_len() - Determine the required length of GCM or CCM padding.
 * @cipher_mode: Algo type
 * @data_size: Length of plaintext (bytes)
 *
 * Return: Length of padding, in bytes
 */
u32 spum_gcm_ccm_pad_len(enum spu_cipher_mode cipher_mode,
			 unsigned int data_size)
{
	u32 pad_len = 0;
	u32 m1 = SPU_GCM_CCM_ALIGN - 1;

	if ((cipher_mode == CIPHER_MODE_GCM) ||
	    (cipher_mode == CIPHER_MODE_CCM))
		pad_len = ((data_size + m1) & ~m1) - data_size;

	return pad_len;
}
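
/*
 * Worked example for spum_gcm_ccm_pad_len() (illustrative, assuming
 * SPU_GCM_CCM_ALIGN is 16, as the "padding to 16 byte alignment" log in
 * spum_request_pad() suggests): for a 20-byte payload, m1 = 15 and
 * ((20 + 15) & ~15) = 32, so 12 pad bytes are needed; a payload that is
 * already a multiple of 16 needs none.
 */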
/**
 * spum_assoc_resp_len() - Determine the size of the receive buffer required to
 * catch associated data.
 * @cipher_mode: cipher mode
 * @assoc_len: length of associated data (bytes)
 * @iv_len: length of IV (bytes)
 * @is_encrypt: true if encrypting. false if decrypting.
 *
 * Return: length of associated data in response message (bytes)
 */
u32 spum_assoc_resp_len(enum spu_cipher_mode cipher_mode,
			unsigned int assoc_len, unsigned int iv_len,
			bool is_encrypt)
{
	u32 buflen = 0;
	u32 pad;

	if (assoc_len)
		buflen = assoc_len;

	if (cipher_mode == CIPHER_MODE_GCM) {
		/* AAD needs to be padded in responses too */
		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen);
		buflen += pad;
	}
	if (cipher_mode == CIPHER_MODE_CCM) {
		/*
		 * AAD needs to be padded in responses too
		 * for CCM, len + 2 needs to be 128-bit aligned.
		 */
		pad = spum_gcm_ccm_pad_len(cipher_mode, buflen + 2);
		buflen += pad;
	}

	return buflen;
}
/**
 * spum_aead_ivlen() - Calculate the length of the AEAD IV to be included
 * in a SPU request after the AAD and before the payload.
 * @cipher_mode: cipher mode
 * @iv_len: initialization vector length in bytes
 *
 * In Linux ~4.2 and later, the assoc_data sg includes the IV. So no need
 * to include the IV as a separate field in the SPU request msg.
 *
 * Return: Length of AEAD IV in bytes
 */
u8 spum_aead_ivlen(enum spu_cipher_mode cipher_mode, u16 iv_len)
{
	return 0;
}
/**
 * spum_hash_type() - Determine the type of hash operation.
 * @src_sent: The number of bytes in the current request that have already
 *	      been sent to the SPU to be hashed.
 *
 * We do not use HASH_TYPE_FULL for requests that fit in a single SPU message.
 * Using FULL causes failures (such as when the string to be hashed is empty).
 * For similar reasons, we never use HASH_TYPE_FIN. Instead, submit messages
 * as INIT or UPDT and do the hash padding in sw.
 */
enum hash_type spum_hash_type(u32 src_sent)
{
	return src_sent ? HASH_TYPE_UPDT : HASH_TYPE_INIT;
}
/**
 * spum_digest_size() - Determine the size of a hash digest to expect the SPU to
 * return.
 * @alg_digest_size: Number of bytes in the final digest for the given algo
 * @alg: The hash algorithm
 * @htype: Type of hash operation (init, update, full, etc)
 *
 * When doing incremental hashing for an algorithm with a truncated hash
 * (e.g., SHA224), the SPU returns the full digest so that it can be fed back as
 * a partial result for the next chunk.
 *
 * Return: Number of bytes of hash digest expected back from the SPU
 */
u32 spum_digest_size(u32 alg_digest_size, enum hash_alg alg,
		     enum hash_type htype)
{
	u32 digestsize = alg_digest_size;

	/* SPU returns complete digest when doing incremental hash and truncated
	 * hash algorithm.
	 */
	if ((htype == HASH_TYPE_INIT) || (htype == HASH_TYPE_UPDT)) {
		if (alg == HASH_ALG_SHA224)
			digestsize = SHA256_DIGEST_SIZE;
		else if (alg == HASH_ALG_SHA384)
			digestsize = SHA512_DIGEST_SIZE;
	}
	return digestsize;
}
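
/*
 * Example for spum_digest_size(), following directly from the code above: an
 * incremental SHA-224 chunk submitted as INIT or UPDT is answered with a
 * 32-byte (SHA-256 sized) digest so the intermediate state can be fed back
 * for the next chunk; a 28-byte truncated value would lose part of that state.
 */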
/**
 * spum_create_request() - Build a SPU request message header, up to and
 * including the BD header. Construct the message starting at spu_hdr. Caller
 * should allocate this buffer in DMA-able memory at least SPU_HEADER_ALLOC_LEN
 * bytes long.
 * @spu_hdr: Start of buffer where SPU request header is to be written
 * @req_opts: SPU request message options
 * @cipher_parms: Parameters related to cipher algorithm
 * @hash_parms: Parameters related to hash algorithm
 * @aead_parms: Parameters related to AEAD operation
 * @data_size: Length of data to be encrypted or authenticated. If AEAD, does
 *	       not include length of AAD.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u32 spum_create_request(u8 *spu_hdr,
			struct spu_request_opts *req_opts,
			struct spu_cipher_parms *cipher_parms,
			struct spu_hash_parms *hash_parms,
			struct spu_aead_parms *aead_parms,
			unsigned int data_size)
{
	struct SPUHEADER *spuh;
	struct BDESC_HEADER *bdesc;
	struct BD_HEADER *bd;

	u8 *ptr;
	u32 protocol_bits = 0;
	u32 cipher_bits = 0;
	u32 ecf_bits = 0;
	u8 sctx_words = 0;
	unsigned int buf_len = 0;

	/* size of the cipher payload */
	unsigned int cipher_len = hash_parms->prebuf_len + data_size +
				  hash_parms->pad_len;

	/* offset of prebuf or data from end of BD header */
	unsigned int cipher_offset = aead_parms->assoc_size +
				     aead_parms->iv_len + aead_parms->aad_pad_len;

	/* total size of the DB data (without STAT word padding) */
	unsigned int real_db_size = spu_real_db_size(aead_parms->assoc_size,
						     aead_parms->iv_len,
						     hash_parms->prebuf_len,
						     data_size,
						     aead_parms->aad_pad_len,
						     aead_parms->data_pad_len,
						     hash_parms->pad_len);

	unsigned int auth_offset = 0;
	unsigned int offset_iv = 0;

	/* size/offset of the auth payload */
	unsigned int auth_len;

	auth_len = real_db_size;

	if (req_opts->is_aead && req_opts->is_inbound)
		cipher_len -= hash_parms->digestsize;

	if (req_opts->is_aead && req_opts->is_inbound)
		auth_len -= hash_parms->digestsize;

	if ((hash_parms->alg == HASH_ALG_AES) &&
	    (hash_parms->mode == HASH_MODE_XCBC)) {
		auth_len -= hash_parms->pad_len;
		cipher_len -= hash_parms->pad_len;
	}

	flow_log("%s()\n", __func__);
	flow_log(" in:%u authFirst:%u\n",
		 req_opts->is_inbound, req_opts->auth_first);
	flow_log(" %s. cipher alg:%u mode:%u type %u\n",
		 spu_alg_name(cipher_parms->alg, cipher_parms->mode),
		 cipher_parms->alg, cipher_parms->mode, cipher_parms->type);
	flow_log(" key: %d\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);
	flow_log(" iv: %d\n", cipher_parms->iv_len);
	flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
	flow_log(" auth alg:%u mode:%u type %u\n",
		 hash_parms->alg, hash_parms->mode, hash_parms->type);
	flow_log(" digestsize: %u\n", hash_parms->digestsize);
	flow_log(" authkey: %d\n", hash_parms->key_len);
	flow_dump(" authkey: ", hash_parms->key_buf, hash_parms->key_len);
	flow_log(" assoc_size:%u\n", aead_parms->assoc_size);
	flow_log(" prebuf_len:%u\n", hash_parms->prebuf_len);
	flow_log(" data_size:%u\n", data_size);
	flow_log(" hash_pad_len:%u\n", hash_parms->pad_len);
	flow_log(" real_db_size:%u\n", real_db_size);
	flow_log(" auth_offset:%u auth_len:%u cipher_offset:%u cipher_len:%u\n",
		 auth_offset, auth_len, cipher_offset, cipher_len);
	flow_log(" aead_iv: %u\n", aead_parms->iv_len);
	/* starting out: zero the header (plus some) */
	ptr = spu_hdr;
	memset(ptr, 0, sizeof(struct SPUHEADER));

	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)ptr;
	ptr += sizeof(struct SPUHEADER);
	buf_len += sizeof(struct SPUHEADER);

	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);

	/* Format sctx word 0 (protocol_bits) */
	sctx_words = 3;		/* size in words */

	/* Format sctx word 1 (cipher_bits) */
	if (req_opts->is_inbound)
		cipher_bits |= CIPHER_INBOUND;
	if (req_opts->auth_first)
		cipher_bits |= CIPHER_ORDER;

	/* Set the crypto parameters in the cipher.flags */
	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

	/* Set the auth parameters in the cipher.flags */
	cipher_bits |= hash_parms->alg << HASH_ALG_SHIFT;
	cipher_bits |= hash_parms->mode << HASH_MODE_SHIFT;
	cipher_bits |= hash_parms->type << HASH_TYPE_SHIFT;

	/*
	 * Format sctx extensions if required, and update main fields if
	 * required.
	 */
	if (hash_parms->alg) {
		/* Write the authentication key material if present */
		if (hash_parms->key_len) {
			memcpy(ptr, hash_parms->key_buf, hash_parms->key_len);
			ptr += hash_parms->key_len;
			buf_len += hash_parms->key_len;
			sctx_words += hash_parms->key_len / 4;
		}

		if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
		    (cipher_parms->mode == CIPHER_MODE_CCM))
			/* unpadded length */
			offset_iv = aead_parms->assoc_size;

		/* if GCM/CCM we need to write ICV into the payload */
		if (!req_opts->is_inbound) {
			if ((cipher_parms->mode == CIPHER_MODE_GCM) ||
			    (cipher_parms->mode == CIPHER_MODE_CCM))
				ecf_bits |= 1 << INSERT_ICV_SHIFT;
		} else {
			ecf_bits |= CHECK_ICV;
		}

		/* Inform the SPU of the ICV size (in words) */
		if (hash_parms->digestsize == 64)
			cipher_bits |= ICV_IS_512;
		else
			ecf_bits |=
			    (hash_parms->digestsize / 4) << ICV_SIZE_SHIFT;
	}

	if (req_opts->bd_suppress)
		ecf_bits |= BD_SUPPRESS;
	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg) {
		if (cipher_parms->key_len) {
			memcpy(ptr, cipher_parms->key_buf,
			       cipher_parms->key_len);
			ptr += cipher_parms->key_len;
			buf_len += cipher_parms->key_len;
			sctx_words += cipher_parms->key_len / 4;
		}

		/*
		 * if encrypting then set IV size, use SCTX IV unless no IV
		 * given here
		 */
		if (cipher_parms->iv_buf && cipher_parms->iv_len) {
			/* Use SCTX IV */
			ecf_bits |= SCTX_IV;

			/* cipher iv provided so put it in here */
			memcpy(ptr, cipher_parms->iv_buf, cipher_parms->iv_len);

			ptr += cipher_parms->iv_len;
			buf_len += cipher_parms->iv_len;
			sctx_words += cipher_parms->iv_len / 4;
		}
	}

	/*
	 * RFC4543 (GMAC/ESP) requires data to be sent as part of AAD
	 * so we need to override the BDESC parameters.
	 */
	if (req_opts->is_rfc4543) {
		if (req_opts->is_inbound)
			data_size -= hash_parms->digestsize;
		offset_iv = aead_parms->assoc_size + data_size;

		cipher_offset = offset_iv;
		auth_len = cipher_offset + aead_parms->data_pad_len;
	}

	/* write in the total sctx length now that we know it */
	protocol_bits |= sctx_words;

	/* Endian adjust the SCTX */
	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
	spuh->sa.ecf = cpu_to_be32(ecf_bits);

	/* === create the BDESC section === */
	bdesc = (struct BDESC_HEADER *)ptr;

	bdesc->offset_mac = cpu_to_be16(auth_offset);
	bdesc->length_mac = cpu_to_be16(auth_len);
	bdesc->offset_crypto = cpu_to_be16(cipher_offset);
	bdesc->length_crypto = cpu_to_be16(cipher_len);

	/*
	 * CCM in SPU-M requires that ICV not be in same 32-bit word as data or
	 * padding. So account for padding as necessary.
	 */
	if (cipher_parms->mode == CIPHER_MODE_CCM)
		auth_len += spum_wordalign_padlen(auth_len);

	bdesc->offset_icv = cpu_to_be16(auth_len);
	bdesc->offset_iv = cpu_to_be16(offset_iv);

	ptr += sizeof(struct BDESC_HEADER);
	buf_len += sizeof(struct BDESC_HEADER);

	/* === no MFM section === */

	/* === create the BD section === */

	/* add the BD header */
	bd = (struct BD_HEADER *)ptr;
	bd->size = cpu_to_be16(real_db_size);
	bd->prev_length = 0;

	ptr += sizeof(struct BD_HEADER);
	buf_len += sizeof(struct BD_HEADER);

	packet_dump(" SPU request header: ", spu_hdr, buf_len);

	return buf_len;
}
/**
 * spum_cipher_req_init() - Build a SPU request message header, up to and
 * including the BD header.
 * @spu_hdr: Start of SPU request header (MH)
 * @cipher_parms: Parameters that describe the cipher request
 *
 * Construct the message starting at spu_hdr. Caller should allocate this buffer
 * in DMA-able memory at least SPU_HEADER_ALLOC_LEN bytes long.
 *
 * Return: the length of the SPU header in bytes. 0 if an error occurs.
 */
u16 spum_cipher_req_init(u8 *spu_hdr, struct spu_cipher_parms *cipher_parms)
{
	struct SPUHEADER *spuh;
	u32 protocol_bits = 0;
	u32 cipher_bits = 0;
	u32 ecf_bits = 0;
	u8 sctx_words = 0;
	u8 *ptr = spu_hdr;

	flow_log("%s()\n", __func__);
	flow_log(" cipher alg:%u mode:%u type %u\n", cipher_parms->alg,
		 cipher_parms->mode, cipher_parms->type);
	flow_log(" cipher_iv_len: %u\n", cipher_parms->iv_len);
	flow_log(" key: %d\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf, cipher_parms->key_len);

	/* starting out: zero the header (plus some) */
	memset(spu_hdr, 0, sizeof(struct SPUHEADER));
	ptr += sizeof(struct SPUHEADER);

	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)spu_hdr;

	spuh->mh.op_code = SPU_CRYPTO_OPERATION_GENERIC;
	spuh->mh.flags |= (MH_SCTX_PRES | MH_BDESC_PRES | MH_BD_PRES);

	/* Format sctx word 0 (protocol_bits) */
	sctx_words = 3;		/* size in words */

	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg) {
		if (cipher_parms->key_len) {
			ptr += cipher_parms->key_len;
			sctx_words += cipher_parms->key_len / 4;
		}

		/*
		 * if encrypting then set IV size, use SCTX IV unless no IV
		 * given here
		 */
		if (cipher_parms->iv_len) {
			/* Use SCTX IV */
			ecf_bits |= SCTX_IV;
			ptr += cipher_parms->iv_len;
			sctx_words += cipher_parms->iv_len / 4;
		}
	}

	/* Set the crypto parameters in the cipher.flags */
	cipher_bits |= cipher_parms->alg << CIPHER_ALG_SHIFT;
	cipher_bits |= cipher_parms->mode << CIPHER_MODE_SHIFT;
	cipher_bits |= cipher_parms->type << CIPHER_TYPE_SHIFT;

	/* copy the encryption keys in the SAD entry */
	if (cipher_parms->alg && cipher_parms->key_len)
		memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);

	/* write in the total sctx length now that we know it */
	protocol_bits |= sctx_words;

	/* Endian adjust the SCTX */
	spuh->sa.proto_flags = cpu_to_be32(protocol_bits);
	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);
	spuh->sa.ecf = cpu_to_be32(ecf_bits);

	packet_dump(" SPU request header: ", spu_hdr,
		    sizeof(struct SPUHEADER));

	return sizeof(struct SPUHEADER) + cipher_parms->key_len +
	       cipher_parms->iv_len + sizeof(struct BDESC_HEADER) +
	       sizeof(struct BD_HEADER);
}
/**
 * spum_cipher_req_finish() - Finish building a SPU request message header for a
 * block cipher request. Assumes much of the header was already filled in at
 * setkey() time in spum_cipher_req_init().
 * @spu_hdr: Start of the request message header (MH field)
 * @spu_req_hdr_len: Length in bytes of the SPU request header
 * @is_inbound: 0 encrypt, 1 decrypt
 * @cipher_parms: Parameters describing cipher operation to be performed
 * @update_key: If true, rewrite the cipher key in SCTX
 * @data_size: Length of the data in the BD field
 *
 * Assumes much of the header was already filled in at setkey() time in
 * spum_cipher_req_init().
 * spum_cipher_req_init() fills in the encryption key. For RC4, when submitting
 * a request for a non-first chunk, we use the 260-byte SUPDT field from the
 * previous response as the key. update_key is true for this case. Unused in all
 * other cases.
 */
void spum_cipher_req_finish(u8 *spu_hdr,
			    u16 spu_req_hdr_len,
			    unsigned int is_inbound,
			    struct spu_cipher_parms *cipher_parms,
			    bool update_key,
			    unsigned int data_size)
{
	struct SPUHEADER *spuh;
	struct BDESC_HEADER *bdesc;
	struct BD_HEADER *bd;
	u8 *bdesc_ptr = spu_hdr + spu_req_hdr_len -
			(sizeof(struct BD_HEADER) + sizeof(struct BDESC_HEADER));
	u32 cipher_bits;

	flow_log("%s()\n", __func__);
	flow_log(" in: %u\n", is_inbound);
	flow_log(" cipher alg: %u, cipher_type: %u\n", cipher_parms->alg,
		 cipher_parms->type);
	flow_log(" cipher key len: %u\n", cipher_parms->key_len);
	flow_dump(" key: ", cipher_parms->key_buf,
		  cipher_parms->key_len);

	/*
	 * In XTS mode, API puts "i" parameter (block tweak) in IV. For
	 * SPU-M, should be in start of the BD; tx_sg_create() copies it there.
	 * IV in SPU msg for SPU-M should be 0, since that's the "j" parameter
	 * (block ctr within larger data unit) - given we can send entire disk
	 * block (<= 4KB) in 1 SPU msg, don't need to use this parameter.
	 */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		memset(cipher_parms->iv_buf, 0, cipher_parms->iv_len);

	flow_log(" iv len: %d\n", cipher_parms->iv_len);
	flow_dump(" iv: ", cipher_parms->iv_buf, cipher_parms->iv_len);
	flow_log(" data_size: %u\n", data_size);
	/* format master header word */
	/* Do not set the next bit even though the datasheet says to */
	spuh = (struct SPUHEADER *)spu_hdr;

	/* cipher_bits was initialized at setkey time */
	cipher_bits = be32_to_cpu(spuh->sa.cipher_flags);

	/* Format sctx word 1 (cipher_bits) */
	if (is_inbound)
		cipher_bits |= CIPHER_INBOUND;
	else
		cipher_bits &= ~CIPHER_INBOUND;

	/* update encryption key for RC4 on non-first chunk */
	if (update_key) {
		spuh->sa.cipher_flags |=
			cipher_parms->type << CIPHER_TYPE_SHIFT;
		memcpy(spuh + 1, cipher_parms->key_buf, cipher_parms->key_len);
	}

	if (cipher_parms->alg && cipher_parms->iv_buf && cipher_parms->iv_len)
		/* cipher iv provided so put it in here */
		memcpy(bdesc_ptr - cipher_parms->iv_len, cipher_parms->iv_buf,
		       cipher_parms->iv_len);

	spuh->sa.cipher_flags = cpu_to_be32(cipher_bits);

	/* === create the BDESC section === */
	bdesc = (struct BDESC_HEADER *)bdesc_ptr;
	bdesc->offset_mac = 0;
	bdesc->length_mac = 0;
	bdesc->offset_crypto = 0;

	/* XTS mode, data_size needs to include tweak parameter */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		bdesc->length_crypto = cpu_to_be16(data_size +
						   SPU_XTS_TWEAK_SIZE);
	else
		bdesc->length_crypto = cpu_to_be16(data_size);

	bdesc->offset_icv = 0;
	bdesc->offset_iv = 0;

	/* === no MFM section === */

	/* === create the BD section === */
	/* add the BD header */
	bd = (struct BD_HEADER *)(bdesc_ptr + sizeof(struct BDESC_HEADER));

	/* XTS mode, data_size needs to include tweak parameter */
	if (cipher_parms->mode == CIPHER_MODE_XTS)
		bd->size = cpu_to_be16(data_size + SPU_XTS_TWEAK_SIZE);
	else
		bd->size = cpu_to_be16(data_size);

	bd->prev_length = 0;

	packet_dump(" SPU request header: ", spu_hdr, spu_req_hdr_len);
}
/**
 * spum_request_pad() - Create pad bytes at the end of the data.
 * @pad_start: Start of buffer where pad bytes are to be written
 * @gcm_ccm_padding: length of GCM/CCM padding, in bytes
 * @hash_pad_len: Number of bytes of padding to extend data to full block
 * @auth_alg: authentication algorithm
 * @auth_mode: authentication mode
 * @total_sent: length inserted at end of hash pad
 * @status_padding: Number of bytes of padding to align STATUS word
 *
 * There may be three forms of pad:
 *  1. GCM/CCM pad - for GCM/CCM mode ciphers, pad to 16-byte alignment
 *  2. hash pad - pad to a block length, with 0x80 data terminator and
 *                total message length at the end
 *  3. STAT pad - to ensure the STAT field is 4-byte aligned
 */
void spum_request_pad(u8 *pad_start,
		      u32 gcm_ccm_padding,
		      u32 hash_pad_len,
		      enum hash_alg auth_alg,
		      enum hash_mode auth_mode,
		      unsigned int total_sent, u32 status_padding)
{
	u8 *ptr = pad_start;

	/* fix data alignment for GCM/CCM */
	if (gcm_ccm_padding > 0) {
		flow_log(" GCM: padding to 16 byte alignment: %u bytes\n",
			 gcm_ccm_padding);
		memset(ptr, 0, gcm_ccm_padding);
		ptr += gcm_ccm_padding;
	}

	if (hash_pad_len > 0) {
		/* clear the padding section */
		memset(ptr, 0, hash_pad_len);

		if ((auth_alg == HASH_ALG_AES) &&
		    (auth_mode == HASH_MODE_XCBC)) {
			/* AES/XCBC just requires padding to be 0s */
			ptr += hash_pad_len;
		} else {
			/* terminate the data */
			*ptr = 0x80;
			ptr += (hash_pad_len - sizeof(u64));

			/* add the size at the end as required per alg */
			if (auth_alg == HASH_ALG_MD5)
				*(u64 *)ptr = cpu_to_le64((u64)total_sent * 8);
			else /* SHA1, SHA2-224, SHA2-256 */
				*(u64 *)ptr = cpu_to_be64((u64)total_sent * 8);
			ptr += sizeof(u64);
		}
	}

	/* pad to a 4byte alignment for STAT */
	if (status_padding > 0) {
		flow_log(" STAT: padding to 4 byte alignment: %u bytes\n",
			 status_padding);

		memset(ptr, 0, status_padding);
		ptr += status_padding;
	}
}
/**
 * spum_xts_tweak_in_payload() - Indicate that SPUM DOES place the XTS tweak
 * field in the packet payload (rather than using IV)
 *
 * Return: 1
 */
u8 spum_xts_tweak_in_payload(void)
{
	return 1;
}
/**
 * spum_tx_status_len() - Return the length of the STATUS field in a SPU
 * request message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_tx_status_len(void)
{
	return SPU_TX_STATUS_LEN;
}
/**
 * spum_rx_status_len() - Return the length of the STATUS field in a SPU
 * response message.
 *
 * Return: Length of STATUS field in bytes.
 */
u8 spum_rx_status_len(void)
{
	return SPU_RX_STATUS_LEN;
}
/**
 * spum_status_process() - Process the status from a SPU response message.
 * @statp: start of STATUS word
 *
 * Return:
 *   0 - if status is good and response should be processed
 *   !0 - status indicates an error and response is invalid
 */
int spum_status_process(u8 *statp)
{
	u32 status;

	status = __be32_to_cpu(*(__be32 *)statp);
	flow_log("SPU response STATUS %#08x\n", status);
	if (status & SPU_STATUS_ERROR_FLAG) {
		pr_err("%s() Warning: Error result from SPU: %#08x\n",
		       __func__, status);
		if (status & SPU_STATUS_INVALID_ICV)
			return SPU_INVALID_ICV;
		return -EBADMSG;
	}
	return 0;
}
/**
 * spum_ccm_update_iv() - Update the IV as per the requirements for CCM mode.
 * @digestsize: Digest size of this request
 * @cipher_parms: (pointer to) cipher parameters, includes IV buf & IV len
 * @assoclen: Length of AAD data
 * @chunksize: length of input data to be sent in this req
 * @is_encrypt: true if this is an output/encrypt operation
 * @is_esp: true if this is an ESP / RFC4309 operation
 */
void spum_ccm_update_iv(unsigned int digestsize,
			struct spu_cipher_parms *cipher_parms,
			unsigned int assoclen,
			unsigned int chunksize,
			bool is_encrypt,
			bool is_esp)
{
	u8 L;		/* L from CCM algorithm, length of plaintext data */
	u8 mprime;	/* M' from CCM algo, (M - 2) / 2, where M=authsize */
	u8 adata;

	if (cipher_parms->iv_len != CCM_AES_IV_SIZE) {
		pr_err("%s(): Invalid IV len %d for CCM mode, should be %d\n",
		       __func__, cipher_parms->iv_len, CCM_AES_IV_SIZE);
		return;
	}

	/*
	 * IV needs to be formatted as follows:
	 *
	 * |          Byte 0               | Bytes 1 - N | Bytes (N+1) - 15 |
	 * | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 | Bits 7 - 0  | Bits 7 - 0       |
	 * | 0 |Ad?|(M - 2) / 2|   L - 1   |    Nonce    | Plaintext Length |
	 *
	 * Ad? = 1 if AAD present, 0 if not present
	 * M = size of auth field, 8, 12, or 16 bytes (SPU-M) -or-
	 *     4, 6, 8, 10, 12, 14, 16 bytes (SPU2)
	 * L = Size of Plaintext Length field; Nonce size = 15 - L
	 *
	 * It appears that the crypto API already expects the L-1 portion
	 * to be set in the first byte of the IV, which implicitly determines
	 * the nonce size, and also fills in the nonce. But the other bits
	 * in byte 0 as well as the plaintext length need to be filled in.
	 *
	 * In rfc4309/esp mode, L is not already in the supplied IV and
	 * we need to fill it in, as well as move the IV data to be after
	 * the nonce.
	 */
	if (is_esp) {
		L = CCM_ESP_L_VALUE;	/* RFC4309 has fixed L */
	} else {
		/* L' = plaintext length - 1 so Plaintext length is L' + 1 */
		L = ((cipher_parms->iv_buf[0] & CCM_B0_L_PRIME) >>
		     CCM_B0_L_PRIME_SHIFT) + 1;
	}

	mprime = (digestsize - 2) >> 1;	/* M' = (M - 2) / 2 */
	adata = (assoclen > 0);		/* adata = 1 if any associated data */

	cipher_parms->iv_buf[0] = (adata << CCM_B0_ADATA_SHIFT) |
				  (mprime << CCM_B0_M_PRIME_SHIFT) |
				  ((L - 1) << CCM_B0_L_PRIME_SHIFT);
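
	/*
	 * Worked example (illustrative only, assuming the CCM_B0_* shifts are
	 * 6, 3 and 0 as in RFC 3610): for a 16-byte ICV with AAD present and
	 * L = 4, mprime = (16 - 2) / 2 = 7 and adata = 1, so byte 0 becomes
	 * (1 << 6) | (7 << 3) | (4 - 1) = 0x7b.
	 */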
	/* Nonce is already filled in by crypto API, and is 15 - L bytes */

	/* Don't include digest in plaintext size when decrypting */
	if (!is_encrypt)
		chunksize -= digestsize;

	/* Fill in length of plaintext, formatted to be L bytes long */
	format_value_ccm(chunksize, &cipher_parms->iv_buf[15 - L + 1], L);
}
/**
 * spum_wordalign_padlen() - Given the length of a data field, determine the
 * padding required to align the data following this field on a 4-byte boundary.
 * @data_size: length of data field in bytes
 *
 * Return: length of status field padding, in bytes
 */
u32 spum_wordalign_padlen(u32 data_size)
{
	return ((data_size + 3) & ~3) - data_size;
}
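
/*
 * Worked example for spum_wordalign_padlen() (illustrative only): a 10-byte
 * field needs ((10 + 3) & ~3) - 10 = 2 pad bytes, while any length that is
 * already a multiple of 4 needs none.
 */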