// SPDX-License-Identifier: GPL-2.0-only
/*
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"
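
/*
 * Per-request state for the incremental XCBC computation: 'state' receives
 * the intermediate MAC from each NX operation, while 'buffer'/'count' hold
 * up to one block of not-yet-processed input (the final block must be held
 * back so it can be folded in during final, per RFC 3566).
 */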
struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};

static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8            *in_key,
			   unsigned int         key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);

	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0-length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag = E[1]
 */
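/*
 * The function below computes this with two ECB passes on the NX engine:
 * the first encrypts the 0x01/0x03 patterns in place to produce K1 and K3,
 * the second encrypts M[1] ^ E[0] ^ K3 under K1 to produce the tag.
 */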
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[AES_BLOCK_SIZE];
	unsigned int len;
	int rc = 0;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	/* Generate K1 and K3 by encrypting the base patterns */
	len = sizeof(keys);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys)) {
		rc = -EINVAL;
		goto out;
	}
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
				  nx_ctx->ap->sglen);

	if (len != sizeof(keys)) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0-length message */
	keys[1][0] ^= 0x80;
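	/* keys[1] now holds M[1] ^ E[0] ^ K3 (E[0] is all zeroes) */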

	len = sizeof(keys[1]);

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
				 nx_ctx->ap->sglen);

	if (len != sizeof(keys[1])) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}
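
/*
 * cra_init hook: performs the base NX context setup, then fixes the CPB
 * header for XCBC mode with a 128-bit key (AES-XCBC-MAC is defined only
 * for AES-128).
 */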
static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int err;

	err = nx_crypto_ctx_aes_xcbc_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	return 0;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	struct nx_sg *out_sg;
	u32 to_process = 0, leftover, total;
	unsigned int max_sg_len;
	unsigned long irq_flags;
	unsigned int data_len;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 *  1: <= AES_BLOCK_SIZE: copy into buffer, return 0
	 *  2: > AES_BLOCK_SIZE: process whole blocks, buffer the leftover
	 */
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}
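
	/* Case 2: at least one full block is available; bound the scatter/
	 * gather length by the driver limit and by what the coprocessor
	 * accepts per operation. */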
	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
			   nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, nx_ctx->ap->sglen);

	if (data_len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);
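
	/* Every pass writes the intermediate MAC into sctx->state, so the
	 * output list built above is reused unchanged by each iteration. */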
	do {
		to_process = total - to_process;
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		leftover = total - to_process;
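
		/* For example, with total = 40 (hypothetical value):
		 * to_process rounds down to 32 and leftover = 8. */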

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			data_len = sctx->count;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 &data_len,
						 max_sg_len);
			if (data_len != sctx->count) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - sctx->count;
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 &data_len,
					 max_sg_len);

		if (data_len != to_process - sctx->count) {
			rc = -EINVAL;
			goto out;
		}

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	unsigned int len;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/*
		 * we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* final is represented by continuing the operation and indicating
	 * that this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	len = sctx->count;
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 &len, nx_ctx->ap->sglen);

	if (len != sctx->count) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op, 0);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_priority    = 300,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init2,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
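
/*
 * This alg is registered by the NX driver core (nx.c) rather than by this
 * file. Usage sketch (hypothetical caller, not part of this driver): once
 * registered, the MAC is reached through the generic shash API, e.g.:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	desc->tfm = tfm;
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_digest(desc, data, data_len, mac);
 *	crypto_free_shash(tfm);
 */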