/**
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <asm/vio.h>

#include "nx_csbcpb.h"
#include "nx.h"

struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];	/* running CV / MAC state */
	unsigned int count;		/* bytes held in buffer below */
	u8 buffer[AES_BLOCK_SIZE];	/* partial block kept for final */
};

static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8            *in_key,
			   unsigned int          key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(csbcpb->cpb.aes_xcbc.key, in_key, key_len);

	return 0;
}

/*
 * Based on RFC 3566, for a zero-length message:
 *
 * n = 1
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag  = E[1]
 *
 * (An illustrative software sketch of the same computation follows
 * nx_xcbc_empty() below.)
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[32];
	int rc = 0;
	int len;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	len = sizeof(keys);
	/* Generate K1 and K3 encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, &len,
				 nx_ctx->ap->sglen);
	if (len != sizeof(keys))
		return -EINVAL;

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, &len,
				  nx_ctx->ap->sglen);
	if (len != sizeof(keys))
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0 length message */
	keys[1][0] ^= 0x80;

	len = sizeof(keys[1]);

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], &len,
				 nx_ctx->ap->sglen);
	if (len != sizeof(keys[1]))
		return -EINVAL;

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);
	if (len != AES_BLOCK_SIZE)
		return -EINVAL;

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}
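
/*
 * Illustrative sketch only (not part of the driver): the zero-length
 * tag that nx_xcbc_empty() offloads to the coprocessor can also be
 * derived in software with the generic cipher API, which may help when
 * reading the RFC 3566 comment above.  The helper name is hypothetical,
 * and it assumes the generic "aes" cipher plus <linux/err.h> helpers
 * are available in this configuration.
 */
static int __maybe_unused nx_xcbc_empty_sw_sketch(const u8 *key, u8 *out)
{
	struct crypto_cipher *tfm;
	u8 k1[AES_BLOCK_SIZE], k3[AES_BLOCK_SIZE];
	u8 m1[AES_BLOCK_SIZE] = { 0x80 };	/* padded 0-length message */
	int rc;

	tfm = crypto_alloc_cipher("aes", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_cipher_setkey(tfm, key, AES_BLOCK_SIZE);
	if (rc)
		goto out;

	/* K1 = E(K, 0x01..01), K3 = E(K, 0x03..03) */
	memset(k1, 0x01, sizeof(k1));
	memset(k3, 0x03, sizeof(k3));
	crypto_cipher_encrypt_one(tfm, k1, k1);
	crypto_cipher_encrypt_one(tfm, k3, k3);

	/* E[1] = E(K1, M[1] ^ E[0] ^ K3), with E[0] = 0 */
	crypto_xor(m1, k3, AES_BLOCK_SIZE);
	rc = crypto_cipher_setkey(tfm, k1, AES_BLOCK_SIZE);
	if (rc)
		goto out;
	crypto_cipher_encrypt_one(tfm, out, m1);
out:
	crypto_free_cipher(tfm);
	return rc;
}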

static int nx_crypto_ctx_aes_xcbc_init2(struct crypto_tfm *tfm)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	int err;

	err = nx_crypto_ctx_aes_xcbc_init(tfm);
	if (err)
		return err;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	return 0;
}

static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);

	memset(sctx, 0, sizeof *sctx);

	return 0;
}

static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	struct nx_sg *out_sg;
	u32 to_process = 0, leftover, total;
	unsigned int max_sg_len;
	unsigned long irq_flags;
	int rc = 0;
	int data_len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 *  1: <= AES_BLOCK_SIZE: copy into state, return 0
	 *  2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
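	/*
	 * For example (illustrative numbers only): a first 10 byte update
	 * is case 1 and is simply buffered; a following 40 byte update
	 * makes total = 50, so case 2 sends 48 bytes (three blocks) to the
	 * coprocessor and keeps the remaining 2 bytes buffered for later.
	 */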
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u64, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
				nx_ctx->ap->sglen);
	max_sg_len = min_t(u64, max_sg_len,
				nx_ctx->ap->databytelen/NX_PAGE_SIZE);

	data_len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  &data_len, nx_ctx->ap->sglen);

	if (data_len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	do {
		to_process = total - to_process;
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);

		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}
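
		/*
		 * Example of the boundary case above: if total were exactly
		 * 32 bytes, only the first 16 are processed here and the
		 * last block stays in sctx->buffer for nx_xcbc_final().
		 */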

		if (sctx->count) {
			data_len = sctx->count;
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 &data_len,
						 max_sg_len);
			if (data_len != sctx->count) {
				rc = -EINVAL;
				goto out;
			}
		}

		data_len = to_process - sctx->count;
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 &data_len,
					 max_sg_len);

		if (data_len != to_process - sctx->count) {
			rc = -EINVAL;
			goto out;
		}

		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;
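
		/*
		 * Summary of the flag handshake used here: the first pass
		 * runs with only NX_FDM_INTERMEDIATE set, later passes run
		 * with INTERMEDIATE | CONTINUATION, and nx_xcbc_final()
		 * clears INTERMEDIATE while leaving CONTINUATION set so the
		 * coprocessor closes out the MAC.
		 */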

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;
	int len;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/*
		 * we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	len = sctx->count;
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 &len, nx_ctx->ap->sglen);

	if (len != sctx->count) {
		rc = -EINVAL;
		goto out;
	}

	len = AES_BLOCK_SIZE;
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, &len,
				  nx_ctx->ap->sglen);

	if (len != AES_BLOCK_SIZE) {
		rc = -EINVAL;
		goto out;
	}

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}

struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init2,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
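
/*
 * Illustrative usage sketch (not part of the driver): a kernel caller
 * reaches the implementation above through the generic shash API once
 * "xcbc(aes)" is registered.  The function name below is hypothetical,
 * and <linux/slab.h> plus <linux/err.h> are assumed to be available.
 */
static int __maybe_unused example_xcbc_aes_digest(const u8 *key,
						  const u8 *data,
						  unsigned int data_len,
						  u8 *mac)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int rc;

	tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* only AES-128 keys are accepted, see nx_xcbc_set_key() */
	rc = crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
	if (rc)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* init + update + final in one call */
	rc = crypto_shash_digest(desc, data, data_len, mac);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return rc;
}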