/*
 * AES XCBC routines supporting the Power 7+ Nest Accelerators driver
 *
 * Copyright (C) 2011-2012 International Business Machines Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 only.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Author: Kent Yoder <yoder1@us.ibm.com>
 */
#include <crypto/internal/hash.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/crypto.h>

#include "nx_csbcpb.h"
#include "nx.h"
struct xcbc_state {
	u8 state[AES_BLOCK_SIZE];
	unsigned int count;
	u8 buffer[AES_BLOCK_SIZE];
};
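/*
 * Note on the descriptor state above: 'state' is the buffer the running
 * CV/MAC from the coprocessor is written back into, 'count' is how many
 * bytes are currently buffered, and 'buffer' holds the partial (or
 * deliberately held-back) block between calls; see nx_xcbc_update() and
 * nx_xcbc_final() below.
 */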
static int nx_xcbc_set_key(struct crypto_shash *desc,
			   const u8            *in_key,
			   unsigned int         key_len)
{
	struct nx_crypto_ctx *nx_ctx = crypto_shash_ctx(desc);

	switch (key_len) {
	case AES_KEYSIZE_128:
		nx_ctx->ap = &nx_ctx->props[NX_PROPS_AES_128];
		break;
	default:
		return -EINVAL;
	}

	memcpy(nx_ctx->priv.xcbc.key, in_key, key_len);

	return 0;
}
/*
 * Based on RFC 3566, for a zero-length message:
 *
 * K1 = E(K, 0x01010101010101010101010101010101)
 * K3 = E(K, 0x03030303030303030303030303030303)
 * E[0] = 0x00000000000000000000000000000000
 * M[1] = 0x80000000000000000000000000000000 (0 length message with padding)
 * E[1] = E(K1, M[1] ^ E[0] ^ K3)
 * Tag  = E[1]
 */
static int nx_xcbc_empty(struct shash_desc *desc, u8 *out)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	u8 keys[2][AES_BLOCK_SIZE];
	u8 key[AES_BLOCK_SIZE];
	int rc = 0;

	/* Change to ECB mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_ECB;
	memcpy(key, csbcpb->cpb.aes_xcbc.key, AES_BLOCK_SIZE);
	memcpy(csbcpb->cpb.aes_ecb.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;

	/* K1 and K3 base patterns */
	memset(keys[0], 0x01, sizeof(keys[0]));
	memset(keys[1], 0x03, sizeof(keys[1]));

	/* Generate K1 and K3 by encrypting the patterns */
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys, sizeof(keys),
				 nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *) keys, sizeof(keys),
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

	/* XOR K3 with the padding for a 0 length message */
	keys[1][0] ^= 0x80;

	/* Encrypt the final result */
	memcpy(csbcpb->cpb.aes_ecb.key, keys[0], AES_BLOCK_SIZE);
	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *) keys[1], sizeof(keys[1]),
				 nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);
	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;
	atomic_inc(&(nx_ctx->stats->aes_ops));

out:
	/* Restore XCBC mode */
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;
	memcpy(csbcpb->cpb.aes_xcbc.key, key, AES_BLOCK_SIZE);
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_ENDE_ENCRYPT;

	return rc;
}
static int nx_xcbc_init(struct shash_desc *desc)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *out_sg;

	nx_ctx_init(nx_ctx, HCOP_FC_AES);

	memset(sctx, 0, sizeof *sctx);

	NX_CPB_SET_KEY_SIZE(csbcpb, NX_KS_AES_128);
	csbcpb->cpb.hdr.mode = NX_MODE_AES_XCBC_MAC;

	/* move the key set by nx_xcbc_set_key() into the CPB and wipe the
	 * local copy */
	memcpy(csbcpb->cpb.aes_xcbc.key, nx_ctx->priv.xcbc.key, AES_BLOCK_SIZE);
	memset(nx_ctx->priv.xcbc.key, 0, sizeof(nx_ctx->priv.xcbc.key));

	out_sg = nx_build_sg_list(nx_ctx->out_sg, (u8 *)sctx->state,
				  AES_BLOCK_SIZE, nx_ctx->ap->sglen);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	return 0;
}
static int nx_xcbc_update(struct shash_desc *desc,
			  const u8          *data,
			  unsigned int       len)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg;
	u32 to_process, leftover, total;
	u32 max_sg_len;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	total = sctx->count + len;

	/* 2 cases for total data len:
	 * 1: <= AES_BLOCK_SIZE: copy into state, return 0
	 * 2: > AES_BLOCK_SIZE: process X blocks, copy in leftover
	 */
	if (total <= AES_BLOCK_SIZE) {
		memcpy(sctx->buffer + sctx->count, data, len);
		sctx->count += len;
		goto out;
	}

	in_sg = nx_ctx->in_sg;
	max_sg_len = min_t(u32, nx_driver.of.max_sg_len/sizeof(struct nx_sg),
			   nx_ctx->ap->sglen);

	do {
		/* to_process: the AES_BLOCK_SIZE data chunk to process in this
		 * update */
		to_process = min_t(u64, total, nx_ctx->ap->databytelen);
		to_process = min_t(u64, to_process,
				   NX_PAGE_SIZE * (max_sg_len - 1));
		to_process = to_process & ~(AES_BLOCK_SIZE - 1);
		leftover = total - to_process;

		/* the hardware will not accept a 0 byte operation for this
		 * algorithm and the operation MUST be finalized to be correct.
		 * So if we happen to get an update that falls on a block sized
		 * boundary, we must save off the last block to finalize with
		 * later. */
		if (!leftover) {
			to_process -= AES_BLOCK_SIZE;
			leftover = AES_BLOCK_SIZE;
		}

		if (sctx->count) {
			in_sg = nx_build_sg_list(nx_ctx->in_sg,
						 (u8 *) sctx->buffer,
						 sctx->count,
						 max_sg_len);
		}
		in_sg = nx_build_sg_list(in_sg,
					 (u8 *) data,
					 to_process - sctx->count,
					 max_sg_len);
		nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) *
					sizeof(struct nx_sg);

		/* we've hit the nx chip previously and we're updating again,
		 * so copy over the partial digest */
		if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
			memcpy(csbcpb->cpb.aes_xcbc.cv,
			       csbcpb->cpb.aes_xcbc.out_cv_mac,
			       AES_BLOCK_SIZE);
		}

		NX_CPB_FDM(csbcpb) |= NX_FDM_INTERMEDIATE;
		if (!nx_ctx->op.inlen || !nx_ctx->op.outlen) {
			rc = -EINVAL;
			goto out;
		}

		rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
				   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
		if (rc)
			goto out;

		atomic_inc(&(nx_ctx->stats->aes_ops));

		/* everything after the first update is continuation */
		NX_CPB_FDM(csbcpb) |= NX_FDM_CONTINUATION;

		total -= to_process;
		data += to_process - sctx->count;
		sctx->count = 0;
		in_sg = nx_ctx->in_sg;
	} while (leftover > AES_BLOCK_SIZE);

	/* copy the leftover back into the state struct */
	memcpy(sctx->buffer, data, leftover);
	sctx->count = leftover;

out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
static int nx_xcbc_final(struct shash_desc *desc, u8 *out)
{
	struct xcbc_state *sctx = shash_desc_ctx(desc);
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(&desc->tfm->base);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct nx_sg *in_sg, *out_sg;
	unsigned long irq_flags;
	int rc = 0;

	spin_lock_irqsave(&nx_ctx->lock, irq_flags);

	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
		/* we've hit the nx chip previously, now we're finalizing,
		 * so copy over the partial digest */
		memcpy(csbcpb->cpb.aes_xcbc.cv,
		       csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
	} else if (sctx->count == 0) {
		/*
		 * we've never seen an update, so this is a 0 byte op. The
		 * hardware cannot handle a 0 byte op, so just ECB to
		 * generate the hash.
		 */
		rc = nx_xcbc_empty(desc, out);
		goto out;
	}

	/* final is represented by continuing the operation and indicating that
	 * this is not an intermediate operation */
	NX_CPB_FDM(csbcpb) &= ~NX_FDM_INTERMEDIATE;

	in_sg = nx_build_sg_list(nx_ctx->in_sg, (u8 *)sctx->buffer,
				 sctx->count, nx_ctx->ap->sglen);
	out_sg = nx_build_sg_list(nx_ctx->out_sg, out, AES_BLOCK_SIZE,
				  nx_ctx->ap->sglen);

	nx_ctx->op.inlen = (nx_ctx->in_sg - in_sg) * sizeof(struct nx_sg);
	nx_ctx->op.outlen = (nx_ctx->out_sg - out_sg) * sizeof(struct nx_sg);

	if (!nx_ctx->op.outlen) {
		rc = -EINVAL;
		goto out;
	}

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));

	memcpy(out, csbcpb->cpb.aes_xcbc.out_cv_mac, AES_BLOCK_SIZE);
out:
	spin_unlock_irqrestore(&nx_ctx->lock, irq_flags);
	return rc;
}
struct shash_alg nx_shash_aes_xcbc_alg = {
	.digestsize = AES_BLOCK_SIZE,
	.init       = nx_xcbc_init,
	.update     = nx_xcbc_update,
	.final      = nx_xcbc_final,
	.setkey     = nx_xcbc_set_key,
	.descsize   = sizeof(struct xcbc_state),
	.statesize  = sizeof(struct xcbc_state),
	.base       = {
		.cra_name        = "xcbc(aes)",
		.cra_driver_name = "xcbc-aes-nx",
		.cra_flags       = CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize   = AES_BLOCK_SIZE,
		.cra_module      = THIS_MODULE,
		.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
		.cra_init        = nx_crypto_ctx_aes_xcbc_init,
		.cra_exit        = nx_crypto_ctx_exit,
	}
};
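/*
 * Usage sketch (illustrative, not part of this file): once this alg is
 * registered, a kernel caller reaches it through the generic shash API.
 * Error handling is omitted and 'key', 'data', 'data_len' and 'mac' are
 * placeholders:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("xcbc(aes)", 0, 0);
 *	struct shash_desc *desc = kmalloc(sizeof(*desc) +
 *					  crypto_shash_descsize(tfm),
 *					  GFP_KERNEL);
 *
 *	desc->tfm = tfm;
 *	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
 *	crypto_shash_setkey(tfm, key, AES_KEYSIZE_128);
 *	crypto_shash_digest(desc, data, data_len, mac);	// 16-byte MAC out
 *	kfree(desc);
 *	crypto_free_shash(tfm);
 */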