/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */

#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/internal/skcipher.h>
#include <crypto/lrw.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
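
/*
 * Cipher modules describe themselves to this glue code with a
 * struct common_glue_ctx: an array of function entries ordered from the
 * widest multi-block routine down to a mandatory single-block entry
 * (num_blocks == 1).  A minimal sketch, assuming hypothetical 8-way and
 * single-block Serpent helpers (the serpent_* names below are illustrative
 * only, not part of this file):
 *
 *	static const struct common_glue_ctx serpent_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(serpent_ecb_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(__serpent_encrypt) }
 *		} }
 *	};
 */

/*
 * ECB body: walk the request one segment at a time and, inside an FPU/SIMD
 * section, process as many blocks as possible with the widest batch function
 * before falling back to the narrower entries.
 */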
static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
				   struct blkcipher_desc *desc,
				   struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes, i, func_bytes;
	bool fpu_enabled = false;
	int err;

	err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		u8 *wsrc = walk->src.virt.addr;
		u8 *wdst = walk->dst.virt.addr;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);

		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			/* Process multi-block batch */
			if (nbytes >= func_bytes) {
				do {
					gctx->funcs[i].fn_u.ecb(ctx, wdst,
								wsrc);

					wsrc += func_bytes;
					wdst += func_bytes;
					nbytes -= func_bytes;
				} while (nbytes >= func_bytes);

				if (nbytes < bsize)
					goto done;
			}
		}

done:
		err = blkcipher_walk_done(desc, walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
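
/* Exported ECB entry point: set up the walk and hand it to the helper above. */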
int glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	return __glue_ecb_crypt_128bit(gctx, desc, &walk);
}
EXPORT_SYMBOL_GPL(glue_ecb_crypt_128bit);
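
/*
 * CBC encryption cannot be parallelised (each block depends on the previous
 * ciphertext), so this helper simply chains the one-block function across the
 * current walk segment and stores the last ciphertext block back as the IV.
 */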
static unsigned int __glue_cbc_encrypt_128bit(const common_glue_func_t fn,
					      struct blkcipher_desc *desc,
					      struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 *iv = (u128 *)walk->iv;

	do {
		u128_xor(dst, src, iv);
		fn(ctx, (u8 *)dst, (u8 *)dst);
		iv = dst;

		src += 1;
		dst += 1;
		nbytes -= bsize;
	} while (nbytes >= bsize);

	*(u128 *)walk->iv = *iv;
	return nbytes;
}
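
/* Exported CBC encryption entry point; no FPU/SIMD section is needed here. */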
int glue_cbc_encrypt_128bit(const common_glue_func_t fn,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_128bit);
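
/*
 * CBC decryption can be parallelised: decrypt a batch of blocks with the
 * widest available function, working backwards from the end of the segment,
 * then XOR each plaintext block with the previous ciphertext block.
 */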
static unsigned int
__glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc,
			  struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	u128 last_iv;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Start of the last block. */
	src += nbytes / bsize - 1;
	dst += nbytes / bsize - 1;

	last_iv = *src;

	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		/* Process multi-block batch */
		if (nbytes >= func_bytes) {
			do {
				nbytes -= func_bytes - bsize;
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= bsize;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, src - 1);
				src -= 1;
				dst -= 1;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	u128_xor(dst, dst, (u128 *)walk->iv);
	*(u128 *)walk->iv = last_iv;

	return nbytes;
}
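
/*
 * Exported CBC decryption entry point: walk the request, enabling the FPU
 * for segments that are large enough to benefit from the batch functions.
 */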
int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx,
			    struct blkcipher_desc *desc,
			    struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
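
/*
 * Handle the final partial block in CTR mode: run the one-block CTR function
 * on a stack buffer holding the leftover bytes, then copy only those bytes
 * back out.
 */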
static void glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr,
					struct blkcipher_desc *desc,
					struct blkcipher_walk *walk)
{
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	u8 *src = (u8 *)walk->src.virt.addr;
	u8 *dst = (u8 *)walk->dst.virt.addr;
	unsigned int nbytes = walk->nbytes;
	le128 ctrblk;
	u128 tmp;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	memcpy(&tmp, src, nbytes);
	fn_ctr(ctx, &tmp, &tmp, &ctrblk);
	memcpy(dst, &tmp, nbytes);

	le128_to_be128((be128 *)walk->iv, &ctrblk);
}
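
/*
 * CTR body: convert the counter to little-endian once, run the widest batch
 * function that still fits, and convert the updated counter back afterwards.
 */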
static unsigned int __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	void *ctx = crypto_blkcipher_ctx(desc->tfm);
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	le128 ctrblk;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	be128_to_le128(&ctrblk, (be128 *)walk->iv);

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	le128_to_be128((be128 *)walk->iv, &ctrblk);
	return nbytes;
}
int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt_block(desc, &walk, bsize);

	while ((nbytes = walk.nbytes) >= bsize) {
		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     desc, fpu_enabled, nbytes);
		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	if (walk.nbytes) {
		glue_ctr_crypt_final_128bit(
			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
		err = blkcipher_walk_done(desc, &walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_crypt_128bit);
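
/*
 * XTS body for the legacy blkcipher walk: each fn_u.xts call consumes and
 * updates the tweak stored in walk->iv.
 */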
static unsigned int __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
					    void *ctx,
					    struct blkcipher_desc *desc,
					    struct blkcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = (u128 *)walk->src.virt.addr;
	u128 *dst = (u128 *)walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							(le128 *)walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}
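
/* Same XTS body as above, but driven by the skcipher_walk interface. */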
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}
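
/*
 * Top-level XTS helpers: compute the initial tweak T with tweak_fn, then
 * keep the FPU enabled while the batch helpers above consume the walk.
 */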
/* for implementations implementing faster XTS IV generator */
int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx,
			  struct blkcipher_desc *desc, struct scatterlist *dst,
			  struct scatterlist *src, unsigned int nbytes,
			  void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src),
			  void *tweak_ctx, void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	bool fpu_enabled = false;
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);

	err = blkcipher_walk_virt(desc, &walk);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     desc, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);

		err = blkcipher_walk_done(desc, &walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
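
/* skcipher_request based variant of glue_xts_crypt_128bit(). */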
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_skwalk_fpu_begin(bsize, gctx->fpu_blocks_limit,
					    &walk, fpu_enabled,
					    nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
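
/*
 * Single-block XTS step for implementations without a dedicated XTS routine:
 * XOR with the tweak, run the block cipher, XOR again, and advance the tweak
 * by multiplying it by x in GF(2^128) (ble representation).
 */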
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	le128_gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);

MODULE_LICENSE("GPL");