/*
 * Shared glue code for 128bit block ciphers
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 *
 * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by:
 *   Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 * CTR part based on code (crypto/ctr.c) by:
 *   (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 */
#include <linux/module.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>
#include <crypto/internal/skcipher.h>
#include <crypto/xts.h>
#include <asm/crypto/glue_helper.h>
int glue_ecb_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u8 *src = walk.src.virt.addr;
		u8 *dst = walk.dst.virt.addr;
		unsigned int func_bytes;
		unsigned int i;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		for (i = 0; i < gctx->num_funcs; i++) {
			func_bytes = bsize * gctx->funcs[i].num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ecb(ctx, dst, src);
				src += func_bytes;
				dst += func_bytes;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_ecb_req_128bit);
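
/*
 * Usage sketch (illustrative only, not part of this file): a cipher driver
 * describes its batched and single-block ECB helpers in a common_glue_ctx
 * and forwards its skcipher ->encrypt/->decrypt handlers here.  The
 * "mycipher_*" names and the 8-block batch width below are hypothetical
 * placeholders; GLUE_FUNC_CAST() is assumed to come from
 * <asm/crypto/glue_helper.h>.
 *
 *	static const struct common_glue_ctx mycipher_enc = {
 *		.num_funcs = 2,
 *		.fpu_blocks_limit = 8,
 *
 *		.funcs = { {
 *			.num_blocks = 8,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_ecb_enc_8way) }
 *		}, {
 *			.num_blocks = 1,
 *			.fn_u = { .ecb = GLUE_FUNC_CAST(mycipher_encrypt) }
 *		} }
 *	};
 *
 *	static int ecb_encrypt(struct skcipher_request *req)
 *	{
 *		return glue_ecb_req_128bit(&mycipher_enc, req);
 *	}
 *
 * funcs[] must be ordered from the largest num_blocks down to a final
 * single-block entry, so the loop above can fall back batch by batch.
 */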
int glue_cbc_encrypt_req_128bit(const common_glue_func_t fn,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = (u128 *)walk.src.virt.addr;
		u128 *dst = (u128 *)walk.dst.virt.addr;
		u128 *iv = (u128 *)walk.iv;

		do {
			u128_xor(dst, src, iv);
			fn(ctx, (u8 *)dst, (u8 *)dst);
			iv = dst;
			src += 1;
			dst += 1;
			nbytes -= bsize;
		} while (nbytes >= bsize);

		*(u128 *)walk.iv = *iv;
		err = skcipher_walk_done(&walk, nbytes);
	}
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_encrypt_req_128bit);
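
/*
 * Note: unlike the other helpers, CBC encryption takes a single one-block
 * function rather than a common_glue_ctx.  In CBC each ciphertext block is
 * E(K, plaintext block XOR previous ciphertext block), seeded with the IV,
 * so every block depends on the one before it and there is nothing to hand
 * to a parallel multi-block implementation.  Only CBC decryption (below)
 * can be batched.
 */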
int glue_cbc_decrypt_req_128bit(const struct common_glue_ctx *gctx,
				struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes)) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		u128 last_iv;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);
		/* Start of the last block. */
		src += nbytes / bsize - 1;
		dst += nbytes / bsize - 1;

		/* The last ciphertext block becomes the next IV. */
		last_iv = *src;

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				src -= num_blocks - 1;
				dst -= num_blocks - 1;

				gctx->funcs[i].fn_u.cbc(ctx, dst, src);

				nbytes -= func_bytes;
				if (nbytes < bsize)
					goto done;

				u128_xor(dst, dst, --src);
				dst--;
			} while (nbytes >= func_bytes);
		}
done:
		u128_xor(dst, dst, (u128 *)walk.iv);
		*(u128 *)walk.iv = last_iv;
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);
	return err;
}
EXPORT_SYMBOL_GPL(glue_cbc_decrypt_req_128bit);
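
/*
 * The decrypt path can be batched because every ciphertext block is already
 * known.  The loop above runs backwards from the last block so that in-place
 * requests stay correct: a batch is decrypted, each result is XORed with the
 * ciphertext block preceding it, and the saved last ciphertext block becomes
 * the next IV.  A num_blocks == 1 fn_u.cbc entry is therefore expected to be
 * a plain single-block decrypt; the chaining XOR for it is done here.
 */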
int glue_ctr_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req)
{
	void *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) >= bsize) {
		const u128 *src = walk.src.virt.addr;
		u128 *dst = walk.dst.virt.addr;
		unsigned int func_bytes, num_blocks;
		unsigned int i;
		le128 ctrblk;

		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
					     &walk, fpu_enabled, nbytes);

		be128_to_le128(&ctrblk, (be128 *)walk.iv);

		for (i = 0; i < gctx->num_funcs; i++) {
			num_blocks = gctx->funcs[i].num_blocks;
			func_bytes = bsize * num_blocks;

			if (nbytes < func_bytes)
				continue;

			/* Process multi-block batch */
			do {
				gctx->funcs[i].fn_u.ctr(ctx, dst, src, &ctrblk);
				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				break;
		}

		le128_to_be128((be128 *)walk.iv, &ctrblk);
		err = skcipher_walk_done(&walk, nbytes);
	}

	glue_fpu_end(fpu_enabled);

	/* Handle a partial final block with the single-block entry. */
	if (nbytes) {
		le128 ctrblk;
		u128 tmp;

		be128_to_le128(&ctrblk, (be128 *)walk.iv);
		memcpy(&tmp, walk.src.virt.addr, nbytes);
		gctx->funcs[gctx->num_funcs - 1].fn_u.ctr(ctx, &tmp, &tmp,
							  &ctrblk);
		memcpy(walk.dst.virt.addr, &tmp, nbytes);
		le128_to_be128((be128 *)walk.iv, &ctrblk);

		err = skcipher_walk_done(&walk, 0);
	}

	return err;
}
EXPORT_SYMBOL_GPL(glue_ctr_req_128bit);
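
/*
 * Sketch of the single-block fn_u.ctr callback this helper expects for its
 * num_blocks == 1 fallback (the "mycipher_*" names are hypothetical; the
 * le128 helpers come from <asm/crypto/glue_helper.h>): encrypt the current
 * counter, advance it, and XOR the keystream into the data.
 *
 *	static void mycipher_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
 *				       le128 *iv)
 *	{
 *		be128 ctrblk;
 *
 *		le128_to_be128(&ctrblk, iv);
 *		le128_inc(iv);
 *
 *		mycipher_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk);
 *		u128_xor(dst, src, (u128 *)&ctrblk);
 *	}
 *
 * The tail handling above (nbytes < bsize) reuses this last, single-block
 * entry on a stack copy so a partial final block is still CTR-processed.
 */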
static unsigned int __glue_xts_req_128bit(const struct common_glue_ctx *gctx,
					  void *ctx,
					  struct skcipher_walk *walk)
{
	const unsigned int bsize = 128 / 8;
	unsigned int nbytes = walk->nbytes;
	u128 *src = walk->src.virt.addr;
	u128 *dst = walk->dst.virt.addr;
	unsigned int num_blocks, func_bytes;
	unsigned int i;

	/* Process multi-block batch */
	for (i = 0; i < gctx->num_funcs; i++) {
		num_blocks = gctx->funcs[i].num_blocks;
		func_bytes = bsize * num_blocks;

		if (nbytes >= func_bytes) {
			do {
				gctx->funcs[i].fn_u.xts(ctx, dst, src,
							walk->iv);

				src += num_blocks;
				dst += num_blocks;
				nbytes -= func_bytes;
			} while (nbytes >= func_bytes);

			if (nbytes < bsize)
				goto done;
		}
	}

done:
	return nbytes;
}
int glue_xts_req_128bit(const struct common_glue_ctx *gctx,
			struct skcipher_request *req,
			common_glue_func_t tweak_fn, void *tweak_ctx,
			void *crypt_ctx)
{
	const unsigned int bsize = 128 / 8;
	struct skcipher_walk walk;
	bool fpu_enabled = false;
	unsigned int nbytes;
	int err;

	err = skcipher_walk_virt(&walk, req, false);
	nbytes = walk.nbytes;
	if (!nbytes)
		return err;

	/* set minimum length to bsize, for tweak_fn */
	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
				     &walk, fpu_enabled,
				     nbytes < bsize ? bsize : nbytes);

	/* calculate first value of T */
	tweak_fn(tweak_ctx, walk.iv, walk.iv);

	while (nbytes) {
		nbytes = __glue_xts_req_128bit(gctx, crypt_ctx, &walk);

		err = skcipher_walk_done(&walk, nbytes);
		nbytes = walk.nbytes;
	}

	glue_fpu_end(fpu_enabled);

	return err;
}
EXPORT_SYMBOL_GPL(glue_xts_req_128bit);
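
/*
 * Usage sketch (hypothetical names, not part of this file): an XTS driver
 * typically keeps two expanded keys in its tfm context, passes its plain
 * single-block encrypt as tweak_fn together with the tweak-key context, and
 * hands the data-key context in as crypt_ctx.  The context layout below is
 * an assumption for illustration only.
 *
 *	struct mycipher_xts_ctx {
 *		struct mycipher_ctx tweak_ctx;
 *		struct mycipher_ctx crypt_ctx;
 *	};
 *
 *	static int xts_encrypt(struct skcipher_request *req)
 *	{
 *		struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
 *		struct mycipher_xts_ctx *ctx = crypto_skcipher_ctx(tfm);
 *
 *		return glue_xts_req_128bit(&mycipher_enc_xts, req,
 *					   GLUE_FUNC_CAST(mycipher_encrypt),
 *					   &ctx->tweak_ctx, &ctx->crypt_ctx);
 *	}
 */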
void glue_xts_crypt_128bit_one(void *ctx, u128 *dst, const u128 *src, le128 *iv,
			       common_glue_func_t fn)
{
	le128 ivblk = *iv;

	/* generate next IV */
	gf128mul_x_ble(iv, &ivblk);

	/* CC <- T xor C */
	u128_xor(dst, src, (u128 *)&ivblk);

	/* PP <- D(Key2,CC) */
	fn(ctx, (u8 *)dst, (u8 *)dst);

	/* P <- T xor PP */
	u128_xor(dst, dst, (u128 *)&ivblk);
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit_one);
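
/*
 * This helper performs one block of XTS: XOR the data with the tweak T,
 * run the block cipher on it, XOR with T again, and step T to the next
 * tweak via gf128mul_x_ble() (multiplication by x in GF(2^128)).  A
 * driver's num_blocks == 1 fn_u.xts entry can simply wrap it (hypothetical
 * name below):
 *
 *	static void mycipher_xts_enc(void *ctx, u128 *dst, const u128 *src,
 *				     le128 *iv)
 *	{
 *		glue_xts_crypt_128bit_one(ctx, dst, src, iv,
 *					  GLUE_FUNC_CAST(mycipher_encrypt));
 *	}
 */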
MODULE_LICENSE("GPL");