crypto/vmac.c
/*
 * VMAC: Message Authentication Code using Universal Hashing
 *
 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
 *
 * Copyright (c) 2009, Intel Corporation.
 * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Derived from:
 *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 *	This implementation is hereby placed in the public domain.
 *	The authors offer no warranty. Use at your own risk.
 *	Last modified: 17 APR 08, 1700 PDT
 */

#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>

/*
 * User definable settings.
 */
#define VMAC_TAG_LEN	64
#define VMAC_KEY_SIZE	128	/* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES	128	/* Must be 2^i for some 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES	16

/* per-transform (per-key) context */
struct vmac_tfm_ctx {
	struct crypto_cipher *cipher;
	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
	u64 polykey[2*VMAC_TAG_LEN/64];
	u64 l3key[2*VMAC_TAG_LEN/64];
};

/* per-request context */
struct vmac_desc_ctx {
	union {
		u8 partial[VMAC_NHBYTES];	/* partial block */
		__le64 partial_words[VMAC_NHBYTES / 8];
	};
	unsigned int partial_size;	/* size of the partial block */
	bool first_block_processed;
	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
	union {
		u8 bytes[VMAC_NONCEBYTES];
		__be64 pads[VMAC_NONCEBYTES / 8];
	} nonce;
	unsigned int nonce_size;	/* nonce bytes filled so far */
};

/*
 * Constants and masks
 */
#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask */

#define pe64_to_cpup le64_to_cpup	/* Prefer little endian */

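/*
 * INDEX_LOW and INDEX_HIGH give the array offsets of the low and high 32-bit
 * halves of a u64 when it is accessed through a u32 pointer, so the 32-bit
 * code below works on both little- and big-endian machines.
 */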
#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

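/*
 * ADD128 folds the 128-bit value (ih:il) into the accumulator (rh:rl); the
 * carry out of the low word is detected by the unsigned wrap-around test
 * "(rl) < (_il)" and propagated into the high word.
 */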
#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)

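/*
 * MUL64 is the usual schoolbook decomposition into 32-bit halves,
 * a = a_hi*2^32 + a_lo and b = b_hi*2^32 + b_lo:
 *
 *	a*b = a_hi*b_hi*2^64 + (a_hi*b_lo + a_lo*b_hi)*2^32 + a_lo*b_lo
 *
 * PMUL64 saves one ADD128 by summing the two cross products in a single u64,
 * which is only valid when the operands have enough high bits clear that the
 * sum cannot wrap (as the comment on PMUL64 notes).
 */
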
/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times; once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once for
 * the rest of the (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)

#if (VMAC_NHBYTES >= 64) /* These versions do 64-bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)

#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)			\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#endif

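/*
 * One step of the L2 polynomial hash: (ah:al) = (ah:al)*(kh:kl) + (mh:ml),
 * partially reduced modulo 2^127 - 1 (full reduction is deferred to l3hash).
 */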
#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)

#else /* ! CONFIG_64BIT */

#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		u64 t1, t2, m1, m2, t;					\
		int i;							\
		rh = rl = t = 0;					\
		for (i = 0; i < nw; i += 2) {				\
			t1 = pe64_to_cpup(mp+i) + kp[i];		\
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
			m2 = MUL32(t1 >> 32, t2);			\
			m1 = MUL32(t1, t2 >> 32);			\
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
				MUL32(t1, t2));				\
			rh += (u64)(u32)(m1 >> 32)			\
				+ (u32)(m2 >> 32);			\
			t += (u64)(u32)m1 + (u32)m2;			\
		}							\
		ADD128(rh, rl, (t >> 32), (t << 32));			\
	} while (0)
#endif

static void poly_step_func(u64 *ahi, u64 *alo,
			const u64 *kh, const u64 *kl,
			const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))

	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q =  MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q =  MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}

#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))

#endif  /* end of specialized NH and poly definitions */

/* At least nh_16 is defined. Define others as needed here */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do {								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)			\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif

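/*
 * l3hash: the final L3 hash stage.  It folds the len argument into the
 * 128-bit L2 accumulator (p1:p2), fully reduces modulo 2^127 - 1, and then
 * compresses the result to 64 bits using the key pair (k1, k2) with
 * arithmetic modulo p64 = 2^64 - 257; vhash_final passes the bit length of
 * the trailing partial block as len.
 */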
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;
	return rl;
}

/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
			 struct vmac_desc_ctx *dctx,
			 const __le64 *mptr, unsigned int blocks)
{
	const u64 *kptr = tctx->nhkey;
	const u64 pkh = tctx->polykey[0];
	const u64 pkl = tctx->polykey[1];
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];
	u64 rh, rl;

	if (!dctx->first_block_processed) {
		dctx->first_block_processed = true;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		blocks--;
	}

	while (blocks--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	dctx->polytmp[0] = ch;
	dctx->polytmp[1] = cl;
}

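/*
 * Key derivation: the NH, polynomial and L3 subkeys are generated by
 * encrypting a sequence of counter blocks with the underlying cipher.  The
 * first byte of each plaintext block distinguishes the three key streams
 * (0x80 for the NH key, 0xC0 for the polynomial key, 0xE0 for the L3 key)
 * and the last byte is the counter.  Polynomial key words are masked with
 * mpoly, and L3 key words are rejected and regenerated until they are
 * below p64.
 */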
static int vmac_setkey(struct crypto_shash *tfm,
		       const u8 *key, unsigned int keylen)
{
	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	__be64 out[2];
	u8 in[16] = { 0 };
	unsigned int i;
	int err;

	if (keylen != VMAC_KEY_LEN)
		return -EINVAL;

	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
	if (err)
		return err;

	/* Fill nh key */
	in[0] = 0x80;
	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->nhkey[i] = be64_to_cpu(out[0]);
		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
		in[15]++;
	}

	/* Fill poly key */
	in[0] = 0xC0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
		in[15]++;
	}

	/* Fill ip key */
	in[0] = 0xE0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
		do {
			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
			tctx->l3key[i] = be64_to_cpu(out[0]);
			tctx->l3key[i+1] = be64_to_cpu(out[1]);
			in[15]++;
		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
	}

	return 0;
}

static int vmac_init(struct shash_desc *desc)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->partial_size = 0;
	dctx->first_block_processed = false;
	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
	dctx->nonce_size = 0;
	return 0;
}

static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	/* Nonce is passed as first VMAC_NONCEBYTES bytes of data */
	if (dctx->nonce_size < VMAC_NONCEBYTES) {
		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
		dctx->nonce_size += n;
		p += n;
		len -= n;
	}

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}

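/*
 * vhash_final: NH/L2-hash any buffered partial block (zero-padded up to a
 * 16-byte boundary), then L3-hash the L2 accumulator to produce the 64-bit
 * VHASH value.
 */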
static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}

static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	int index;
	u64 hash, pad;

	if (dctx->nonce_size != VMAC_NONCEBYTES)
		return -EINVAL;

	/*
	 * The VMAC specification requires a nonce at least 1 bit shorter than
	 * the block cipher's block length, so we actually only accept a
	 * 127-bit nonce.  We define the unused bit to be the first one and
	 * require that it be 0, so the needed prepending of a 0 bit is
	 * implicit.
	 */
	if (dctx->nonce.bytes[0] & 0x80)
		return -EINVAL;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
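	/*
	 * The lowest nonce bit selects which 64-bit half of the encrypted
	 * block becomes the pad; the bit is cleared before the block is
	 * encrypted.
	 */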
	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
				  dctx->nonce.bytes);
	pad = be64_to_cpu(dctx->nonce.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_be64(hash + pad, out);
	return 0;
}

static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}

static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}

static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *alg;
	u32 mask;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH, &mask);
	if (err)
		return err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = shash_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, shash_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	alg = crypto_spawn_cipher_alg(spawn);

	err = -EINVAL;
	if (alg->cra_blocksize != VMAC_NONCEBYTES)
		goto err_free_inst;

	err = crypto_inst_setname(shash_crypto_instance(inst), tmpl->name, alg);
	if (err)
		goto err_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
	inst->alg.digestsize = VMAC_TAG_LEN / 8;
	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	inst->free = shash_free_singlespawn_instance;

	err = shash_register_instance(tmpl, inst);
	if (err) {
err_free_inst:
		shash_free_singlespawn_instance(inst);
	}
	return err;
}

static struct crypto_template vmac64_tmpl = {
	.name = "vmac64",
	.create = vmac_create,
	.module = THIS_MODULE,
};

static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac64_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac64_tmpl);
}

subsys_initcall(vmac_module_init);
module_exit(vmac_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");
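
/*
 * Example use through the kernel crypto API (a minimal sketch, assuming an
 * AES implementation is registered as "aes"; error handling omitted).  The
 * nonce is simply the first VMAC_NONCEBYTES bytes passed to update:
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
 *	SHASH_DESC_ON_STACK(desc, tfm);
 *
 *	crypto_shash_setkey(tfm, key, VMAC_KEY_LEN);
 *	desc->tfm = tfm;
 *	crypto_shash_init(desc);
 *	crypto_shash_update(desc, nonce, VMAC_NONCEBYTES);
 *	crypto_shash_update(desc, msg, msg_len);
 *	crypto_shash_final(desc, tag);
 *	crypto_free_shash(tfm);
 */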