/*
 * VMAC: Message Authentication Code using Universal Hashing
 *
 * Reference: https://tools.ietf.org/html/draft-krovetz-vmac-01
 *
 * Copyright (c) 2009, Intel Corporation.
 * Copyright (c) 2018, Google Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 */

/*
 * Derived from:
 *	VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
 *	This implementation is hereby placed in the public domain.
 *	The authors offer no warranty. Use at your own risk.
 *	Last modified: 17 APR 08, 1700 PDT
 */
#include <asm/unaligned.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <crypto/scatterwalk.h>
#include <crypto/internal/hash.h>
/*
 * User definable settings.
 */
#define VMAC_TAG_LEN	64
#define VMAC_KEY_SIZE	128	/* Must be 128, 192 or 256 */
#define VMAC_KEY_LEN	(VMAC_KEY_SIZE/8)
#define VMAC_NHBYTES	128	/* Must be 2^i for some 3 < i < 13; standard = 128 */
#define VMAC_NONCEBYTES	16
/* per-transform (per-key) context */
struct vmac_tfm_ctx {
	struct crypto_cipher *cipher;
	u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
	u64 polykey[2*VMAC_TAG_LEN/64];
	u64 l3key[2*VMAC_TAG_LEN/64];
};
/* per-request context */
struct vmac_desc_ctx {
	union {
		u8 partial[VMAC_NHBYTES];	/* partial block */
		__le64 partial_words[VMAC_NHBYTES / 8];
	};
	unsigned int partial_size;	/* size of the partial block */
	bool first_block_processed;
	u64 polytmp[2*VMAC_TAG_LEN/64];	/* running total of L2-hash */
	union {
		u8 bytes[VMAC_NONCEBYTES];
		__be64 pads[VMAC_NONCEBYTES / 8];
	} nonce;
	unsigned int nonce_size;	/* nonce bytes filled so far */
};
#define UINT64_C(x) x##ULL
static const u64 p64   = UINT64_C(0xfffffffffffffeff);	/* 2^64 - 257 prime  */
static const u64 m62   = UINT64_C(0x3fffffffffffffff);	/* 62-bit mask       */
static const u64 m63   = UINT64_C(0x7fffffffffffffff);	/* 63-bit mask       */
static const u64 m64   = UINT64_C(0xffffffffffffffff);	/* 64-bit mask       */
static const u64 mpoly = UINT64_C(0x1fffffff1fffffff);	/* Poly key mask     */

#define pe64_to_cpup le64_to_cpup	/* Prefer little endian */

#ifdef __LITTLE_ENDIAN
#define INDEX_HIGH 1
#define INDEX_LOW 0
#else
#define INDEX_HIGH 0
#define INDEX_LOW 1
#endif

/*
 * The following routines are used in this implementation. They are
 * written via macros to simulate zero-overhead call-by-reference.
 *
 * MUL64: 64x64->128-bit multiplication
 * PMUL64: assumes top bits cleared on inputs
 * ADD128: 128x128->128-bit addition
 */

#define ADD128(rh, rl, ih, il)						\
	do {								\
		u64 _il = (il);						\
		(rl) += (_il);						\
		if ((rl) < (_il))					\
			(rh)++;						\
		(rh) += (ih);						\
	} while (0)

#define MUL32(i1, i2)	((u64)(u32)(i1)*(u32)(i2))

#define PMUL64(rh, rl, i1, i2)	/* Assumes m doesn't overflow */	\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m = MUL32(_i1, _i2>>32) + MUL32(_i1>>32, _i2);	\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m >> 32), (m << 32));			\
	} while (0)

#define MUL64(rh, rl, i1, i2)						\
	do {								\
		u64 _i1 = (i1), _i2 = (i2);				\
		u64 m1 = MUL32(_i1, _i2>>32);				\
		u64 m2 = MUL32(_i1>>32, _i2);				\
		rh = MUL32(_i1>>32, _i2>>32);				\
		rl = MUL32(_i1, _i2);					\
		ADD128(rh, rl, (m1 >> 32), (m1 << 32));			\
		ADD128(rh, rl, (m2 >> 32), (m2 << 32));			\
	} while (0)
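/*
 * Self-check sketch, illustrative only: assuming a compiler that provides
 * __int128, the (rh, rl) pair produced by MUL64 can be compared against a
 * native 128-bit product. The helper name mul64_selftest is hypothetical
 * and not part of the original implementation.
 */
#ifdef __SIZEOF_INT128__
static inline bool mul64_selftest(void)
{
	const u64 a = UINT64_C(0x0123456789abcdef);
	const u64 b = UINT64_C(0xfedcba9876543210);
	/* Reference product via the compiler's native 128-bit type */
	unsigned __int128 ref = (unsigned __int128)a * b;
	u64 rh, rl;

	MUL64(rh, rl, a, b);
	return rh == (u64)(ref >> 64) && rl == (u64)ref;
}
#endif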
/*
 * For highest performance the L1 NH and L2 polynomial hashes should be
 * carefully implemented to take advantage of one's target architecture.
 * Here these two hash functions are defined multiple times; once for
 * 64-bit architectures, once for 32-bit SSE2 architectures, and once
 * for the rest (32-bit) architectures.
 * For each, nh_16 *must* be defined (works on multiples of 16 bytes).
 * Optionally, nh_vmac_nhbytes can be defined (for multiples of
 * VMAC_NHBYTES), and nh_16_2 and nh_vmac_nhbytes_2 (versions that do two
 * NH computations at once).
 */

#ifdef CONFIG_64BIT

#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)
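/*
 * For reference, the NH computation performed by nh_16 above is, over
 * little-endian 64-bit message words m[i] and key words k[i]:
 *
 *	NH = sum over even i < nw of
 *	     ((m[i] + k[i]) mod 2^64) * ((m[i+1] + k[i+1]) mod 2^64)
 *
 * accumulated mod 2^128 in the register pair (rh, rl).
 */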
#define nh_16_2(mp, kp, nw, rh, rl, rh1, rl1)				\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 2) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#if (VMAC_NHBYTES >= 64) /* These versions do 64 bytes of message at a time */
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	do {								\
		int i; u64 th, tl;					\
		rh = rl = 0;						\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
		}							\
	} while (0)
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh1, rl1)		\
	do {								\
		int i; u64 th, tl;					\
		rh1 = rl1 = rh = rl = 0;				\
		for (i = 0; i < nw; i += 8) {				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+1]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+1)+(kp)[i+3]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+2],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+3]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+2)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+3)+(kp)[i+5]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+4],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+5]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+4)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+5)+(kp)[i+7]);	\
			ADD128(rh1, rl1, th, tl);			\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+6],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+7]);	\
			ADD128(rh, rl, th, tl);				\
			MUL64(th, tl, pe64_to_cpup((mp)+i+6)+(kp)[i+8],	\
				pe64_to_cpup((mp)+i+7)+(kp)[i+9]);	\
			ADD128(rh1, rl1, th, tl);			\
		}							\
	} while (0)
#endif
#define poly_step(ah, al, kh, kl, mh, ml)				\
	do {								\
		u64 t1h, t1l, t2h, t2l, t3h, t3l, z = 0;		\
		/* compute ab*cd, put bd into result registers */	\
		PMUL64(t3h, t3l, al, kh);				\
		PMUL64(t2h, t2l, ah, kl);				\
		PMUL64(t1h, t1l, ah, 2*kh);				\
		PMUL64(ah, al, al, kl);					\
		/* add 2 * ac to result */				\
		ADD128(ah, al, t1h, t1l);				\
		/* add together ad + bc */				\
		ADD128(t2h, t2l, t3h, t3l);				\
		/* now (ah,al), (t2l,2*t2h) need summing */		\
		/* first add the high registers, carrying into t2h */	\
		ADD128(t2h, ah, z, t2l);				\
		/* double t2h and add top bit of ah */			\
		t2h = 2 * t2h + (ah >> 63);				\
		ah &= m63;						\
		/* now add the low registers */				\
		ADD128(ah, al, mh, ml);					\
		ADD128(ah, al, z, t2h);					\
	} while (0)
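/*
 * Taken together, the steps above compute one Horner step of the L2 hash:
 * (ah:al) = (ah:al) * (kh:kl) + (mh:ml) (mod 2^127 - 1), with the result
 * left only partially reduced (it fits in 128 bits; full reduction happens
 * in l3hash). The mpoly mask applied at key-setup time clears the top bits
 * of kh and kl, satisfying PMUL64's no-overflow precondition.
 */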
#else /* ! CONFIG_64BIT */
#ifndef nh_16
#define nh_16(mp, kp, nw, rh, rl)					\
	do {								\
		u64 t1, t2, m1, m2, t;					\
		int i;							\
		rh = rl = t = 0;					\
		for (i = 0; i < nw; i += 2) {				\
			t1 = pe64_to_cpup(mp+i) + kp[i];		\
			t2 = pe64_to_cpup(mp+i+1) + kp[i+1];		\
			m2 = MUL32(t1 >> 32, t2);			\
			m1 = MUL32(t1, t2 >> 32);			\
			ADD128(rh, rl, MUL32(t1 >> 32, t2 >> 32),	\
				MUL32(t1, t2));				\
			rh += (u64)(u32)(m1 >> 32)			\
				+ (u32)(m2 >> 32);			\
			t += (u64)(u32)m1 + (u32)m2;			\
		}							\
		ADD128(rh, rl, (t >> 32), (t << 32));			\
	} while (0)
#endif
static void poly_step_func(u64 *ahi, u64 *alo,
			   const u64 *kh, const u64 *kl,
			   const u64 *mh, const u64 *ml)
{
#define a0 (*(((u32 *)alo)+INDEX_LOW))
#define a1 (*(((u32 *)alo)+INDEX_HIGH))
#define a2 (*(((u32 *)ahi)+INDEX_LOW))
#define a3 (*(((u32 *)ahi)+INDEX_HIGH))
#define k0 (*(((u32 *)kl)+INDEX_LOW))
#define k1 (*(((u32 *)kl)+INDEX_HIGH))
#define k2 (*(((u32 *)kh)+INDEX_LOW))
#define k3 (*(((u32 *)kh)+INDEX_HIGH))
	u64 p, q, t;
	u32 t2;

	p = MUL32(a3, k3);
	p += p;
	p += *(u64 *)mh;
	p += MUL32(a0, k2);
	p += MUL32(a1, k1);
	p += MUL32(a2, k0);
	t = (u32)(p);
	p >>= 32;
	p += MUL32(a0, k3);
	p += MUL32(a1, k2);
	p += MUL32(a2, k1);
	p += MUL32(a3, k0);
	t |= ((u64)((u32)p & 0x7fffffff)) << 32;
	p >>= 31;
	p += (u64)(((u32 *)ml)[INDEX_LOW]);
	p += MUL32(a0, k0);
	q =  MUL32(a1, k3);
	q += MUL32(a2, k2);
	q += MUL32(a3, k1);
	q += q;
	p += q;
	t2 = (u32)(p);
	p >>= 32;
	p += (u64)(((u32 *)ml)[INDEX_HIGH]);
	p += MUL32(a0, k1);
	p += MUL32(a1, k0);
	q =  MUL32(a2, k3);
	q += MUL32(a3, k2);
	q += q;
	p += q;
	*(u64 *)(alo) = (p << 32) | t2;
	p >>= 32;
	*(u64 *)(ahi) = p + t;

#undef a0
#undef a1
#undef a2
#undef a3
#undef k0
#undef k1
#undef k2
#undef k3
}
#define poly_step(ah, al, kh, kl, mh, ml)				\
	poly_step_func(&(ah), &(al), &(kh), &(kl), &(mh), &(ml))
#endif  /* end of specialized NH and poly definitions */
/* At least nh_16 is defined. Define others as needed here */
#ifndef nh_16_2
#define nh_16_2(mp, kp, nw, rh, rl, rh2, rl2)				\
	do {								\
		nh_16(mp, kp, nw, rh, rl);				\
		nh_16(mp, ((kp)+2), nw, rh2, rl2);			\
	} while (0)
#endif
#ifndef nh_vmac_nhbytes
#define nh_vmac_nhbytes(mp, kp, nw, rh, rl)				\
	nh_16(mp, kp, nw, rh, rl)
#endif
#ifndef nh_vmac_nhbytes_2
#define nh_vmac_nhbytes_2(mp, kp, nw, rh, rl, rh2, rl2)		\
	do {								\
		nh_vmac_nhbytes(mp, kp, nw, rh, rl);			\
		nh_vmac_nhbytes(mp, ((kp)+2), nw, rh2, rl2);		\
	} while (0)
#endif
static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
{
	u64 rh, rl, t, z = 0;

	/* fully reduce (p1,p2)+(len,0) mod p127 */
	t = p1 >> 63;
	p1 &= m63;
	ADD128(p1, p2, len, t);
	/* At this point, (p1,p2) is at most 2^127+(len<<64) */
	t = (p1 > m63) + ((p1 == m63) && (p2 == m64));
	ADD128(p1, p2, z, t);
	p1 &= m63;

	/* compute (p1,p2)/(2^64-2^32) and (p1,p2)%(2^64-2^32) */
	t = p1 + (p2 >> 32);
	t += (t >> 32);
	t += (u32)t > 0xfffffffeu;
	p1 += (t >> 32);
	p2 += (p1 << 32);

	/* compute (p1+k1)%p64 and (p2+k2)%p64 */
	p1 += k1;
	p1 += (0 - (p1 < k1)) & 257;
	p2 += k2;
	p2 += (0 - (p2 < k2)) & 257;

	/* compute (p1+k1)*(p2+k2)%p64 */
	MUL64(rh, rl, p1, p2);
	t = rh >> 56;
	ADD128(t, rl, z, rh);
	rh <<= 8;
	ADD128(t, rl, z, rh);
	t += t << 8;
	rl += t;
	rl += (0 - (rl < t)) & 257;
	rl += (0 - (rl > p64-1)) & 257;
	return rl;
}
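/*
 * In summary, l3hash is the VHASH L3 step: it maps the 128-bit L2
 * accumulator (p1,p2) to a single 64-bit value by fully reducing it
 * modulo 2^127 - 1, writing the result as two digits in base 2^64 - 2^32,
 * offsetting those digits by the key words (k1, k2), and returning their
 * product modulo the prime p64 = 2^64 - 257.
 */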
/* L1 and L2-hash one or more VMAC_NHBYTES-byte blocks */
static void vhash_blocks(const struct vmac_tfm_ctx *tctx,
			 struct vmac_desc_ctx *dctx,
			 const __le64 *mptr, unsigned int blocks)
{
	const u64 *kptr = tctx->nhkey;
	const u64 pkh = tctx->polykey[0];
	const u64 pkl = tctx->polykey[1];
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];
	u64 rh, rl;

	/* The first block's NH output is added into the L2 accumulator
	 * directly; each later block takes a polynomial (Horner) step. */
	if (!dctx->first_block_processed) {
		dctx->first_block_processed = true;
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		ADD128(ch, cl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
		blocks--;
	}

	while (blocks--) {
		nh_vmac_nhbytes(mptr, kptr, VMAC_NHBYTES/8, rh, rl);
		rh &= m62;
		poly_step(ch, cl, pkh, pkl, rh, rl);
		mptr += (VMAC_NHBYTES/sizeof(u64));
	}

	dctx->polytmp[0] = ch;
	dctx->polytmp[1] = cl;
}
static int vmac_setkey(struct crypto_shash *tfm,
		       const u8 *key, unsigned int keylen)
{
	struct vmac_tfm_ctx *tctx = crypto_shash_ctx(tfm);
	__be64 out[2];
	u8 in[16] = { 0 };
	unsigned int i;
	int err;

	if (keylen != VMAC_KEY_LEN) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	err = crypto_cipher_setkey(tctx->cipher, key, keylen);
	if (err)
		return err;

	/* Fill nh key */
	in[0] = 0x80;
	for (i = 0; i < ARRAY_SIZE(tctx->nhkey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->nhkey[i] = be64_to_cpu(out[0]);
		tctx->nhkey[i+1] = be64_to_cpu(out[1]);
		in[15]++;
	}

	/* Fill poly key */
	in[0] = 0xC0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->polykey); i += 2) {
		crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
		tctx->polykey[i] = be64_to_cpu(out[0]) & mpoly;
		tctx->polykey[i+1] = be64_to_cpu(out[1]) & mpoly;
		in[15]++;
	}

	/* Fill ip (l3) key; each word must be less than p64 */
	in[0] = 0xE0;
	in[15] = 0;
	for (i = 0; i < ARRAY_SIZE(tctx->l3key); i += 2) {
		do {
			crypto_cipher_encrypt_one(tctx->cipher, (u8 *)out, in);
			tctx->l3key[i] = be64_to_cpu(out[0]);
			tctx->l3key[i+1] = be64_to_cpu(out[1]);
			in[15]++;
		} while (tctx->l3key[i] >= p64 || tctx->l3key[i+1] >= p64);
	}

	return 0;
}
static int vmac_init(struct shash_desc *desc)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);

	dctx->partial_size = 0;
	dctx->first_block_processed = false;
	memcpy(dctx->polytmp, tctx->polykey, sizeof(dctx->polytmp));
	dctx->nonce_size = 0;
	return 0;
}
static int vmac_update(struct shash_desc *desc, const u8 *p, unsigned int len)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	unsigned int n;

	/*
	 * The nonce is passed as the first VMAC_NONCEBYTES bytes of data
	 * (see the usage sketch after this function).
	 */
	if (dctx->nonce_size < VMAC_NONCEBYTES) {
		n = min(len, VMAC_NONCEBYTES - dctx->nonce_size);
		memcpy(&dctx->nonce.bytes[dctx->nonce_size], p, n);
		dctx->nonce_size += n;
		p += n;
		len -= n;
	}

	if (dctx->partial_size) {
		n = min(len, VMAC_NHBYTES - dctx->partial_size);
		memcpy(&dctx->partial[dctx->partial_size], p, n);
		dctx->partial_size += n;
		p += n;
		len -= n;
		if (dctx->partial_size == VMAC_NHBYTES) {
			vhash_blocks(tctx, dctx, dctx->partial_words, 1);
			dctx->partial_size = 0;
		}
	}

	if (len >= VMAC_NHBYTES) {
		n = round_down(len, VMAC_NHBYTES);
		/* TODO: 'p' may be misaligned here */
		vhash_blocks(tctx, dctx, (const __le64 *)p, n / VMAC_NHBYTES);
		p += n;
		len -= n;
	}

	if (len) {
		memcpy(dctx->partial, p, len);
		dctx->partial_size = len;
	}

	return 0;
}
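/*
 * Usage sketch, illustrative only: how a hypothetical caller would compute
 * a VMAC tag through the shash API, feeding the 16-byte nonce before the
 * message as required above. The identifiers key, nonce, msg, and
 * vmac64_digest_example are assumptions for this example, not part of
 * this file or of the kernel crypto API.
 */
static int __maybe_unused vmac64_digest_example(const u8 *key,
						const u8 nonce[VMAC_NONCEBYTES],
						const u8 *msg, unsigned int msglen,
						u8 tag[VMAC_TAG_LEN / 8])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("vmac64(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_shash_setkey(tfm, key, VMAC_KEY_LEN);
	if (!err) {
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		/* The first VMAC_NONCEBYTES bytes fed in are the nonce */
		err = crypto_shash_init(desc) ?:
		      crypto_shash_update(desc, nonce, VMAC_NONCEBYTES) ?:
		      crypto_shash_update(desc, msg, msglen) ?:
		      crypto_shash_final(desc, tag);
	}
	crypto_free_shash(tfm);
	return err;
}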
static u64 vhash_final(const struct vmac_tfm_ctx *tctx,
		       struct vmac_desc_ctx *dctx)
{
	unsigned int partial = dctx->partial_size;
	u64 ch = dctx->polytmp[0];
	u64 cl = dctx->polytmp[1];

	/* L1 and L2-hash the final block if needed */
	if (partial) {
		/* Zero-pad to next 128-bit boundary */
		unsigned int n = round_up(partial, 16);
		u64 rh, rl;

		memset(&dctx->partial[partial], 0, n - partial);
		nh_16(dctx->partial_words, tctx->nhkey, n / 8, rh, rl);
		rh &= m62;
		if (dctx->first_block_processed)
			poly_step(ch, cl, tctx->polykey[0], tctx->polykey[1],
				  rh, rl);
		else
			ADD128(ch, cl, rh, rl);
	}

	/* L3-hash the 128-bit output of L2-hash */
	return l3hash(ch, cl, tctx->l3key[0], tctx->l3key[1], partial * 8);
}
static int vmac_final(struct shash_desc *desc, u8 *out)
{
	const struct vmac_tfm_ctx *tctx = crypto_shash_ctx(desc->tfm);
	struct vmac_desc_ctx *dctx = shash_desc_ctx(desc);
	int index;
	u64 hash, pad;

	if (dctx->nonce_size != VMAC_NONCEBYTES)
		return -EINVAL;

	/*
	 * The VMAC specification requires a nonce at least 1 bit shorter than
	 * the block cipher's block length, so we actually only accept a
	 * 127-bit nonce.  We define the unused bit to be the first one and
	 * require that it be 0, so the needed prepending of a 0 bit is
	 * implicit.
	 */
	if (dctx->nonce.bytes[0] & 0x80)
		return -EINVAL;

	/* Finish calculating the VHASH of the message */
	hash = vhash_final(tctx, dctx);

	/* Generate pseudorandom pad by encrypting the nonce */
	BUILD_BUG_ON(VMAC_NONCEBYTES != 2 * (VMAC_TAG_LEN / 8));
	index = dctx->nonce.bytes[VMAC_NONCEBYTES - 1] & 1;
	dctx->nonce.bytes[VMAC_NONCEBYTES - 1] &= ~1;
	crypto_cipher_encrypt_one(tctx->cipher, dctx->nonce.bytes,
				  dctx->nonce.bytes);
	pad = be64_to_cpu(dctx->nonce.pads[index]);

	/* The VMAC is the sum of VHASH and the pseudorandom pad */
	put_unaligned_be64(hash + pad, out);
	return 0;
}
static int vmac_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
	struct crypto_spawn *spawn = crypto_instance_ctx(inst);
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	tctx->cipher = cipher;
	return 0;
}
static void vmac_exit_tfm(struct crypto_tfm *tfm)
{
	struct vmac_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->cipher);
}
static int vmac_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct shash_instance *inst;
	struct crypto_alg *alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH);
	if (err)
		return err;

	alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER,
				  CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(alg))
		return PTR_ERR(alg);

	err = -EINVAL;
	if (alg->cra_blocksize != VMAC_NONCEBYTES)
		goto out_put_alg;

	inst = shash_alloc_instance(tmpl->name, alg);
	err = PTR_ERR(inst);
	if (IS_ERR(inst))
		goto out_put_alg;

	err = crypto_init_spawn(shash_instance_ctx(inst), alg,
				shash_crypto_instance(inst),
				CRYPTO_ALG_TYPE_MASK);
	if (err)
		goto out_free_inst;

	inst->alg.base.cra_priority = alg->cra_priority;
	inst->alg.base.cra_blocksize = alg->cra_blocksize;
	inst->alg.base.cra_alignmask = alg->cra_alignmask;

	inst->alg.base.cra_ctxsize = sizeof(struct vmac_tfm_ctx);
	inst->alg.base.cra_init = vmac_init_tfm;
	inst->alg.base.cra_exit = vmac_exit_tfm;

	inst->alg.descsize = sizeof(struct vmac_desc_ctx);
	inst->alg.digestsize = VMAC_TAG_LEN / 8;
	inst->alg.init = vmac_init;
	inst->alg.update = vmac_update;
	inst->alg.final = vmac_final;
	inst->alg.setkey = vmac_setkey;

	err = shash_register_instance(tmpl, inst);
	if (err) {
out_free_inst:
		shash_free_instance(shash_crypto_instance(inst));
	}

out_put_alg:
	crypto_mod_put(alg);
	return err;
}
static struct crypto_template vmac64_tmpl = {
	.name = "vmac64",
	.create = vmac_create,
	.free = shash_free_instance,
	.module = THIS_MODULE,
};
static int __init vmac_module_init(void)
{
	return crypto_register_template(&vmac64_tmpl);
}

static void __exit vmac_module_exit(void)
{
	crypto_unregister_template(&vmac64_tmpl);
}

module_init(vmac_module_init);
module_exit(vmac_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VMAC hash algorithm");
MODULE_ALIAS_CRYPTO("vmac64");