/*
 * sha1-ce-glue.c - SHA-1 secure hash using ARMv8 Crypto Extensions
 *
 * Copyright (C) 2014 Linaro Ltd <ard.biesheuvel@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <asm/neon.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/module.h>

MODULE_DESCRIPTION("SHA1 secure hash using ARMv8 Crypto Extensions");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL v2");

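/*
 * sha1_ce_transform() is implemented in assembly (sha1-ce-core.S) using the
 * ARMv8 SHA-1 instructions. As used by the glue code below, 'head' may point
 * to a single buffered block that is consumed before 'src', and 'bytes' is
 * non-zero only on the finup fast path, where it carries the total input
 * length.
 */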
asmlinkage void sha1_ce_transform(int blocks, u8 const *src, u32 *state,
				  u8 *head, long bytes);

static int sha1_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};
	return 0;
}

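/*
 * Buffer partial input in sctx->buffer and hand complete 64-byte blocks to
 * the Crypto Extensions routine. The NEON register file may only be touched
 * between kernel_neon_begin_partial() and kernel_neon_end(); the argument of
 * 16 indicates that at most 16 SIMD registers need to be preserved.
 */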
static int sha1_update(struct shash_desc *desc, const u8 *data,
		       unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;

	sctx->count += len;

	if ((partial + len) >= SHA1_BLOCK_SIZE) {
		int blocks;

		if (partial) {
			int p = SHA1_BLOCK_SIZE - partial;

			memcpy(sctx->buffer + partial, data, p);
			data += p;
			len -= p;
		}

		blocks = len / SHA1_BLOCK_SIZE;
		len %= SHA1_BLOCK_SIZE;

		kernel_neon_begin_partial(16);
		sha1_ce_transform(blocks, data, sctx->state,
				  partial ? sctx->buffer : NULL, 0);
		kernel_neon_end();

		data += blocks * SHA1_BLOCK_SIZE;
		partial = 0;
	}
	if (len)
		memcpy(sctx->buffer + partial, data, len);
	return 0;
}

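/*
 * Finalisation follows FIPS 180: append a 0x80 byte, pad with zeroes up to
 * the last 8 bytes of a block, append the message length in bits as a
 * big-endian 64-bit value, then emit the state words in big-endian order.
 */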
static int sha1_final(struct shash_desc *desc, u8 *out)
{
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	struct sha1_state *sctx = shash_desc_ctx(desc);
	__be64 bits = cpu_to_be64(sctx->count << 3);
	__be32 *dst = (__be32 *)out;
	int i;

	u32 padlen = SHA1_BLOCK_SIZE
		     - ((sctx->count + sizeof(bits)) % SHA1_BLOCK_SIZE);

	sha1_update(desc, padding, padlen);
	sha1_update(desc, (const u8 *)&bits, sizeof(bits));

	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
		put_unaligned_be32(sctx->state[i], dst++);

	*sctx = (struct sha1_state){};
	return 0;
}

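/*
 * finup() can avoid the buffering in sha1_update() entirely when nothing has
 * been buffered yet and the input is a whole number of blocks: the data is
 * handed to sha1_ce_transform() in one call, along with the total length in
 * 'bytes' so the finalisation can be dealt with there as well. Any other
 * case falls back to the plain update + final sequence.
 */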
static int sha1_finup(struct shash_desc *desc, const u8 *data,
		      unsigned int len, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	__be32 *dst = (__be32 *)out;
	int blocks;
	int i;

	if (sctx->count || !len || (len % SHA1_BLOCK_SIZE)) {
		sha1_update(desc, data, len);
		return sha1_final(desc, out);
	}

	/*
	 * Use a fast path if the input is a multiple of 64 bytes. In
	 * this case, there is no need to copy data around, and we can
	 * perform the entire digest calculation in a single invocation
	 * of sha1_ce_transform()
	 */
	blocks = len / SHA1_BLOCK_SIZE;

	kernel_neon_begin_partial(16);
	sha1_ce_transform(blocks, data, sctx->state, NULL, len);
	kernel_neon_end();

	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
		put_unaligned_be32(sctx->state[i], dst++);

	*sctx = (struct sha1_state){};
	return 0;
}

static int sha1_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	struct sha1_state *dst = out;

	*dst = *sctx;
	return 0;
}

static int sha1_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	struct sha1_state const *src = in;

	*sctx = *src;
	return 0;
}

static struct shash_alg alg = {
	.init			= sha1_init,
	.update			= sha1_update,
	.final			= sha1_final,
	.finup			= sha1_finup,
	.export			= sha1_export,
	.import			= sha1_import,
	.descsize		= sizeof(struct sha1_state),
	.digestsize		= SHA1_DIGEST_SIZE,
	.statesize		= sizeof(struct sha1_state),
	.base			= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-ce",
		.cra_priority		= 200,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};

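/*
 * A minimal usage sketch (not part of this driver), with hypothetical
 * variables data, len and digest, and error handling omitted: once the
 * algorithm is registered, kernel users reach it through the generic
 * shash API, e.g.
 *
 *	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
 *	struct shash_desc *desc = kzalloc(sizeof(*desc) +
 *					  crypto_shash_descsize(tfm),
 *					  GFP_KERNEL);
 *
 *	desc->tfm = tfm;
 *	crypto_shash_digest(desc, data, len, digest);
 *
 * The crypto core prefers this driver over the generic C implementation
 * whenever its cra_priority is higher.
 */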
static int __init sha1_ce_mod_init(void)
{
	return crypto_register_shash(&alg);
}

static void __exit sha1_ce_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

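/*
 * module_cpu_feature_match() serves as the module_init() hook and also tags
 * the module with a CPU feature table, so that it is only auto-loaded on
 * systems whose CPUs advertise the SHA-1 Crypto Extensions.
 */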
module_cpu_feature_match(SHA1, sha1_ce_mod_init);
module_exit(sha1_ce_mod_fini);