// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SHA-256, as specified in
 * http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf
 *
 * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>.
 *
 * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com>
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
 * Copyright (c) 2014 Red Hat Inc.
 */

#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/string.h>
#include <crypto/sha.h>
#include <asm/unaligned.h>
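
/*
 * Ch() and Maj() are the "choice" and "majority" functions of FIPS 180-2,
 * written in a branch-free form: Ch selects each bit from y or z depending
 * on x, and Maj returns the per-bit majority vote of x, y and z.
 */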
static inline u32 Ch(u32 x, u32 y, u32 z)
{
        return z ^ (x & (y ^ z));
}

static inline u32 Maj(u32 x, u32 y, u32 z)
{
        return (x & y) | (z & (x | y));
}
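
/*
 * e0()/e1() are the big-sigma functions (Sigma0/Sigma1) used in the round
 * computation, and s0()/s1() are the small-sigma functions (sigma0/sigma1)
 * used in the message schedule, as defined in FIPS 180-2.
 */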
#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22))
#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25))
#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3))
#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10))

static inline void LOAD_OP(int I, u32 *W, const u8 *input)
{
        W[I] = get_unaligned_be32((__u32 *)input + I);
}
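
/*
 * BLEND_OP() implements the SHA-256 message schedule expansion:
 * W[i] = sigma1(W[i-2]) + W[i-7] + sigma0(W[i-15]) + W[i-16].
 */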
static inline void BLEND_OP(int I, u32 *W)
{
        W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
}
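
/*
 * Process one 64-byte input block: expand it into the 64-word schedule W[]
 * and run the 64 rounds, fully unrolled below in groups of eight.
 */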
static void sha256_transform(u32 *state, const u8 *input)
{
        u32 a, b, c, d, e, f, g, h, t1, t2;
        u32 W[64];
        int i;

        /* load the input */
        for (i = 0; i < 16; i++)
                LOAD_OP(i, W, input);

        /* now blend */
        for (i = 16; i < 64; i++)
                BLEND_OP(i, W);

        /* load the state into our registers */
        a = state[0];  b = state[1];  c = state[2];  d = state[3];
        e = state[4];  f = state[5];  g = state[6];  h = state[7];

        t1 = h + e1(e) + Ch(e, f, g) + 0x428a2f98 + W[0];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0x71374491 + W[1];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0xb5c0fbcf + W[2];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0xe9b5dba5 + W[3];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0x3956c25b + W[4];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0x59f111f1 + W[5];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0x923f82a4 + W[6];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0xab1c5ed5 + W[7];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        t1 = h + e1(e) + Ch(e, f, g) + 0xd807aa98 + W[8];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0x12835b01 + W[9];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0x243185be + W[10];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0x550c7dc3 + W[11];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0x72be5d74 + W[12];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0x80deb1fe + W[13];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0x9bdc06a7 + W[14];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0xc19bf174 + W[15];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        t1 = h + e1(e) + Ch(e, f, g) + 0xe49b69c1 + W[16];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0xefbe4786 + W[17];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0x0fc19dc6 + W[18];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0x240ca1cc + W[19];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0x2de92c6f + W[20];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0x4a7484aa + W[21];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0x5cb0a9dc + W[22];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0x76f988da + W[23];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        t1 = h + e1(e) + Ch(e, f, g) + 0x983e5152 + W[24];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0xa831c66d + W[25];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0xb00327c8 + W[26];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0xbf597fc7 + W[27];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0xc6e00bf3 + W[28];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0xd5a79147 + W[29];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0x06ca6351 + W[30];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0x14292967 + W[31];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        t1 = h + e1(e) + Ch(e, f, g) + 0x27b70a85 + W[32];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0x2e1b2138 + W[33];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0x4d2c6dfc + W[34];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0x53380d13 + W[35];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0x650a7354 + W[36];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0x766a0abb + W[37];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0x81c2c92e + W[38];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0x92722c85 + W[39];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        t1 = h + e1(e) + Ch(e, f, g) + 0xa2bfe8a1 + W[40];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0xa81a664b + W[41];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0xc24b8b70 + W[42];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0xc76c51a3 + W[43];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0xd192e819 + W[44];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0xd6990624 + W[45];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0xf40e3585 + W[46];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0x106aa070 + W[47];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        t1 = h + e1(e) + Ch(e, f, g) + 0x19a4c116 + W[48];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0x1e376c08 + W[49];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0x2748774c + W[50];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0x34b0bcb5 + W[51];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0x391c0cb3 + W[52];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0x4ed8aa4a + W[53];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0x5b9cca4f + W[54];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0x682e6ff3 + W[55];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        t1 = h + e1(e) + Ch(e, f, g) + 0x748f82ee + W[56];
        t2 = e0(a) + Maj(a, b, c);    d += t1;    h = t1 + t2;
        t1 = g + e1(d) + Ch(d, e, f) + 0x78a5636f + W[57];
        t2 = e0(h) + Maj(h, a, b);    c += t1;    g = t1 + t2;
        t1 = f + e1(c) + Ch(c, d, e) + 0x84c87814 + W[58];
        t2 = e0(g) + Maj(g, h, a);    b += t1;    f = t1 + t2;
        t1 = e + e1(b) + Ch(b, c, d) + 0x8cc70208 + W[59];
        t2 = e0(f) + Maj(f, g, h);    a += t1;    e = t1 + t2;
        t1 = d + e1(a) + Ch(a, b, c) + 0x90befffa + W[60];
        t2 = e0(e) + Maj(e, f, g);    h += t1;    d = t1 + t2;
        t1 = c + e1(h) + Ch(h, a, b) + 0xa4506ceb + W[61];
        t2 = e0(d) + Maj(d, e, f);    g += t1;    c = t1 + t2;
        t1 = b + e1(g) + Ch(g, h, a) + 0xbef9a3f7 + W[62];
        t2 = e0(c) + Maj(c, d, e);    f += t1;    b = t1 + t2;
        t1 = a + e1(f) + Ch(f, g, h) + 0xc67178f2 + W[63];
        t2 = e0(b) + Maj(b, c, d);    e += t1;    a = t1 + t2;

        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
        state[4] += e; state[5] += f; state[6] += g; state[7] += h;

        /* clear any sensitive info... */
        a = b = c = d = e = f = g = h = t1 = t2 = 0;
        memzero_explicit(W, 64 * sizeof(u32));
}
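
/*
 * Feed 'len' bytes at 'data' into the hash state: any previously buffered
 * partial block is completed first, full 64-byte blocks are passed to
 * sha256_transform(), and a trailing partial block is saved in sctx->buf.
 */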
int sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
        unsigned int partial, done;
        const u8 *src;

        partial = sctx->count & 0x3f;
        sctx->count += len;
        done = 0;
        src = data;

        if ((partial + len) > 63) {
                if (partial) {
                        done = -partial;
                        memcpy(sctx->buf + partial, data, done + 64);
                        src = sctx->buf;
                }

                do {
                        sha256_transform(sctx->state, src);
                        done += 64;
                        src = data + done;
                } while (done + 63 < len);

                partial = 0;
        }
        memcpy(sctx->buf + partial, src, len - done);

        return 0;
}
EXPORT_SYMBOL(sha256_update);

int sha224_update(struct sha256_state *sctx, const u8 *data, unsigned int len)
{
        return sha256_update(sctx, data, len);
}
EXPORT_SYMBOL(sha224_update);
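
/*
 * Common finalisation for SHA-256 and SHA-224: append the 0x80 padding and
 * the 64-bit message length, then emit the first 'digest_words' state words
 * in big-endian order (8 words for SHA-256, 7 for SHA-224).
 */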
static int __sha256_final(struct sha256_state *sctx, u8 *out, int digest_words)
{
        __be32 *dst = (__be32 *)out;
        __be64 bits;
        unsigned int index, pad_len;
        int i;
        static const u8 padding[64] = { 0x80, };

        /* Save number of bits */
        bits = cpu_to_be64(sctx->count << 3);

        /* Pad out to 56 mod 64. */
        index = sctx->count & 0x3f;
        pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
        sha256_update(sctx, padding, pad_len);

        /* Append length (before padding) */
        sha256_update(sctx, (const u8 *)&bits, sizeof(bits));

        /* Store state in digest */
        for (i = 0; i < digest_words; i++)
                put_unaligned_be32(sctx->state[i], &dst[i]);

        /* Zeroize sensitive information. */
        memset(sctx, 0, sizeof(*sctx));

        return 0;
}

int sha256_final(struct sha256_state *sctx, u8 *out)
{
        return __sha256_final(sctx, out, 8);
}
EXPORT_SYMBOL(sha256_final);

int sha224_final(struct sha256_state *sctx, u8 *out)
{
        return __sha256_final(sctx, out, 7);
}
EXPORT_SYMBOL(sha224_final);
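
/*
 * A minimal usage sketch. It assumes the caller initialises the state with
 * a sha256_init() helper declared alongside struct sha256_state; that
 * helper is not part of this file.
 *
 *      struct sha256_state sctx;
 *      u8 digest[SHA256_DIGEST_SIZE];
 *
 *      sha256_init(&sctx);
 *      sha256_update(&sctx, data, len);
 *      sha256_final(&sctx, digest);
 */
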
MODULE_LICENSE("GPL");