/*******************************************************************************
* Copyright (c) 2013, Intel Corporation
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* * Redistributions of source code must retain the above copyright
*   notice, this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright
*   notice, this list of conditions and the following disclaimer in the
*   documentation and/or other materials provided with the
*   distribution.
*
* * Neither the name of the Intel Corporation nor the names of its
*   contributors may be used to endorse or promote products derived from
*   this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
********************************************************************************
*
* Intel SHA Extensions optimized implementation of a SHA-256 update function
*
* The function takes a pointer to the current hash values, a pointer to the
* input data, and a number of 64-byte blocks to process.  Once all blocks
* have been processed, the digest pointer is updated with the resulting hash
* value.  The function only processes complete blocks; there is no
* functionality to store partial blocks.  All message padding and hash value
* initialization must be done outside the update function.
*
* The indented lines in the loop are instructions related to rounds
* processing.  The non-indented lines are instructions related to the
* message schedule.
*
* Author: Sean Gulley <sean.m.gulley@intel.com>
*
********************************************************************************
*
* Example compiler command line:
* icc intel_sha_extensions_sha256_intrinsic.c
* gcc -msha -msse4 intel_sha_extensions_sha256_intrinsic.c
*
*******************************************************************************/
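
/*
 * Example usage - a minimal sketch hashing the empty message.  The caller
 * supplies the initial hash values and a fully padded message, as described
 * above; the `block` buffer here is purely illustrative:
 *
 *     uint32_t digest[8] = {
 *         0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
 *         0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19
 *     };
 *     char block[64] = { 0 };
 *
 *     block[0] = (char)0x80;  // padding bit; the 64-bit length field in
 *                             // block[56..63] stays zero for ""
 *     intel_sha256_step(digest, block, 1);
 *     // digest[0..7] now holds SHA-256(""), starting 0xe3b0c442, ...
 */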

#include <sys/types.h>
#include <crypto/aesni/aesni_os.h>
#include <crypto/aesni/sha_sse.h>

#include <immintrin.h>

void intel_sha256_step(uint32_t *digest, const char *data, uint32_t num_blks) {
    __m128i state0, state1;
    __m128i msg;
    __m128i msgtmp0, msgtmp1, msgtmp2, msgtmp3;
    __m128i tmp;
    __m128i shuf_mask;
    __m128i abef_save, cdgh_save;

    // Load initial hash values
    // Need to reorder these appropriately:
    // DCBA, HGFE -> ABEF, CDGH (the layout _mm_sha256rnds2_epu32 expects)
    tmp    = _mm_loadu_si128((__m128i *) digest);
    state1 = _mm_loadu_si128((__m128i *) (digest+4));

    tmp    = _mm_shuffle_epi32(tmp, 0xB1);          // CDAB
    state1 = _mm_shuffle_epi32(state1, 0x1B);       // EFGH
    state0 = _mm_alignr_epi8(tmp, state1, 8);       // ABEF
    state1 = _mm_blend_epi16(state1, tmp, 0xF0);    // CDGH

    // Byte-swap mask to convert the big-endian message dwords to host order
    shuf_mask = _mm_set_epi64x(0x0c0d0e0f08090a0bull, 0x0405060700010203ull);

    while (num_blks > 0) {
        // Save hash values for addition after rounds
        abef_save = state0;
        cdgh_save = state1;

        // Rounds 0-3
        msg     = _mm_loadu_si128((const __m128i *) data);
        msgtmp0 = _mm_shuffle_epi8(msg, shuf_mask);
            msg    = _mm_add_epi32(msgtmp0,
                _mm_set_epi64x(0xE9B5DBA5B5C0FBCFull, 0x71374491428A2F98ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
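
        /*
         * Note on the four-round pattern above (per the documented SHA-NI
         * semantics): each _mm_sha256rnds2_epu32 call performs two rounds,
         * consuming two w[t]+K[t] values from the low 64 bits of msg, so
         * the 0x0E shuffle moves the upper two values down for the second
         * call.  The same structure repeats for each group of four rounds
         * below.
         */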

        // Rounds 4-7
        msgtmp1 = _mm_loadu_si128((const __m128i *) (data+16));
        msgtmp1 = _mm_shuffle_epi8(msgtmp1, shuf_mask);
            msg    = _mm_add_epi32(msgtmp1,
                _mm_set_epi64x(0xAB1C5ED5923F82A4ull, 0x59F111F13956C25Bull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp0 = _mm_sha256msg1_epu32(msgtmp0, msgtmp1);

        // Rounds 8-11
        msgtmp2 = _mm_loadu_si128((const __m128i *) (data+32));
        msgtmp2 = _mm_shuffle_epi8(msgtmp2, shuf_mask);
            msg    = _mm_add_epi32(msgtmp2,
                _mm_set_epi64x(0x550C7DC3243185BEull, 0x12835B01D807AA98ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp1 = _mm_sha256msg1_epu32(msgtmp1, msgtmp2);

        // Rounds 12-15
        msgtmp3 = _mm_loadu_si128((const __m128i *) (data+48));
        msgtmp3 = _mm_shuffle_epi8(msgtmp3, shuf_mask);
            msg    = _mm_add_epi32(msgtmp3,
                _mm_set_epi64x(0xC19BF1749BDC06A7ull, 0x80DEB1FE72BE5D74ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp3, msgtmp2, 4);
        msgtmp0 = _mm_add_epi32(msgtmp0, tmp);
        msgtmp0 = _mm_sha256msg2_epu32(msgtmp0, msgtmp3);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp2 = _mm_sha256msg1_epu32(msgtmp2, msgtmp3);
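
        /*
         * The non-indented alignr/add/msg1/msg2 lines interleaved with the
         * rounds compute the SHA-256 message-schedule recurrence
         *     w[t] = sigma1(w[t-2]) + w[t-7] + sigma0(w[t-15]) + w[t-16]
         * four dwords at a time: _mm_sha256msg1_epu32 adds the sigma0
         * terms, the alignr/add pair supplies the w[t-7] terms, and
         * _mm_sha256msg2_epu32 finishes with the sigma1 terms.
         */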

        // Rounds 16-19
            msg    = _mm_add_epi32(msgtmp0,
                _mm_set_epi64x(0x240CA1CC0FC19DC6ull, 0xEFBE4786E49B69C1ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp0, msgtmp3, 4);
        msgtmp1 = _mm_add_epi32(msgtmp1, tmp);
        msgtmp1 = _mm_sha256msg2_epu32(msgtmp1, msgtmp0);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp3 = _mm_sha256msg1_epu32(msgtmp3, msgtmp0);

        // Rounds 20-23
            msg    = _mm_add_epi32(msgtmp1,
                _mm_set_epi64x(0x76F988DA5CB0A9DCull, 0x4A7484AA2DE92C6Full));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp1, msgtmp0, 4);
        msgtmp2 = _mm_add_epi32(msgtmp2, tmp);
        msgtmp2 = _mm_sha256msg2_epu32(msgtmp2, msgtmp1);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp0 = _mm_sha256msg1_epu32(msgtmp0, msgtmp1);

        // Rounds 24-27
            msg    = _mm_add_epi32(msgtmp2,
                _mm_set_epi64x(0xBF597FC7B00327C8ull, 0xA831C66D983E5152ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp2, msgtmp1, 4);
        msgtmp3 = _mm_add_epi32(msgtmp3, tmp);
        msgtmp3 = _mm_sha256msg2_epu32(msgtmp3, msgtmp2);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp1 = _mm_sha256msg1_epu32(msgtmp1, msgtmp2);

        // Rounds 28-31
            msg    = _mm_add_epi32(msgtmp3,
                _mm_set_epi64x(0x1429296706CA6351ull, 0xD5A79147C6E00BF3ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp3, msgtmp2, 4);
        msgtmp0 = _mm_add_epi32(msgtmp0, tmp);
        msgtmp0 = _mm_sha256msg2_epu32(msgtmp0, msgtmp3);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp2 = _mm_sha256msg1_epu32(msgtmp2, msgtmp3);

        // Rounds 32-35
            msg    = _mm_add_epi32(msgtmp0,
                _mm_set_epi64x(0x53380D134D2C6DFCull, 0x2E1B213827B70A85ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp0, msgtmp3, 4);
        msgtmp1 = _mm_add_epi32(msgtmp1, tmp);
        msgtmp1 = _mm_sha256msg2_epu32(msgtmp1, msgtmp0);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp3 = _mm_sha256msg1_epu32(msgtmp3, msgtmp0);

        // Rounds 36-39
            msg    = _mm_add_epi32(msgtmp1,
                _mm_set_epi64x(0x92722C8581C2C92Eull, 0x766A0ABB650A7354ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp1, msgtmp0, 4);
        msgtmp2 = _mm_add_epi32(msgtmp2, tmp);
        msgtmp2 = _mm_sha256msg2_epu32(msgtmp2, msgtmp1);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp0 = _mm_sha256msg1_epu32(msgtmp0, msgtmp1);

        // Rounds 40-43
            msg    = _mm_add_epi32(msgtmp2,
                _mm_set_epi64x(0xC76C51A3C24B8B70ull, 0xA81A664BA2BFE8A1ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp2, msgtmp1, 4);
        msgtmp3 = _mm_add_epi32(msgtmp3, tmp);
        msgtmp3 = _mm_sha256msg2_epu32(msgtmp3, msgtmp2);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp1 = _mm_sha256msg1_epu32(msgtmp1, msgtmp2);

        // Rounds 44-47
            msg    = _mm_add_epi32(msgtmp3,
                _mm_set_epi64x(0x106AA070F40E3585ull, 0xD6990624D192E819ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp3, msgtmp2, 4);
        msgtmp0 = _mm_add_epi32(msgtmp0, tmp);
        msgtmp0 = _mm_sha256msg2_epu32(msgtmp0, msgtmp3);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp2 = _mm_sha256msg1_epu32(msgtmp2, msgtmp3);

        // Rounds 48-51
            msg    = _mm_add_epi32(msgtmp0,
                _mm_set_epi64x(0x34B0BCB52748774Cull, 0x1E376C0819A4C116ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp0, msgtmp3, 4);
        msgtmp1 = _mm_add_epi32(msgtmp1, tmp);
        msgtmp1 = _mm_sha256msg2_epu32(msgtmp1, msgtmp0);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);
        msgtmp3 = _mm_sha256msg1_epu32(msgtmp3, msgtmp0);

        // Rounds 52-55
            msg    = _mm_add_epi32(msgtmp1,
                _mm_set_epi64x(0x682E6FF35B9CCA4Full, 0x4ED8AA4A391C0CB3ull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp1, msgtmp0, 4);
        msgtmp2 = _mm_add_epi32(msgtmp2, tmp);
        msgtmp2 = _mm_sha256msg2_epu32(msgtmp2, msgtmp1);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);

        // Rounds 56-59
            msg    = _mm_add_epi32(msgtmp2,
                _mm_set_epi64x(0x8CC7020884C87814ull, 0x78A5636F748F82EEull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
        tmp     = _mm_alignr_epi8(msgtmp2, msgtmp1, 4);
        msgtmp3 = _mm_add_epi32(msgtmp3, tmp);
        msgtmp3 = _mm_sha256msg2_epu32(msgtmp3, msgtmp2);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);

        // Rounds 60-63
            msg    = _mm_add_epi32(msgtmp3,
                _mm_set_epi64x(0xC67178F2BEF9A3F7ull, 0xA4506CEB90BEFFFAull));
            state1 = _mm_sha256rnds2_epu32(state1, state0, msg);
            msg    = _mm_shuffle_epi32(msg, 0x0E);
            state0 = _mm_sha256rnds2_epu32(state0, state1, msg);

        // Add current hash values with previously saved
        state0 = _mm_add_epi32(state0, abef_save);
        state1 = _mm_add_epi32(state1, cdgh_save);

        data += 64;
        num_blks--;
    }

    // Write hash values back in the correct order
    tmp    = _mm_shuffle_epi32(state0, 0x1B);       // FEBA
    state1 = _mm_shuffle_epi32(state1, 0xB1);       // DCHG
    state0 = _mm_blend_epi16(tmp, state1, 0xF0);    // DCBA
    state1 = _mm_alignr_epi8(state1, tmp, 8);       // HGFE

    // Unaligned stores, matching the unaligned loads of the digest above
    _mm_storeu_si128((__m128i *) digest, state0);
    _mm_storeu_si128((__m128i *) (digest+4), state1);
}