Sync usage with man page.
[netbsd-mini2440.git] / sys / dev / pci / n8 / common / api / n8_precomp_md5.c
blob86469746a8c2f8aed56d82c0b73226463c839a74
1 /*-
2 * Copyright (C) 2001-2003 by NBMK Encryption Technologies.
3 * All rights reserved.
5 * NBMK Encryption Technologies provides no support of any kind for
6 * this software. Questions or concerns about it may be addressed to
7 * the members of the relevant open-source community at
8 * <tech-crypto@netbsd.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above
18 * copyright notice, this list of conditions and the following
19 * disclaimer in the documentation and/or other materials provided
20 * with the distribution.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
35 static char const n8_id[] = "$Id: n8_precomp_md5.c,v 1.1 2008/10/30 12:02:15 darran Exp $";
36 /*****************************************************************************/
37 /** @file n8_precomp_md5.c
38 * @brief Private version of md5 for n8_precompute_*.
40 * A more detailed description of the file.
42 *****************************************************************************/
44 /*****************************************************************************
45 * Revision history:
46 * 03/06/02 brr Removed openssl includes.
47 * 02/18/02 spm Removed #includes of usr headers. Converted printf's to
48 * DBG's.
49 * 01/22/02 dws Original version.
50 ****************************************************************************/
/** @defgroup subsystem_name Subsystem Title (not used for a header file)
 */
54 #define NO_ASM
56 #include "n8_precomp_md5.h"
57 #include "n8_precomp_md5_locl.h"
59 #define INIT_DATA_A (unsigned long)0x67452301L
60 #define INIT_DATA_B (unsigned long)0xefcdab89L
61 #define INIT_DATA_C (unsigned long)0x98badcfeL
62 #define INIT_DATA_D (unsigned long)0x10325476L
64 void n8_precomp_MD5_Init(N8_PRECOMP_MD5_CTX *c)
66 c->A=INIT_DATA_A;
67 c->B=INIT_DATA_B;
68 c->C=INIT_DATA_C;
69 c->D=INIT_DATA_D;
70 c->Nl=0;
71 c->Nh=0;
72 c->num=0;
75 /*************************************************/
76 /* This stuff implements n8_precomp_MD5_Update() */
77 /*************************************************/
78 #ifndef md5_block_host_order
79 void n8_precomp_md5_block_host_order (N8_PRECOMP_MD5_CTX *c, const void *data, int num)
81 const MD5_LONG *X=data;
82 register unsigned long A,B,C,D;
84 * In case you wonder why A-D are declared as long and not
85 * as MD5_LONG. Doing so results in slight performance
86 * boost on LP64 architectures. The catch is we don't
87 * really care if 32 MSBs of a 64-bit register get polluted
88 * with eventual overflows as we *save* only 32 LSBs in
89 * *either* case. Now declaring 'em long excuses the compiler
90 * from keeping 32 MSBs zeroed resulting in 13% performance
91 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
92 * Well, to be honest it should say that this *prevents*
93 * performance degradation.
95 * <appro@fy.chalmers.se>
98 #ifdef HMAC_DEBUG
99 DBG(("*********************************************************\n"));
100 DBG(("Entering md5_block_host_order ***************************\n"));
101 #endif
103 A=c->A;
104 B=c->B;
105 C=c->C;
106 D=c->D;
108 for (;num--;X+=HASH_LBLOCK)
110 /* Round 0 */
111 R0(A,B,C,D,X[ 0], 7,0xd76aa478L);
112 R0(D,A,B,C,X[ 1],12,0xe8c7b756L);
113 R0(C,D,A,B,X[ 2],17,0x242070dbL);
114 R0(B,C,D,A,X[ 3],22,0xc1bdceeeL);
115 R0(A,B,C,D,X[ 4], 7,0xf57c0fafL);
116 R0(D,A,B,C,X[ 5],12,0x4787c62aL);
117 R0(C,D,A,B,X[ 6],17,0xa8304613L);
118 R0(B,C,D,A,X[ 7],22,0xfd469501L);
119 R0(A,B,C,D,X[ 8], 7,0x698098d8L);
120 R0(D,A,B,C,X[ 9],12,0x8b44f7afL);
121 R0(C,D,A,B,X[10],17,0xffff5bb1L);
122 R0(B,C,D,A,X[11],22,0x895cd7beL);
123 R0(A,B,C,D,X[12], 7,0x6b901122L);
124 R0(D,A,B,C,X[13],12,0xfd987193L);
125 R0(C,D,A,B,X[14],17,0xa679438eL);
126 R0(B,C,D,A,X[15],22,0x49b40821L);
127 /* Round 1 */
128 R1(A,B,C,D,X[ 1], 5,0xf61e2562L);
129 R1(D,A,B,C,X[ 6], 9,0xc040b340L);
130 R1(C,D,A,B,X[11],14,0x265e5a51L);
131 R1(B,C,D,A,X[ 0],20,0xe9b6c7aaL);
132 R1(A,B,C,D,X[ 5], 5,0xd62f105dL);
133 R1(D,A,B,C,X[10], 9,0x02441453L);
134 R1(C,D,A,B,X[15],14,0xd8a1e681L);
135 R1(B,C,D,A,X[ 4],20,0xe7d3fbc8L);
136 R1(A,B,C,D,X[ 9], 5,0x21e1cde6L);
137 R1(D,A,B,C,X[14], 9,0xc33707d6L);
138 R1(C,D,A,B,X[ 3],14,0xf4d50d87L);
139 R1(B,C,D,A,X[ 8],20,0x455a14edL);
140 R1(A,B,C,D,X[13], 5,0xa9e3e905L);
141 R1(D,A,B,C,X[ 2], 9,0xfcefa3f8L);
142 R1(C,D,A,B,X[ 7],14,0x676f02d9L);
143 R1(B,C,D,A,X[12],20,0x8d2a4c8aL);
144 /* Round 2 */
145 R2(A,B,C,D,X[ 5], 4,0xfffa3942L);
146 R2(D,A,B,C,X[ 8],11,0x8771f681L);
147 R2(C,D,A,B,X[11],16,0x6d9d6122L);
148 R2(B,C,D,A,X[14],23,0xfde5380cL);
149 R2(A,B,C,D,X[ 1], 4,0xa4beea44L);
150 R2(D,A,B,C,X[ 4],11,0x4bdecfa9L);
151 R2(C,D,A,B,X[ 7],16,0xf6bb4b60L);
152 R2(B,C,D,A,X[10],23,0xbebfbc70L);
153 R2(A,B,C,D,X[13], 4,0x289b7ec6L);
154 R2(D,A,B,C,X[ 0],11,0xeaa127faL);
155 R2(C,D,A,B,X[ 3],16,0xd4ef3085L);
156 R2(B,C,D,A,X[ 6],23,0x04881d05L);
157 R2(A,B,C,D,X[ 9], 4,0xd9d4d039L);
158 R2(D,A,B,C,X[12],11,0xe6db99e5L);
159 R2(C,D,A,B,X[15],16,0x1fa27cf8L);
160 R2(B,C,D,A,X[ 2],23,0xc4ac5665L);
161 /* Round 3 */
162 R3(A,B,C,D,X[ 0], 6,0xf4292244L);
163 R3(D,A,B,C,X[ 7],10,0x432aff97L);
164 R3(C,D,A,B,X[14],15,0xab9423a7L);
165 R3(B,C,D,A,X[ 5],21,0xfc93a039L);
166 R3(A,B,C,D,X[12], 6,0x655b59c3L);
167 R3(D,A,B,C,X[ 3],10,0x8f0ccc92L);
168 R3(C,D,A,B,X[10],15,0xffeff47dL);
169 R3(B,C,D,A,X[ 1],21,0x85845dd1L);
170 R3(A,B,C,D,X[ 8], 6,0x6fa87e4fL);
171 R3(D,A,B,C,X[15],10,0xfe2ce6e0L);
172 R3(C,D,A,B,X[ 6],15,0xa3014314L);
173 R3(B,C,D,A,X[13],21,0x4e0811a1L);
174 R3(A,B,C,D,X[ 4], 6,0xf7537e82L);
175 R3(D,A,B,C,X[11],10,0xbd3af235L);
176 R3(C,D,A,B,X[ 2],15,0x2ad7d2bbL);
177 R3(B,C,D,A,X[ 9],21,0xeb86d391L);
179 A = c->A += A;
180 B = c->B += B;
181 C = c->C += C;
182 D = c->D += D;
183 #ifdef HMAC_DEBUG
184 DBG(("Final {A=%08lx B=%08lx C=%08lx D=%08lx}\n", A, B, C, D));
185 #endif
187 #ifdef HMAC_DEBUG
188 DBG(("Leaving md5_block_host_order ****************************\n"));
189 DBG(("*********************************************************\n"));
190 #endif
192 #endif
194 #ifndef md5_block_data_order
195 #ifdef X
196 #undef X
197 #endif
198 void n8_precomp_md5_block_data_order (N8_PRECOMP_MD5_CTX *c, const void *data_, int num)
200 const unsigned char *data=data_;
201 register unsigned long A,B,C,D,l;
203 * In case you wonder why A-D are declared as long and not
204 * as MD5_LONG. Doing so results in slight performance
205 * boost on LP64 architectures. The catch is we don't
206 * really care if 32 MSBs of a 64-bit register get polluted
207 * with eventual overflows as we *save* only 32 LSBs in
208 * *either* case. Now declaring 'em long excuses the compiler
209 * from keeping 32 MSBs zeroed resulting in 13% performance
210 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
211 * Well, to be honest it should say that this *prevents*
212 * performance degradation.
214 * <appro@fy.chalmers.se>
216 #ifndef MD32_XARRAY
217 /* See comment in crypto/sha/sha_locl.h for details. */
218 unsigned long XX0, XX1, XX2, XX3, XX4, XX5, XX6, XX7,
219 XX8, XX9,XX10,XX11,XX12,XX13,XX14,XX15;
220 # define X(i) XX##i
221 #else
222 MD5_LONG XX[MD5_LBLOCK];
223 # define X(i) XX[i]
224 #endif
226 #ifdef HMAC_DEBUG
227 DBG(("*********************************************************\n"));
228 DBG(("Entering md5_block_data_order ***************************\n"));
229 #endif
231 A=c->A;
232 B=c->B;
233 C=c->C;
234 D=c->D;
236 for (;num--;)
238 HOST_c2l(data,l); X( 0)=l; HOST_c2l(data,l); X( 1)=l;
239 /* Round 0 */
240 R0(A,B,C,D,X( 0), 7,0xd76aa478L); HOST_c2l(data,l); X( 2)=l;
241 R0(D,A,B,C,X( 1),12,0xe8c7b756L); HOST_c2l(data,l); X( 3)=l;
242 R0(C,D,A,B,X( 2),17,0x242070dbL); HOST_c2l(data,l); X( 4)=l;
243 R0(B,C,D,A,X( 3),22,0xc1bdceeeL); HOST_c2l(data,l); X( 5)=l;
244 R0(A,B,C,D,X( 4), 7,0xf57c0fafL); HOST_c2l(data,l); X( 6)=l;
245 R0(D,A,B,C,X( 5),12,0x4787c62aL); HOST_c2l(data,l); X( 7)=l;
246 R0(C,D,A,B,X( 6),17,0xa8304613L); HOST_c2l(data,l); X( 8)=l;
247 R0(B,C,D,A,X( 7),22,0xfd469501L); HOST_c2l(data,l); X( 9)=l;
248 R0(A,B,C,D,X( 8), 7,0x698098d8L); HOST_c2l(data,l); X(10)=l;
249 R0(D,A,B,C,X( 9),12,0x8b44f7afL); HOST_c2l(data,l); X(11)=l;
250 R0(C,D,A,B,X(10),17,0xffff5bb1L); HOST_c2l(data,l); X(12)=l;
251 R0(B,C,D,A,X(11),22,0x895cd7beL); HOST_c2l(data,l); X(13)=l;
252 R0(A,B,C,D,X(12), 7,0x6b901122L); HOST_c2l(data,l); X(14)=l;
253 R0(D,A,B,C,X(13),12,0xfd987193L); HOST_c2l(data,l); X(15)=l;
254 R0(C,D,A,B,X(14),17,0xa679438eL);
255 R0(B,C,D,A,X(15),22,0x49b40821L);
256 /* Round 1 */
257 R1(A,B,C,D,X( 1), 5,0xf61e2562L);
258 R1(D,A,B,C,X( 6), 9,0xc040b340L);
259 R1(C,D,A,B,X(11),14,0x265e5a51L);
260 R1(B,C,D,A,X( 0),20,0xe9b6c7aaL);
261 R1(A,B,C,D,X( 5), 5,0xd62f105dL);
262 R1(D,A,B,C,X(10), 9,0x02441453L);
263 R1(C,D,A,B,X(15),14,0xd8a1e681L);
264 R1(B,C,D,A,X( 4),20,0xe7d3fbc8L);
265 R1(A,B,C,D,X( 9), 5,0x21e1cde6L);
266 R1(D,A,B,C,X(14), 9,0xc33707d6L);
267 R1(C,D,A,B,X( 3),14,0xf4d50d87L);
268 R1(B,C,D,A,X( 8),20,0x455a14edL);
269 R1(A,B,C,D,X(13), 5,0xa9e3e905L);
270 R1(D,A,B,C,X( 2), 9,0xfcefa3f8L);
271 R1(C,D,A,B,X( 7),14,0x676f02d9L);
272 R1(B,C,D,A,X(12),20,0x8d2a4c8aL);
273 /* Round 2 */
274 R2(A,B,C,D,X( 5), 4,0xfffa3942L);
275 R2(D,A,B,C,X( 8),11,0x8771f681L);
276 R2(C,D,A,B,X(11),16,0x6d9d6122L);
277 R2(B,C,D,A,X(14),23,0xfde5380cL);
278 R2(A,B,C,D,X( 1), 4,0xa4beea44L);
279 R2(D,A,B,C,X( 4),11,0x4bdecfa9L);
280 R2(C,D,A,B,X( 7),16,0xf6bb4b60L);
281 R2(B,C,D,A,X(10),23,0xbebfbc70L);
282 R2(A,B,C,D,X(13), 4,0x289b7ec6L);
283 R2(D,A,B,C,X( 0),11,0xeaa127faL);
284 R2(C,D,A,B,X( 3),16,0xd4ef3085L);
285 R2(B,C,D,A,X( 6),23,0x04881d05L);
286 R2(A,B,C,D,X( 9), 4,0xd9d4d039L);
287 R2(D,A,B,C,X(12),11,0xe6db99e5L);
288 R2(C,D,A,B,X(15),16,0x1fa27cf8L);
289 R2(B,C,D,A,X( 2),23,0xc4ac5665L);
290 /* Round 3 */
291 R3(A,B,C,D,X( 0), 6,0xf4292244L);
292 R3(D,A,B,C,X( 7),10,0x432aff97L);
293 R3(C,D,A,B,X(14),15,0xab9423a7L);
294 R3(B,C,D,A,X( 5),21,0xfc93a039L);
295 R3(A,B,C,D,X(12), 6,0x655b59c3L);
296 R3(D,A,B,C,X( 3),10,0x8f0ccc92L);
297 R3(C,D,A,B,X(10),15,0xffeff47dL);
298 R3(B,C,D,A,X( 1),21,0x85845dd1L);
299 R3(A,B,C,D,X( 8), 6,0x6fa87e4fL);
300 R3(D,A,B,C,X(15),10,0xfe2ce6e0L);
301 R3(C,D,A,B,X( 6),15,0xa3014314L);
302 R3(B,C,D,A,X(13),21,0x4e0811a1L);
303 R3(A,B,C,D,X( 4), 6,0xf7537e82L);
304 R3(D,A,B,C,X(11),10,0xbd3af235L);
305 R3(C,D,A,B,X( 2),15,0x2ad7d2bbL);
306 R3(B,C,D,A,X( 9),21,0xeb86d391L);
308 A = c->A += A;
309 B = c->B += B;
310 C = c->C += C;
311 D = c->D += D;
312 #ifdef HMAC_DEBUG
313 DBG(("Final {A=%08lx B=%08lx C=%08lx D=%08lx}\n", A, B, C, D));
314 #endif
316 #ifdef HMAC_DEBUG
317 DBG(("Leaving md5_block_data_order ****************************\n"));
318 DBG(("*********************************************************\n"));
319 #endif
321 #endif