/*
;uInt longest_match_x64(
;    deflate_state *s,
;    IPos cur_match);                             // current match

; gvmat64.S -- Asm portion of the optimized longest_match for 64-bit x86_64
;  (AMD64 on Athlon 64, Opteron, Phenom
;   and Intel EM64T on Pentium 4 with EM64T, Pentium D, Core 2 Duo, Core i5/i7)
; this file is a translation of gvmat64.asm to GCC 4.x (for Linux, Mac XCode)
; Copyright (C) 1995-2010 Jean-loup Gailly, Brian Raiter and Gilles Vollant.
;
; File written by Gilles Vollant, by converting to assembly the longest_match
;  from Jean-loup Gailly in deflate.c of zLib and infoZip zip,
;  and by taking inspiration from asm686 with masm, optimised assembly code
;  from Brian Raiter, written 1998
;
;  This software is provided 'as-is', without any express or implied
;  warranty. In no event will the authors be held liable for any damages
;  arising from the use of this software.
;
;  Permission is granted to anyone to use this software for any purpose,
;  including commercial applications, and to alter it and redistribute it
;  freely, subject to the following restrictions:
;
;  1. The origin of this software must not be misrepresented; you must not
;     claim that you wrote the original software. If you use this software
;     in a product, an acknowledgment in the product documentation would be
;     appreciated but is not required.
;  2. Altered source versions must be plainly marked as such, and must not be
;     misrepresented as being the original software.
;  3. This notice may not be removed or altered from any source distribution.
;
;  http://www.zlib.net
;  http://www.winimage.com/zLibDll
;  http://www.muppetlabs.com/~breadbox/software/assembly.html
;
; to compile this file for zLib, I use the option:
;   gcc -c -arch x86_64 gvmat64.S
;
;uInt longest_match(s, cur_match)
;    deflate_state *s;
;    IPos cur_match;                             // current match
;
; with XCode for Mac, I had a strange error with some jumps in Intel syntax;
; this is why BEFORE_JMP and AFTER_JMP are used
*/
#define BEFORE_JMP .att_syntax
#define AFTER_JMP  .intel_syntax noprefix

#ifndef NO_UNDERLINE
# define match_init    _match_init
# define longest_match _longest_match
#endif

.intel_syntax noprefix

.globl match_init, longest_match
#define LocalVarsSize 96

; registers used : rax, rbx, rcx, rdx, rsi, rdi, r8, r9, r10, r11, r12
; free registers : r14, r15
; register that can be saved : rsp

#define chainlenwmask (rsp + 8 - LocalVarsSize)
#define nicematch     (rsp + 16 - LocalVarsSize)
#define save_rdi      (rsp + 24 - LocalVarsSize)
#define save_rsi      (rsp + 32 - LocalVarsSize)
#define save_rbx      (rsp + 40 - LocalVarsSize)
#define save_rbp      (rsp + 48 - LocalVarsSize)
#define save_r12      (rsp + 56 - LocalVarsSize)
#define save_r13      (rsp + 64 - LocalVarsSize)
#define save_r14      (rsp + 72 - LocalVarsSize)
#define save_r15      (rsp + 80 - LocalVarsSize)
; all the +4 offsets are due to the addition of pending_buf_size (in zlib)
; to the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, remove the +4).
; Note : these values are good with an 8-byte boundary packed structure

#define MAX_MATCH     258
#define MIN_MATCH     3
#define MIN_LOOKAHEAD (MAX_MATCH+MIN_MATCH+1)
;;; Offsets for fields in the deflate_state structure. These numbers
;;; are calculated from the definition of deflate_state, with the
;;; assumption that the compiler will dword-align the fields. (Thus,
;;; changing the definition of deflate_state could easily cause this
;;; program to crash horribly, without so much as a warning at
;;; compile time. Sigh.)

; all the +zlib1222add offsets are due to the addition of fields
; in zlib in the deflate_state structure since the asm code was first written
; (if you compile with zlib 1.0.4 or older, use "zlib1222add equ (-4)").
; (if you compile with zlib between 1.0.5 and 1.2.2.1, use "zlib1222add equ 0").
; (if you compile with zlib 1.2.2.2 or later, use "zlib1222add equ 8").
/* you can check the structure offsets by running

#include <stdlib.h>
#include <stdio.h>
#include "deflate.h"

int main()
{
  deflate_state ds;
  deflate_state *s=&ds;
  printf("size pointer=%u\n",(int)sizeof(void*));

  printf("#define dsWSize %u\n",(int)(((char*)&(s->w_size))-((char*)s)));
  printf("#define dsWMask %u\n",(int)(((char*)&(s->w_mask))-((char*)s)));
  printf("#define dsWindow %u\n",(int)(((char*)&(s->window))-((char*)s)));
  printf("#define dsPrev %u\n",(int)(((char*)&(s->prev))-((char*)s)));
  printf("#define dsMatchLen %u\n",(int)(((char*)&(s->match_length))-((char*)s)));
  printf("#define dsPrevMatch %u\n",(int)(((char*)&(s->prev_match))-((char*)s)));
  printf("#define dsStrStart %u\n",(int)(((char*)&(s->strstart))-((char*)s)));
  printf("#define dsMatchStart %u\n",(int)(((char*)&(s->match_start))-((char*)s)));
  printf("#define dsLookahead %u\n",(int)(((char*)&(s->lookahead))-((char*)s)));
  printf("#define dsPrevLen %u\n",(int)(((char*)&(s->prev_length))-((char*)s)));
  printf("#define dsMaxChainLen %u\n",(int)(((char*)&(s->max_chain_length))-((char*)s)));
  printf("#define dsGoodMatch %u\n",(int)(((char*)&(s->good_match))-((char*)s)));
  printf("#define dsNiceMatch %u\n",(int)(((char*)&(s->nice_match))-((char*)s)));
  return 0;
}
*/
#define dsWindow      80
#define dsMatchLen    144
#define dsPrevMatch   148
#define dsStrStart    156
#define dsMatchStart  160
#define dsLookahead   164
#define dsPrevLen     168
#define dsMaxChainLen 172
#define dsGoodMatch   188
#define dsNiceMatch   192
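//; As a compile-time alternative to running the printf program above, a
//; minimal sketch (an assumption, not part of the original file) using C11
//; static_assert can verify the dsXxx values against the deflate_state
//; layout actually being compiled:
/*
#include <assert.h>
#include <stddef.h>
#include "deflate.h"

// the values must match the dsXxx defines above for this asm to be correct
static_assert(offsetof(deflate_state, window) == 80, "dsWindow mismatch");
static_assert(offsetof(deflate_state, nice_match) == 192, "dsNiceMatch mismatch");
*/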
#define window_size      [ rcx + dsWSize]
#define WMask            [ rcx + dsWMask]
#define window_ad        [ rcx + dsWindow]
#define prev_ad          [ rcx + dsPrev]
#define strstart         [ rcx + dsStrStart]
#define match_start      [ rcx + dsMatchStart]
#define Lookahead        [ rcx + dsLookahead] //; 0ffffffffh on infozip
#define prev_length      [ rcx + dsPrevLen]
#define max_chain_length [ rcx + dsMaxChainLen]
#define good_match       [ rcx + dsGoodMatch]
#define nice_match       [ rcx + dsNiceMatch]
; parameter 1 in rcx (deflate_state *s), param 2 in rdx (cur_match)

; see http://weblogs.asp.net/oldnewthing/archive/2004/01/14/58579.aspx and
; http://msdn.microsoft.com/library/en-us/kmarch/hh/kmarch/64bitAMD_8e951dd2-ee77-4728-8702-55ce4b5dd24a.xml.asp
;
; All registers must be preserved across the call, except for
; rax, rcx, rdx, r8, r9, r10, and r11, which are scratch.

; gcc on Mac OS X / Linux:
; see http://www.x86-64.org/documentation/abi-0.99.pdf
; param 1 in rdi, param 2 in rsi
; rbx, rsp, rbp, r12 to r15 must be preserved
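//; For reference, a short C-level sketch (an assumption based on the comments
//; above, not part of the original file) of how deflate.c sees these two
//; routines when zlib is built with ASMV; both follow the platform calling
//; convention described here:
/*
#include "deflate.h"

extern void match_init(void);                                 // called once from deflate.c
extern uInt longest_match(deflate_state *s, IPos cur_match);  // this file's entry point

// deflate.c then calls longest_match(s, hash_head) during compression,
// exactly as it would call its own C implementation.
*/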
;;; Save registers that the compiler may be using, and adjust rsp to
;;; make room for our stack frame.

;;; Retrieve the function arguments. r8d will hold cur_match
;;; throughout the entire function. edx will hold the pointer to the
;;; deflate_state structure during the function's setup (before
;;; entering the main loop).

; ms: parameter 1 in rcx (deflate_state *s), param 2 in edx -> r8 (cur_match)
; mac: param 1 in rdi, param 2 in rsi
; this clears the high 32 bits of r8, which can be garbage in both r8 and rdx
//;;; uInt wmask = s->w_mask;
//;;; unsigned chain_length = s->max_chain_length;
//;;; if (s->prev_length >= s->good_match) {
//;;;     chain_length >>= 2;
//;;; }

mov edi, prev_length
mov esi, good_match
mov ebx, max_chain_length

//;;; chainlen is decremented once beforehand so that the function can
//;;; use the sign flag instead of the zero flag for the exit test.
//;;; It is then shifted into the high word, to make room for the wmask
//;;; value, which it will always accompany.
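//; A rough C sketch (an assumption, for illustration only) of the packed value
//; kept in chainlenwmask: the pre-decremented chain length in the high 16 bits
//; and wmask in the low 16 bits, so a single "sub edx, 0x00010000" per
//; candidate both counts down the chain and keeps wmask at hand:
/*
unsigned chain_length = s->max_chain_length;
if (s->prev_length >= s->good_match)
    chain_length >>= 2;

unsigned chainlenwmask = ((chain_length - 1) << 16) | (s->w_mask & 0xFFFF);

// per candidate: chainlenwmask -= 0x00010000;  stop once the value goes negative
*/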
//;;; if ((uInt)nice_match > s->lookahead) nice_match = s->lookahead;

mov eax, nice_match
mov [chainlenwmask], ebx
mov r10d, Lookahead

mov [nicematch], r10d
//;;; register Bytef *scan = s->window + s->strstart;

lea r13, [r10 + rbp]

//;;; Determine how many bytes the scan ptr is off from being
//;;; dword-aligned.

//;;; IPos limit = s->strstart > (IPos)MAX_DIST(s) ?
//;;;     s->strstart - (IPos)MAX_DIST(s) : NIL;

mov eax, window_size
sub eax, MIN_LOOKAHEAD
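//; Expanding MAX_DIST(s) from zlib (w_size - MIN_LOOKAHEAD), the two
//; instructions above begin the C expression quoted in the comment; a sketch
//; of the full computation, assuming NIL == 0:
/*
IPos max_dist = s->w_size - MIN_LOOKAHEAD;                 // MAX_DIST(s)
IPos limit    = s->strstart > max_dist ? s->strstart - max_dist : 0;
*/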
mov r11d, prev_length

//;;; int best_len = s->prev_length;

//;;; Store the sum of s->window + best_len in rsi locally.

//;;; register ush scan_start = *(ushf*)scan;
//;;; register ush scan_end   = *(ushf*)(scan+best_len-1);
//;;; Posf *prev = s->prev;

movzx r12d, word ptr [r9]
movzx ebx, word ptr [r9 + r11 - 1]

//;;; Jump into the main loop.

mov edx, [chainlenwmask]

cmp bx, word ptr [rsi + r8 - 1]
jz LookupLoopIsZero
movzx r8d, word ptr [rdi + r8*2]
sub edx, 0x00010000
cmp bx, word ptr [rsi + r8 - 1]
jz LookupLoopIsZero

movzx r8d, word ptr [rdi + r8*2]
sub edx, 0x00010000
cmp bx, word ptr [rsi + r8 - 1]
jz LookupLoopIsZero

movzx r8d, word ptr [rdi + r8*2]
sub edx, 0x00010000
cmp bx, word ptr [rsi + r8 - 1]
jz LookupLoopIsZero
;;; match = s->window + cur_match;
;;; if (*(ushf*)(match+best_len-1) != scan_end ||
;;;     *(ushf*)match != scan_start) continue;

;;; } while ((cur_match = prev[cur_match & wmask]) > limit
;;;          && --chain_length != 0);

;;; Here is the inner loop of the function. The function will spend the
;;; majority of its time in this loop, and the majority of that time will
;;; be spent in the first ten instructions.

;;; Within this loop:
;;; edx = chainlenwmask - i.e., ((chainlen << 16) | wmask)
;;; esi = windowbestlen - i.e., (window + bestlen)

movzx r8d, word ptr [rdi + r8*2]
sub edx, 0x00010000
cmp bx, word ptr [rsi + r8 - 1]
cmp r12w, word ptr [r10 + r8]

//;;; Store the current value of chainlen.
mov [chainlenwmask], edx

;;; Point edi to the string under scrutiny, and esi to the string we
;;; are hoping to match it up with. In actuality, esi and edi are
;;; both pointed (MAX_MATCH_8 - scanalign) bytes ahead, and edx is
;;; initialized to -(MAX_MATCH_8 - scanalign).

mov rdx, 0xfffffffffffffef8   //; -(MAX_MATCH_8)
lea rsi, [rsi + r13 + 0x0108]  //; MAX_MATCH_8
lea rdi, [r9 + r13 + 0x0108]   //; MAX_MATCH_8
prefetcht1 [rsi + rdx]
prefetcht1 [rdi + rdx]
;;; Test the strings for equality, 8 bytes at a time. At the end,
;;; adjust rdx so that it is offset to the exact byte that mismatched.

;;; We already know at this point that the first three bytes of the
;;; strings match each other, and they can be safely passed over before
;;; starting the compare loop. So what this code does is skip over 0-3
;;; bytes, as much as necessary in order to dword-align the edi
;;; pointer. (rsi will still be misaligned three times out of four.)

;;; It should be confessed that this loop usually does not represent
;;; much of the total running time. Replacing it with a more
;;; straightforward "rep cmpsb" would not drastically degrade
;;; performance.
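//; A rough GCC-style C sketch (an assumption, for illustration only) of what
//; the 8-byte compare loop below computes: XOR two 8-byte chunks and, on the
//; first nonzero result, locate the lowest differing byte (little-endian).
//; The asm instead cascades through word/dword tests, but the idea is the same:
/*
#include <stddef.h>
#include <string.h>

static size_t match_extent(const unsigned char *a, const unsigned char *b, size_t n)
{
    size_t i = 0;
    while (i + 8 <= n) {
        unsigned long long xa, xb, diff;
        memcpy(&xa, a + i, 8);       // unaligned 8-byte loads
        memcpy(&xb, b + i, 8);
        diff = xa ^ xb;
        if (diff != 0)               // first differing byte = lowest set bit / 8
            return i + ((size_t)__builtin_ctzll(diff) >> 3);
        i += 8;
    }
    return i;
}
*/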
mov rax, [rsi + rdx]
xor rax, [rdi + rdx]

mov rax, [rsi + rdx + 8]
xor rax, [rdi + rdx + 8]

mov rax, [rsi + rdx + 8+8]
xor rax, [rdi + rdx + 8+8]
jnz LeaveLoopCmps16

LeaveLoopCmps16: add rdx, 8
LeaveLoopCmps8:  add rdx, 8

test eax, 0x0000FFFF

test eax, 0xffffffff
//;;; Calculate the length of the match. If it is longer than MAX_MATCH,
//;;; then automatically accept it as the best possible match and leave.

lea rax, [rdi + rdx]

;;; If the length of the match is not longer than the best match we
;;; have so far, then forget it and return to the lookup loop.
;///////////////////////////////////

mov edx, [chainlenwmask]
;;; s->match_start = cur_match;
;;; best_len = len;
;;; if (len >= nice_match) break;
;;; scan_end = *(ushf*)(scan+best_len-1);

mov match_start, r8d
cmp eax, [nicematch]

movzx ebx, word ptr [r9 + rax - 1]

mov edx, [chainlenwmask]
//;;; Accept the current string, with the maximum possible length.

mov match_start, r8d

//;;; if ((uInt)best_len <= s->lookahead) return (uInt)best_len;
//;;; return s->lookahead;
//;;; Restore the stack and return from whence we came.

//  mov rsi, [save_rsi]
//  mov rdi, [save_rdi]

//; please don't remove this string !
//; You can freely use gvmat64 in any free or commercial app
//; but it is far better not to remove the string in the binary!
// db 0dh,0ah,"asm686 with masm, optimised assembly code from Brian Raiter, written 1998, converted to amd 64 by Gilles Vollant 2005",0dh,0ah,0