;*****************************************************************************
;*****************************************************************************
;* Copyright (C) 2005-2011 x264 project
;* Authors: Loren Merritt <lorenm@u.washington.edu>
;*          Anton Mitrofanov <BugMaster@narod.ru>
;*          Jason Garrett-Glaser <darkshikari@gmail.com>
;* Permission to use, copy, modify, and/or distribute this software for any
;* purpose with or without fee is hereby granted, provided that the above
;* copyright notice and this permission notice appear in all copies.
;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
;*****************************************************************************
; This is a header file for the x264ASM assembly language, which uses
; NASM/YASM syntax combined with a large number of macros to provide easy
; abstraction between different calling conventions (x86_32, win64, linux64).
; It also has various other useful features to simplify writing the kind of
; DSP functions that are most often used in x264.
;
; Unlike the rest of x264, this file is available under an ISC license, as it
; has significant usefulness outside of x264 and we want it to be available
; to the largest audience possible. Of course, if you modify it for your own
; purposes to add a new feature, we strongly encourage contributing a patch
; as this feature might be useful for others as well. Send patches or ideas
; to x264-devel@videolan.org .
%ifndef THIRD_PARTY_X86INC_X86INC_ASM_
%define THIRD_PARTY_X86INC_X86INC_ASM_
; TODO(wolenetz): Consider either updating this customized version to be based
; on a more recent upstream original, or switching to third_party/ffmpeg's
; version of this abstraction layer. See http://crbug.com/175029
%define program_name ff
%ifidn __OUTPUT_FORMAT__,win32
%elifidn __OUTPUT_FORMAT__,win64
%define mangle(x) _ %+ x
; FIXME: All of the 64bit asm functions that take a stride as an argument
; via register assume that the high dword of that register is filled with 0.
; This is true in practice (since we never do any 64bit arithmetic on strides,
; and x264's strides are all positive), but is not guaranteed by the ABI.
; Name of the .rodata section.
; Kludge: Something on OS X fails to align .rodata even given an align
; attribute, so use a different read-only section.
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,macho64
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,macho
        SECTION .text align=%1
    %elifidn __OUTPUT_FORMAT__,aout
        SECTION .rodata align=%1
%macro SECTION_RODATA 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .rodata align=%1
; aout does not support align=
%macro SECTION_TEXT 0-1 16
    %ifidn __OUTPUT_FORMAT__,aout
        SECTION .text align=%1
%elifndef ARCH_X86_64
; For Chromium we may build PIC code even for 32-bit systems.
; x86_32 doesn't require PIC.
; Some distros prefer shared objects to be PIC, but nothing breaks if
; the code contains a few textrels, so we'll skip that complexity.
; Macros to eliminate most code duplication between x86_32 and x86_64:
; Currently this works only for leaf functions which load all their arguments
; into registers at the start, and make no other use of the stack. Luckily that
; covers most of x264's asm.
; %1 = number of arguments. loads them from stack if needed.
; %2 = number of registers used. pushes callee-saved regs if needed.
; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
; %4 = list of names to define to registers
; PROLOGUE can also be invoked by adding the same options to cglobal
;
; cglobal foo, 2,3,0, dst, src, tmp
; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
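;
; A minimal body using those names might look like the following sketch (not
; actual x264 code; "foo" and the argument names are just the example above):
;   cglobal foo, 2,3,0, dst, src, tmp
;       mov  tmpd, [srcq]
;       mov  [dstq], tmpd
;       RET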
; TODO Some functions can use some args directly from the stack. If they're the
; last args then you can just not declare them, but if they're in the middle
; we'd need a more flexible macro.
; Pops anything that was pushed by PROLOGUE
; Same, but if it doesn't pop anything it becomes a 2-byte ret, for Athlons,
; which are slow when a normal ret follows a branch.
; rN and rNq are the native-size register holding function argument N
; rNd, rNw, rNb are dword, word, and byte size
; rNm is the original location of arg N (a register or on the stack), dword
; rNmp is native size
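;
; For example (a sketch, not from x264): inside a function declared with
; "cglobal bar, 2,2" (bar being a hypothetical name),
;   mov r0d, r1m
; moves the dword form of the second argument's original location into the
; low dword of the first argument's register.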
    %ifid %6 ; i.e. it's a register
    %elifdef ARCH_X86_64 ; memory
        %define r%1mp qword %6
        %define r%1mp dword %6
%macro DECLARE_REG_SIZE 2
DECLARE_REG_SIZE ax, al
DECLARE_REG_SIZE bx, bl
DECLARE_REG_SIZE cx, cl
DECLARE_REG_SIZE dx, dl
DECLARE_REG_SIZE si, sil
DECLARE_REG_SIZE di, dil
DECLARE_REG_SIZE bp, bpl
; t# defines for when per-arch register allocation is more complex than just function arguments
%macro DECLARE_REG_TMP 1-*
    CAT_XDEFINE t, %%i, r%1
%macro DECLARE_REG_TMP_SIZE 0-*
    %define t%1q t%1 %+ q
    %define t%1d t%1 %+ d
    %define t%1w t%1 %+ w
    %define t%1b t%1 %+ b
DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9
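; For example (a sketch of a per-arch mapping, not taken from x264):
;   DECLARE_REG_TMP 2, 0, 1
; makes t0 an alias of r2, t1 of r0 and t2 of r1, and DECLARE_REG_TMP_SIZE
; above provides the matching t0q/t0d/t0w/t0b size variants.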
    %assign stack_offset stack_offset+gprsize
    %assign stack_offset stack_offset-gprsize
    %assign stack_offset stack_offset+(%2)
    %assign stack_offset stack_offset-(%2)
%macro movsxdifnidn 2
%macro DEFINE_ARGS 0-*
    CAT_UNDEF arg_name %+ %%i, q
    CAT_UNDEF arg_name %+ %%i, d
    CAT_UNDEF arg_name %+ %%i, w
    CAT_UNDEF arg_name %+ %%i, b
    CAT_UNDEF arg_name %+ %%i, m
    CAT_UNDEF arg_name, %%i
    %xdefine %1q r %+ %%i %+ q
    %xdefine %1d r %+ %%i %+ d
    %xdefine %1w r %+ %%i %+ w
    %xdefine %1b r %+ %%i %+ b
    %xdefine %1m r %+ %%i %+ m
    CAT_XDEFINE arg_name, %%i, %1
    %assign n_arg_names %%i
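; For example (a sketch): after
;   DEFINE_ARGS dst, src, len
; dstq/dstd/dstw/dstb/dstm alias r0's forms, srcq etc. alias r1's, and
; lenq etc. alias r2's.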
%ifdef WIN64 ; Windows x64 ;=================================================
DECLARE_REG 0, rcx, ecx, cx,  cl,  ecx
DECLARE_REG 1, rdx, edx, dx,  dl,  edx
DECLARE_REG 2, r8,  r8d, r8w, r8b, r8d
DECLARE_REG 3, r9,  r9d, r9w, r9b, r9d
DECLARE_REG 4, rdi, edi, di,  dil, [rsp + stack_offset + 40]
DECLARE_REG 5, rsi, esi, si,  sil, [rsp + stack_offset + 48]
DECLARE_REG 6, rax, eax, ax,  al,  [rsp + stack_offset + 56]
%define r7m [rsp + stack_offset + 64]
%define r8m [rsp + stack_offset + 72]
%macro LOAD_IF_USED 2 ; reg_id, number_of_args
        mov r%1, [rsp + stack_offset + 8 + %1*8]
%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
    ASSERT regs_used <= 7
        %assign stack_offset stack_offset+16
%macro WIN64_SPILL_XMM 1
    %assign xmm_regs_used %1
    ASSERT xmm_regs_used <= 16
    %if xmm_regs_used > 6
        sub rsp, (xmm_regs_used-6)*16+16
        %assign stack_offset stack_offset+(xmm_regs_used-6)*16+16
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            movdqa [rsp + (%%i-6)*16+8], xmm %+ %%i
%macro WIN64_RESTORE_XMM_INTERNAL 1
    %if xmm_regs_used > 6
        %assign %%i xmm_regs_used
        %rep (xmm_regs_used-6)
            movdqa xmm %+ %%i, [%1 + (%%i-6)*16+8]
        add %1, (xmm_regs_used-6)*16+16
%macro WIN64_RESTORE_XMM 1
    WIN64_RESTORE_XMM_INTERNAL %1
    %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16
    %assign xmm_regs_used 0
    WIN64_RESTORE_XMM_INTERNAL rsp
    %if regs_used > 4 || xmm_regs_used > 6
%elifdef ARCH_X86_64 ; *nix x64 ;=============================================
DECLARE_REG 0, rdi, edi, di,  dil, edi
DECLARE_REG 1, rsi, esi, si,  sil, esi
DECLARE_REG 2, rdx, edx, dx,  dl,  edx
DECLARE_REG 3, rcx, ecx, cx,  cl,  ecx
DECLARE_REG 4, r8,  r8d, r8w, r8b, r8d
DECLARE_REG 5, r9,  r9d, r9w, r9b, r9d
DECLARE_REG 6, rax, eax, ax,  al,  [rsp + stack_offset + 8]
%define r7m [rsp + stack_offset + 16]
%define r8m [rsp + stack_offset + 24]
%macro LOAD_IF_USED 2 ; reg_id, number_of_args
        mov r%1, [rsp - 40 + %1*8]
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
%else ; X86_32 ;==============================================================
; Begin chromium edits
; Change the order of registers so we can get the lower 8 bits of the 5th and
; 6th arguments.
DECLARE_REG 0, esi, esi, si, null, [esp + stack_offset + 4]
DECLARE_REG 1, edi, edi, di, null, [esp + stack_offset + 8]
DECLARE_REG 2, ecx, ecx, cx, cl,   [esp + stack_offset + 12]
DECLARE_REG 3, edx, edx, dx, dl,   [esp + stack_offset + 16]
DECLARE_REG 4, eax, eax, ax, al,   [esp + stack_offset + 20]
DECLARE_REG 5, ebx, ebx, bx, bl,   [esp + stack_offset + 24]
DECLARE_REG 0, eax, eax, ax, al,   [esp + stack_offset + 4]
DECLARE_REG 1, ecx, ecx, cx, cl,   [esp + stack_offset + 8]
DECLARE_REG 2, edx, edx, dx, dl,   [esp + stack_offset + 12]
DECLARE_REG 3, ebx, ebx, bx, bl,   [esp + stack_offset + 16]
DECLARE_REG 4, esi, esi, si, null, [esp + stack_offset + 20]
DECLARE_REG 5, edi, edi, di, null, [esp + stack_offset + 24]
DECLARE_REG 6, ebp, ebp, bp, null, [esp + stack_offset + 28]
%define r7m [esp + stack_offset + 32]
%define r8m [esp + stack_offset + 36]
%macro PUSH_IF_USED 1 ; reg_id
        %assign stack_offset stack_offset+4
%macro POP_IF_USED 1 ; reg_id
%macro LOAD_IF_USED 2 ; reg_id, number_of_args
        mov r%1, [esp + stack_offset + 4 + %1*4]
%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
    ASSERT regs_used <= 7
%endif ;======================================================================
%macro WIN64_SPILL_XMM 1
%macro WIN64_RESTORE_XMM 1
;=============================================================================
; arch-independent part
;=============================================================================
%assign function_align 16
; Symbol prefix for C linkage
    %xdefine %1 mangle(program_name %+ _ %+ %1)
    %xdefine %1.skip_prologue %1 %+ .skip_prologue
    %ifidn __OUTPUT_FORMAT__,elf
        global %1:function hidden
    RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
    %assign stack_offset 0
    %xdefine %1 mangle(program_name %+ _ %+ %1)
; like cextern, but without the prefix
%macro cextern_naked 1
    %xdefine %1 mangle(%1)
    %xdefine %1 mangle(program_name %+ _ %+ %1)
; This is needed for ELF, otherwise the GNU linker assumes the stack is
; executable by default.
%ifidn __OUTPUT_FORMAT__,elf
SECTION .note.GNU-stack noalloc noexec nowrite progbits
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_MMX
%define movnta movntq
CAT_XDEFINE m, %%i, mm %+ %%i
CAT_XDEFINE nmm, %%i, %%i
%assign avx_enabled 0
%define RESET_MM_PERMUTATION INIT_XMM
%define num_mmregs 16
%define movnta movntdq
CAT_XDEFINE m, %%i, xmm %+ %%i
CAT_XDEFINE nxmm, %%i, %%i
%assign avx_enabled 1
%define PALIGNR PALIGNR_SSSE3
%define RESET_MM_PERMUTATION INIT_AVX
%assign avx_enabled 1
%define RESET_MM_PERMUTATION INIT_YMM
%define num_mmregs 16
CAT_XDEFINE m, %%i, ymm %+ %%i
CAT_XDEFINE nymm, %%i, %%i
; I often want to use macros that permute their arguments. e.g. there's no
; efficient way to implement butterfly or transpose or dct without swapping some
; registers.
;
; I would like to not have to manually keep track of the permutations:
; If I insert a permutation in the middle of a function, it should automatically
; change everything that follows. For more complex macros I may also have multiple
; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
;
; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
; permutes its arguments. It's equivalent to exchanging the contents of the
; registers, except that this way you exchange the register names instead, so it
; doesn't cost any cycles.
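;
; For example (a sketch): ending a macro with
;   SWAP 0, 1
; leaves the data where it is but exchanges the names m0 and m1, so the code
; that follows the macro sees the permuted registers without any extra moves.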
%macro PERMUTE 2-* ; takes a list of pairs to swap
%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
    CAT_XDEFINE n, m%1, %1
    CAT_XDEFINE n, m%2, %2
; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
; Be careful using this mode in nested macros though, as in some cases there may be
; other copies of m# that have already been dereferenced and don't get updated correctly.
    %xdefine %%n1 n %+ %1
    %xdefine %%n2 n %+ %2
    %xdefine tmp m %+ %%n1
    CAT_XDEFINE m, %%n1, m %+ %%n2
    CAT_XDEFINE m, %%n2, tmp
    CAT_XDEFINE n, m %+ %%n1, %%n1
    CAT_XDEFINE n, m %+ %%n2, %%n2
; If SAVE_MM_PERMUTATION is placed at the end of a function and given the
; function name, then any later calls to that function will automatically
; load the permutation, so values can be returned in mmregs.
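; For example (a sketch; "my_func" is a hypothetical name):
;   SAVE_MM_PERMUTATION my_func   ; placed at the end of my_func
; A later "call my_func" then reloads that permutation in the caller, so
; values left in mmregs keep their m# names.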
%macro SAVE_MM_PERMUTATION 1 ; name to save as
    CAT_XDEFINE %1_m, %%i, m %+ %%i
%macro LOAD_MM_PERMUTATION 1 ; name to load from
    CAT_XDEFINE m, %%i, %1_m %+ %%i
    CAT_XDEFINE n, m %+ %%i, %%i
    LOAD_MM_PERMUTATION %1
; Substitutions that reduce instruction size but are functionally equivalent
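; For instance (a sketch of one such substitution): "add r0, 128" can be
; emitted as "sub r0, -128", since -128 fits in a signed 8-bit immediate
; while +128 does not, giving identical results in fewer bytes.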
;=============================================================================
; AVX abstraction layer
;=============================================================================
    CAT_XDEFINE sizeofmm, i, 8
    CAT_XDEFINE sizeofxmm, i, 16
    CAT_XDEFINE sizeofymm, i, 32
;%2 == 1 if float, 0 if int
;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm)
;%4 == number of operands given
%macro RUN_AVX_INSTR 6-7+
        %define %%regmov movq
        %define %%regmov movaps
        %define %%regmov movdqa
    %if avx_enabled && sizeof%5==16
;%2 == 1 if float, 0 if int
;%3 == 0 if 3-operand (xmm, xmm, xmm), 1 if 4-operand (xmm, xmm, xmm, imm)
%macro %1 2-8 fnord, fnord, fnord, %1, %2, %3
        RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
        RUN_AVX_INSTR %6, %7, %8, 3, %1, %2, %3
        RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
        RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
AVX_INSTR addpd, 1, 0
AVX_INSTR addps, 1, 0
AVX_INSTR addsd, 1, 0
AVX_INSTR addss, 1, 0
AVX_INSTR addsubpd, 1, 0
AVX_INSTR addsubps, 1, 0
AVX_INSTR andpd, 1, 0
AVX_INSTR andps, 1, 0
AVX_INSTR andnpd, 1, 0
AVX_INSTR andnps, 1, 0
AVX_INSTR blendpd, 1, 0
AVX_INSTR blendps, 1, 0
AVX_INSTR blendvpd, 1, 0
AVX_INSTR blendvps, 1, 0
AVX_INSTR cmppd, 1, 0
AVX_INSTR cmpps, 1, 0
AVX_INSTR cmpsd, 1, 0
AVX_INSTR cmpss, 1, 0
AVX_INSTR divpd, 1, 0
AVX_INSTR divps, 1, 0
AVX_INSTR divsd, 1, 0
AVX_INSTR divss, 1, 0
AVX_INSTR haddpd, 1, 0
AVX_INSTR haddps, 1, 0
AVX_INSTR hsubpd, 1, 0
AVX_INSTR hsubps, 1, 0
AVX_INSTR maxpd, 1, 0
AVX_INSTR maxps, 1, 0
AVX_INSTR maxsd, 1, 0
AVX_INSTR maxss, 1, 0
AVX_INSTR minpd, 1, 0
AVX_INSTR minps, 1, 0
AVX_INSTR minsd, 1, 0
AVX_INSTR minss, 1, 0
AVX_INSTR mpsadbw, 0, 1
AVX_INSTR mulpd, 1, 0
AVX_INSTR mulps, 1, 0
AVX_INSTR mulsd, 1, 0
AVX_INSTR mulss, 1, 0
AVX_INSTR packsswb, 0, 0
AVX_INSTR packssdw, 0, 0
AVX_INSTR packuswb, 0, 0
AVX_INSTR packusdw, 0, 0
AVX_INSTR paddb, 0, 0
AVX_INSTR paddw, 0, 0
AVX_INSTR paddd, 0, 0
AVX_INSTR paddq, 0, 0
AVX_INSTR paddsb, 0, 0
AVX_INSTR paddsw, 0, 0
AVX_INSTR paddusb, 0, 0
AVX_INSTR paddusw, 0, 0
AVX_INSTR palignr, 0, 1
AVX_INSTR pandn, 0, 0
AVX_INSTR pavgb, 0, 0
AVX_INSTR pavgw, 0, 0
AVX_INSTR pblendvb, 0, 0
AVX_INSTR pblendw, 0, 1
AVX_INSTR pcmpestri, 0, 0
AVX_INSTR pcmpestrm, 0, 0
AVX_INSTR pcmpistri, 0, 0
AVX_INSTR pcmpistrm, 0, 0
AVX_INSTR pcmpeqb, 0, 0
AVX_INSTR pcmpeqw, 0, 0
AVX_INSTR pcmpeqd, 0, 0
AVX_INSTR pcmpeqq, 0, 0
AVX_INSTR pcmpgtb, 0, 0
AVX_INSTR pcmpgtw, 0, 0
AVX_INSTR pcmpgtd, 0, 0
AVX_INSTR pcmpgtq, 0, 0
AVX_INSTR phaddw, 0, 0
AVX_INSTR phaddd, 0, 0
AVX_INSTR phaddsw, 0, 0
AVX_INSTR phsubw, 0, 0
AVX_INSTR phsubd, 0, 0
AVX_INSTR phsubsw, 0, 0
AVX_INSTR pmaddwd, 0, 0
AVX_INSTR pmaddubsw, 0, 0
AVX_INSTR pmaxsb, 0, 0
AVX_INSTR pmaxsw, 0, 0
AVX_INSTR pmaxsd, 0, 0
AVX_INSTR pmaxub, 0, 0
AVX_INSTR pmaxuw, 0, 0
AVX_INSTR pmaxud, 0, 0
AVX_INSTR pminsb, 0, 0
AVX_INSTR pminsw, 0, 0
AVX_INSTR pminsd, 0, 0
AVX_INSTR pminub, 0, 0
AVX_INSTR pminuw, 0, 0
AVX_INSTR pminud, 0, 0
AVX_INSTR pmulhuw, 0, 0
AVX_INSTR pmulhrsw, 0, 0
AVX_INSTR pmulhw, 0, 0
AVX_INSTR pmullw, 0, 0
AVX_INSTR pmulld, 0, 0
AVX_INSTR pmuludq, 0, 0
AVX_INSTR pmuldq, 0, 0
AVX_INSTR psadbw, 0, 0
AVX_INSTR pshufb, 0, 0
AVX_INSTR psignb, 0, 0
AVX_INSTR psignw, 0, 0
AVX_INSTR psignd, 0, 0
AVX_INSTR psllw, 0, 0
AVX_INSTR pslld, 0, 0
AVX_INSTR psllq, 0, 0
AVX_INSTR pslldq, 0, 0
AVX_INSTR psraw, 0, 0
AVX_INSTR psrad, 0, 0
AVX_INSTR psrlw, 0, 0
AVX_INSTR psrld, 0, 0
AVX_INSTR psrlq, 0, 0
AVX_INSTR psrldq, 0, 0
AVX_INSTR psubb, 0, 0
AVX_INSTR psubw, 0, 0
AVX_INSTR psubd, 0, 0
AVX_INSTR psubq, 0, 0
AVX_INSTR psubsb, 0, 0
AVX_INSTR psubsw, 0, 0
AVX_INSTR psubusb, 0, 0
AVX_INSTR psubusw, 0, 0
AVX_INSTR punpckhbw, 0, 0
AVX_INSTR punpckhwd, 0, 0
AVX_INSTR punpckhdq, 0, 0
AVX_INSTR punpckhqdq, 0, 0
AVX_INSTR punpcklbw, 0, 0
AVX_INSTR punpcklwd, 0, 0
AVX_INSTR punpckldq, 0, 0
AVX_INSTR punpcklqdq, 0, 0
AVX_INSTR shufps, 0, 1
AVX_INSTR subpd, 1, 0
AVX_INSTR subps, 1, 0
AVX_INSTR subsd, 1, 0
AVX_INSTR subss, 1, 0
AVX_INSTR unpckhpd, 1, 0
AVX_INSTR unpckhps, 1, 0
AVX_INSTR unpcklpd, 1, 0
AVX_INSTR unpcklps, 1, 0
AVX_INSTR xorpd, 1, 0
AVX_INSTR xorps, 1, 0

; 3DNow instructions, for sharing code between AVX, SSE and 3DN
AVX_INSTR pfadd, 1, 0
AVX_INSTR pfsub, 1, 0
AVX_INSTR pfmul, 1, 0
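
; With the wrappers above, SSE code can be written in three-operand form, e.g.
; (a sketch):
;   mulps m0, m1, m2
; emits vmulps directly when avx_enabled, and is otherwise emulated as
;   movaps m0, m1
;   mulps  m0, m2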
;=============================================================================
; Chromium extensions
;=============================================================================
; Always build PIC code on Mac or Linux for Chromium.
; LOAD_SYM %1 (reg), %2 (sym)
; Copies the address of a local symbol to the specified register.
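; Example (a sketch; "some_table" is a hypothetical symbol):
;   LOAD_SYM r0, some_table
; leaves the address of some_table in r0, in a PIC-safe way when PIC is set.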
; MOVq %1 (xmm), %2 (reg)
; MOVq %1 (reg), %2 (xmm)
; Copies a general-purpose register to an XMM register, and vice versa.
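; Example (a sketch):
;   MOVq xmm0, r0    ; general-purpose register -> XMM
;   MOVq r0, xmm0    ; XMM -> general-purpose register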
%endif ; THIRD_PARTY_X86INC_X86INC_ASM_