/* Copyright (C) 2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */

/* As a special exception, if you include this header file into source
   files compiled by GCC, this header file does not by itself cause
   the resulting executable to be covered by the GNU General Public
   License.  This exception does not however invalidate any other
   reasons why the executable file might be covered by the GNU General
   Public License.  */

/* Implemented from the specification included in the Intel C++ Compiler
   User Guide and Reference, version 8.0.  */

#ifndef _XMMINTRIN_H_INCLUDED
#define _XMMINTRIN_H_INCLUDED

#ifndef __SSE__
# error "SSE instruction set not enabled"
#else

/* We need type definitions from the MMX header file.  */
#include <mmintrin.h>

/* Get _mm_malloc () and _mm_free ().  */
#include <mm_malloc.h>

/* The Intel API is flexible enough that we must allow aliasing with other
   vector types, and their scalar components.  */
typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));

/* Internal data types for implementing the intrinsics.  */
typedef float __v4sf __attribute__ ((__vector_size__ (16)));

/* Create a selector for use with the SHUFPS instruction.  */
#define _MM_SHUFFLE(fp3,fp2,fp1,fp0) \
 (((fp3) << 6) | ((fp2) << 4) | ((fp1) << 2) | (fp0))
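
/* Illustrative note (not part of the original header): each argument of
   _MM_SHUFFLE occupies a two-bit field of the selector, so for example
   _MM_SHUFFLE (3, 2, 1, 0) evaluates to 0xE4, the identity selector that
   leaves the source elements in their original order.  */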

/* Constants for use with _mm_prefetch.  */
enum _mm_hint
{
  _MM_HINT_T0 = 3,
  _MM_HINT_T1 = 2,
  _MM_HINT_T2 = 1,
  _MM_HINT_NTA = 0
};

/* Bits in the MXCSR.  */
#define _MM_EXCEPT_MASK       0x003f
#define _MM_EXCEPT_INVALID    0x0001
#define _MM_EXCEPT_DENORM     0x0002
#define _MM_EXCEPT_DIV_ZERO   0x0004
#define _MM_EXCEPT_OVERFLOW   0x0008
#define _MM_EXCEPT_UNDERFLOW  0x0010
#define _MM_EXCEPT_INEXACT    0x0020

#define _MM_MASK_MASK         0x1f80
#define _MM_MASK_INVALID      0x0080
#define _MM_MASK_DENORM       0x0100
#define _MM_MASK_DIV_ZERO     0x0200
#define _MM_MASK_OVERFLOW     0x0400
#define _MM_MASK_UNDERFLOW    0x0800
#define _MM_MASK_INEXACT      0x1000

#define _MM_ROUND_MASK        0x6000
#define _MM_ROUND_NEAREST     0x0000
#define _MM_ROUND_DOWN        0x2000
#define _MM_ROUND_UP          0x4000
#define _MM_ROUND_TOWARD_ZERO 0x6000

#define _MM_FLUSH_ZERO_MASK   0x8000
#define _MM_FLUSH_ZERO_ON     0x8000
#define _MM_FLUSH_ZERO_OFF    0x0000

/* Create a vector of zeros.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setzero_ps (void)
{
  return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
}

/* Perform the respective operation on the lower SPFP (single-precision
   floating-point) values of A and B; the upper three SPFP values are
   passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ss (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtss ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform the respective operation on the four SPFP values in A and B.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_add_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_addps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sub_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_subps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_mul_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_mulps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_div_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_divps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_sqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_sqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rcp_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rcpps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_rsqrt_ps (__m128 __A)
{
  return (__m128) __builtin_ia32_rsqrtps ((__v4sf)__A);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_min_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_minps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_max_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_maxps ((__v4sf)__A, (__v4sf)__B);
}
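
/* Illustrative example (not part of the original header): a hypothetical
   helper, a sketch only, showing how the packed arithmetic intrinsics
   above compose; it computes A * B + C with two packed operations.  */
static __inline __m128 __attribute__((__always_inline__))
__example_madd_ps (__m128 __A, __m128 __B, __m128 __C)
{
  /* Multiply element-wise, then add element-wise.  */
  return _mm_add_ps (_mm_mul_ps (__A, __B), __C);
}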

/* Perform logical bit-wise operations on 128-bit values.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_and_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_andnot_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_andnps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_or_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_orps (__A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_xor_ps (__m128 __A, __m128 __B)
{
  return __builtin_ia32_xorps (__A, __B);
}

/* Perform a comparison on the lower SPFP values of A and B.  If the
   comparison is true, place a mask of all ones in the result, otherwise a
   mask of zeros.  The upper three SPFP values are passed through from A.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpltss ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpless ((__v4sf) __B,
								(__v4sf)
								__A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnless ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnltss ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf) __A,
					(__v4sf)
					__builtin_ia32_cmpnless ((__v4sf) __B,
								 (__v4sf)
								 __A));
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordss ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordss ((__v4sf)__A, (__v4sf)__B);
}

/* Perform a comparison on the four SPFP values of A and B.  For each
   element, if the comparison is true, place a mask of all ones in the
   result, otherwise a mask of zeros.  */

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpeq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpeqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmplt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmple_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpgt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpgeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpneq_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpneqps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnlt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnltps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnle_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpnleps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpngt_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngtps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpnge_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpngeps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpordps ((__v4sf)__A, (__v4sf)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cmpunord_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_cmpunordps ((__v4sf)__A, (__v4sf)__B);
}

/* Compare the lower SPFP values of A and B and return 1 if true
   and 0 if false.  */

static __inline int __attribute__((__always_inline__))
_mm_comieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_comineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_comineq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomieq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomieq ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomilt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomilt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomile_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomile ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomigt_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomigt ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomige_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomige ((__v4sf)__A, (__v4sf)__B);
}

static __inline int __attribute__((__always_inline__))
_mm_ucomineq_ss (__m128 __A, __m128 __B)
{
  return __builtin_ia32_ucomineq ((__v4sf)__A, (__v4sf)__B);
}

/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline int __attribute__((__always_inline__))
_mm_cvtss_si32 (__m128 __A)
{
  return __builtin_ia32_cvtss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvt_ss2si (__m128 __A)
{
  return _mm_cvtss_si32 (__A);
}

#ifdef __x86_64__
/* Convert the lower SPFP value to a 32-bit integer according to the current
   rounding mode.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvtss_si64x (__m128 __A)
{
  return __builtin_ia32_cvtss2si64 ((__v4sf) __A);
}
#endif

/* Convert the two lower SPFP values to 32-bit integers according to the
   current rounding mode.  Return the integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvtps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvt_ps2pi (__m128 __A)
{
  return _mm_cvtps_pi32 (__A);
}

/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline int __attribute__((__always_inline__))
_mm_cvttss_si32 (__m128 __A)
{
  return __builtin_ia32_cvttss2si ((__v4sf) __A);
}

static __inline int __attribute__((__always_inline__))
_mm_cvtt_ss2si (__m128 __A)
{
  return _mm_cvttss_si32 (__A);
}

#ifdef __x86_64__
/* Truncate the lower SPFP value to a 32-bit integer.  */
static __inline long long __attribute__((__always_inline__))
_mm_cvttss_si64x (__m128 __A)
{
  return __builtin_ia32_cvttss2si64 ((__v4sf) __A);
}
#endif

/* Truncate the two lower SPFP values to 32-bit integers.  Return the
   integers in packed form.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvttps_pi32 (__m128 __A)
{
  return (__m64) __builtin_ia32_cvttps2pi ((__v4sf) __A);
}

static __inline __m64 __attribute__((__always_inline__))
_mm_cvtt_ps2pi (__m128 __A)
{
  return _mm_cvttps_pi32 (__A);
}

/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi32_ss (__m128 __A, int __B)
{
  return (__m128) __builtin_ia32_cvtsi2ss ((__v4sf) __A, __B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_si2ss (__m128 __A, int __B)
{
  return _mm_cvtsi32_ss (__A, __B);
}

#ifdef __x86_64__
/* Convert B to a SPFP value and insert it as element zero in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtsi64x_ss (__m128 __A, long long __B)
{
  return (__m128) __builtin_ia32_cvtsi642ss ((__v4sf) __A, __B);
}
#endif

/* Convert the two 32-bit values in B to SPFP form and insert them
   as the two lower elements in A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32_ps (__m128 __A, __m64 __B)
{
  return (__m128) __builtin_ia32_cvtpi2ps ((__v4sf) __A, (__v2si)__B);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_cvt_pi2ps (__m128 __A, __m64 __B)
{
  return _mm_cvtpi32_ps (__A, __B);
}

/* Convert the four signed 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi16_ps (__m64 __A)
{
  __v4hi __sign;
  __v2si __hisi, __losi;
  __v4sf __r;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtw ((__v4hi)0LL, (__v4hi)__A);

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, __sign);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, __sign);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the four unsigned 16-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu16_ps (__m64 __A)
{
  __v2si __hisi, __losi;
  __v4sf __r;

  /* Convert the four words to doublewords.  */
  __hisi = (__v2si) __builtin_ia32_punpckhwd ((__v4hi)__A, (__v4hi)0LL);
  __losi = (__v2si) __builtin_ia32_punpcklwd ((__v4hi)__A, (__v4hi)0LL);

  /* Convert the doublewords to floating point two at a time.  */
  __r = (__v4sf) _mm_setzero_ps ();
  __r = __builtin_ia32_cvtpi2ps (__r, __hisi);
  __r = __builtin_ia32_movlhps (__r, __r);
  __r = __builtin_ia32_cvtpi2ps (__r, __losi);

  return (__m128) __r;
}

/* Convert the low four signed 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi8_ps (__m64 __A)
{
  __v8qi __sign;

  /* This comparison against zero gives us a mask that can be used to
     fill in the missing sign bits in the unpack operations below, so
     that we get signed values after unpacking.  */
  __sign = __builtin_ia32_pcmpgtb ((__v8qi)0LL, (__v8qi)__A);

  /* Convert the four low bytes to words.  */
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, __sign);

  return _mm_cvtpi16_ps(__A);
}

/* Convert the low four unsigned 8-bit values in A to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpu8_ps(__m64 __A)
{
  __A = (__m64) __builtin_ia32_punpcklbw ((__v8qi)__A, (__v8qi)0LL);
  return _mm_cvtpu16_ps(__A);
}

/* Convert the four signed 32-bit values in A and B to SPFP form.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_cvtpi32x2_ps(__m64 __A, __m64 __B)
{
  __v4sf __zero = (__v4sf) _mm_setzero_ps ();
  __v4sf __sfa = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__A);
  __v4sf __sfb = __builtin_ia32_cvtpi2ps (__zero, (__v2si)__B);
  return (__m128) __builtin_ia32_movlhps (__sfa, __sfb);
}

/* Convert the four SPFP values in A to four signed 16-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi16(__m128 __A)
{
  __v4sf __hisf = (__v4sf)__A;
  __v4sf __losf = __builtin_ia32_movhlps (__hisf, __hisf);
  __v2si __hisi = __builtin_ia32_cvtps2pi (__hisf);
  __v2si __losi = __builtin_ia32_cvtps2pi (__losf);
  return (__m64) __builtin_ia32_packssdw (__hisi, __losi);
}

/* Convert the four SPFP values in A to four signed 8-bit integers.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_cvtps_pi8(__m128 __A)
{
  __v4hi __tmp = (__v4hi) _mm_cvtps_pi16 (__A);
  return (__m64) __builtin_ia32_packsswb (__tmp, (__v4hi)0LL);
}

/* Selects four specific SPFP values from A and B based on MASK.  */
#if 0
static __inline __m128 __attribute__((__always_inline__))
_mm_shuffle_ps (__m128 __A, __m128 __B, int __mask)
{
  return (__m128) __builtin_ia32_shufps ((__v4sf)__A, (__v4sf)__B, __mask);
}
#else
#define _mm_shuffle_ps(A, B, MASK) \
 ((__m128) __builtin_ia32_shufps ((__v4sf)(A), (__v4sf)(B), (MASK)))
#endif
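
/* Illustrative example (not part of the original header): a hypothetical
   helper, a sketch only, showing how a selector built with _MM_SHUFFLE
   drives _mm_shuffle_ps; with both operands equal to A, the selector
   (0, 1, 2, 3) reverses the order of the four elements.  */
static __inline __m128 __attribute__((__always_inline__))
__example_reverse_ps (__m128 __A)
{
  return _mm_shuffle_ps (__A, __A, _MM_SHUFFLE (0, 1, 2, 3));
}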

/* Selects and interleaves the upper two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpackhi_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpckhps ((__v4sf)__A, (__v4sf)__B);
}

/* Selects and interleaves the lower two SPFP values from A and B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_unpacklo_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_unpcklps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the upper two SPFP values with 64-bits of data loaded from P;
   the lower two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadh_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadhps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the upper two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storeh_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storehps ((__v2si *)__P, (__v4sf)__A);
}

/* Moves the upper two values of B into the lower two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movehl_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movhlps ((__v4sf)__A, (__v4sf)__B);
}

/* Moves the lower two values of B into the upper two values of A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_movelh_ps (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movlhps ((__v4sf)__A, (__v4sf)__B);
}

/* Sets the lower two SPFP values with 64-bits of data loaded from P;
   the upper two values are passed through from A.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadl_pi (__m128 __A, __m64 const *__P)
{
  return (__m128) __builtin_ia32_loadlps ((__v4sf)__A, (__v2si *)__P);
}

/* Stores the lower two SPFP values of A into P.  */
static __inline void __attribute__((__always_inline__))
_mm_storel_pi (__m64 *__P, __m128 __A)
{
  __builtin_ia32_storelps ((__v2si *)__P, (__v4sf)__A);
}

/* Creates a 4-bit mask from the most significant bits of the SPFP values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_ps (__m128 __A)
{
  return __builtin_ia32_movmskps ((__v4sf)__A);
}

/* Return the contents of the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_mm_getcsr (void)
{
  return __builtin_ia32_stmxcsr ();
}

/* Read exception bits from the control register.  */
static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_STATE (void)
{
  return _mm_getcsr() & _MM_EXCEPT_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_EXCEPTION_MASK (void)
{
  return _mm_getcsr() & _MM_MASK_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_ROUNDING_MODE (void)
{
  return _mm_getcsr() & _MM_ROUND_MASK;
}

static __inline unsigned int __attribute__((__always_inline__))
_MM_GET_FLUSH_ZERO_MODE (void)
{
  return _mm_getcsr() & _MM_FLUSH_ZERO_MASK;
}

/* Set the control register to I.  */
static __inline void __attribute__((__always_inline__))
_mm_setcsr (unsigned int __I)
{
  __builtin_ia32_ldmxcsr (__I);
}

/* Set exception bits in the control register.  */
static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_STATE(unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_EXCEPTION_MASK (unsigned int __mask)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | __mask);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_ROUNDING_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | __mode);
}

static __inline void __attribute__((__always_inline__))
_MM_SET_FLUSH_ZERO_MODE (unsigned int __mode)
{
  _mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | __mode);
}
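
/* Illustrative example (not part of the original header): a hypothetical
   helper, a sketch only, showing how the MXCSR accessors above combine to
   change a mode temporarily: the lower SPFP value of A is converted with
   round-down in effect, then the previous rounding mode is restored.  */
static __inline int __attribute__((__always_inline__))
__example_cvtss_si32_down (__m128 __A)
{
  unsigned int __saved = _MM_GET_ROUNDING_MODE ();
  int __r;
  _MM_SET_ROUNDING_MODE (_MM_ROUND_DOWN);
  __r = _mm_cvtss_si32 (__A);
  _MM_SET_ROUNDING_MODE (__saved);
  return __r;
}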

/* Create a vector with element 0 as F and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ss (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, 0, 0, 0 };
}

/* Create a vector with all four elements equal to F.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set1_ps (float __F)
{
  return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
}

static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps1 (float __F)
{
  return _mm_set1_ps (__F);
}

/* Create a vector with element 0 as *P and the rest zero.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ss (float const *__P)
{
  return _mm_set_ss (*__P);
}

/* Create a vector with all four elements equal to *P.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load1_ps (float const *__P)
{
  return _mm_set1_ps (*__P);
}

static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps1 (float const *__P)
{
  return _mm_load1_ps (__P);
}

/* Load four SPFP values from P.  The address must be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_load_ps (float const *__P)
{
  return (__m128) *(__v4sf *)__P;
}

/* Load four SPFP values from P.  The address need not be 16-byte aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadu_ps (float const *__P)
{
  return (__m128) __builtin_ia32_loadups (__P);
}

/* Load four SPFP values in reverse order.  The address must be aligned.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_loadr_ps (float const *__P)
{
  __v4sf __tmp = *(__v4sf *)__P;
  return (__m128) __builtin_ia32_shufps (__tmp, __tmp, _MM_SHUFFLE (0,1,2,3));
}

/* Create the vector [Z Y X W].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
{
  return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
}

/* Create the vector [W X Y Z].  */
static __inline __m128 __attribute__((__always_inline__))
_mm_setr_ps (float __Z, float __Y, float __X, float __W)
{
  return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
}

/* Stores the lower SPFP value.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ss (float *__P, __m128 __A)
{
  *__P = __builtin_ia32_vec_ext_v4sf ((__v4sf)__A, 0);
}

/* Store four SPFP values.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_store_ps (float *__P, __m128 __A)
{
  *(__v4sf *)__P = (__v4sf)__A;
}

/* Store four SPFP values.  The address need not be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storeu_ps (float *__P, __m128 __A)
{
  __builtin_ia32_storeups (__P, (__v4sf)__A);
}

/* Store the lower SPFP value across four words.  */
static __inline void __attribute__((__always_inline__))
_mm_store1_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,0,0,0));
  _mm_storeu_ps (__P, __tmp);
}

static __inline void __attribute__((__always_inline__))
_mm_store_ps1 (float *__P, __m128 __A)
{
  _mm_store1_ps (__P, __A);
}

/* Store four SPFP values in reverse order.  The address must be aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_storer_ps (float *__P, __m128 __A)
{
  __v4sf __va = (__v4sf)__A;
  __v4sf __tmp = __builtin_ia32_shufps (__va, __va, _MM_SHUFFLE (0,1,2,3));
  _mm_store_ps (__P, __tmp);
}

/* Sets the low SPFP value of A from the low value of B.  */
static __inline __m128 __attribute__((__always_inline__))
_mm_move_ss (__m128 __A, __m128 __B)
{
  return (__m128) __builtin_ia32_movss ((__v4sf)__A, (__v4sf)__B);
}

/* Extracts one of the four words of A.  The selector N must be immediate.  */
#if 0
static __inline int __attribute__((__always_inline__))
_mm_extract_pi16 (__m64 const __A, int const __N)
{
  return __builtin_ia32_vec_ext_v4hi ((__v4hi)__A, __N);
}

static __inline int __attribute__((__always_inline__))
_m_pextrw (__m64 const __A, int const __N)
{
  return _mm_extract_pi16 (__A, __N);
}
#else
#define _mm_extract_pi16(A, N) __builtin_ia32_vec_ext_v4hi ((__v4hi)(A), (N))
#define _m_pextrw(A, N) _mm_extract_pi16((A), (N))
#endif

/* Inserts word D into one of four words of A.  The selector N must be
   immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
{
  return (__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)__A, __D, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pinsrw (__m64 const __A, int const __D, int const __N)
{
  return _mm_insert_pi16 (__A, __D, __N);
}
#else
#define _mm_insert_pi16(A, D, N) \
  ((__m64) __builtin_ia32_vec_set_v4hi ((__v4hi)(A), (D), (N)))
#define _m_pinsrw(A, D, N) _mm_insert_pi16((A), (D), (N))
#endif

/* Compute the element-wise maximum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxsw (__m64 __A, __m64 __B)
{
  return _mm_max_pi16 (__A, __B);
}

/* Compute the element-wise maximum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_max_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmaxub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmaxub (__m64 __A, __m64 __B)
{
  return _mm_max_pu8 (__A, __B);
}

/* Compute the element-wise minimum of signed 16-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pi16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminsw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminsw (__m64 __A, __m64 __B)
{
  return _mm_min_pi16 (__A, __B);
}

/* Compute the element-wise minimum of unsigned 8-bit values.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_min_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pminub ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pminub (__m64 __A, __m64 __B)
{
  return _mm_min_pu8 (__A, __B);
}

/* Create an 8-bit mask of the signs of 8-bit values.  */
static __inline int __attribute__((__always_inline__))
_mm_movemask_pi8 (__m64 __A)
{
  return __builtin_ia32_pmovmskb ((__v8qi)__A);
}

static __inline int __attribute__((__always_inline__))
_m_pmovmskb (__m64 __A)
{
  return _mm_movemask_pi8 (__A);
}

/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
   in B and produce the high 16 bits of the 32-bit results.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_mulhi_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pmulhuw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pmulhuw (__m64 __A, __m64 __B)
{
  return _mm_mulhi_pu16 (__A, __B);
}

/* Return a combination of the four 16-bit values in A.  The selector
   must be an immediate.  */
#if 0
static __inline __m64 __attribute__((__always_inline__))
_mm_shuffle_pi16 (__m64 __A, int __N)
{
  return (__m64) __builtin_ia32_pshufw ((__v4hi)__A, __N);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pshufw (__m64 __A, int __N)
{
  return _mm_shuffle_pi16 (__A, __N);
}
#else
#define _mm_shuffle_pi16(A, N) \
  ((__m64) __builtin_ia32_pshufw ((__v4hi)(A), (N)))
#define _m_pshufw(A, N) _mm_shuffle_pi16 ((A), (N))
#endif

/* Conditionally store byte elements of A into P.  The high bit of each
   byte in the selector N determines whether the corresponding byte from
   A is stored.  */
static __inline void __attribute__((__always_inline__))
_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
{
  __builtin_ia32_maskmovq ((__v8qi)__A, (__v8qi)__N, __P);
}

static __inline void __attribute__((__always_inline__))
_m_maskmovq (__m64 __A, __m64 __N, char *__P)
{
  _mm_maskmove_si64 (__A, __N, __P);
}

/* Compute the rounded averages of the unsigned 8-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgb ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgb (__m64 __A, __m64 __B)
{
  return _mm_avg_pu8 (__A, __B);
}

/* Compute the rounded averages of the unsigned 16-bit values in A and B.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_avg_pu16 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_pavgw ((__v4hi)__A, (__v4hi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_pavgw (__m64 __A, __m64 __B)
{
  return _mm_avg_pu16 (__A, __B);
}

/* Compute the sum of the absolute differences of the unsigned 8-bit
   values in A and B.  Return the value in the lower 16-bit word; the
   upper words are cleared.  */
static __inline __m64 __attribute__((__always_inline__))
_mm_sad_pu8 (__m64 __A, __m64 __B)
{
  return (__m64) __builtin_ia32_psadbw ((__v8qi)__A, (__v8qi)__B);
}

static __inline __m64 __attribute__((__always_inline__))
_m_psadbw (__m64 __A, __m64 __B)
{
  return _mm_sad_pu8 (__A, __B);
}

/* Loads one cache line from address P to a location "closer" to the
   processor.  The selector I specifies the type of prefetch operation.  */
#if 0
static __inline void __attribute__((__always_inline__))
_mm_prefetch (void *__P, enum _mm_hint __I)
{
  __builtin_prefetch (__P, 0, __I);
}
#else
#define _mm_prefetch(P, I) \
  __builtin_prefetch ((P), 0, (I))
#endif

/* Stores the data in A to the address P without polluting the caches.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_pi (__m64 *__P, __m64 __A)
{
  __builtin_ia32_movntq ((unsigned long long *)__P, (unsigned long long)__A);
}

/* Likewise.  The address must be 16-byte aligned.  */
static __inline void __attribute__((__always_inline__))
_mm_stream_ps (float *__P, __m128 __A)
{
  __builtin_ia32_movntps (__P, (__v4sf)__A);
}

/* Guarantees that every preceding store is globally visible before
   any subsequent store.  */
static __inline void __attribute__((__always_inline__))
_mm_sfence (void)
{
  __builtin_ia32_sfence ();
}

/* The execution of the next instruction is delayed by an implementation
   specific amount of time.  The instruction does not modify the
   architectural state.  */
static __inline void __attribute__((__always_inline__))
_mm_pause (void)
{
  __asm__ __volatile__ ("rep; nop" : : );
}

/* Transpose the 4x4 matrix composed of row[0-3].  */
#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3)			\
do {									\
  __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3);	\
  __v4sf __t0 = __builtin_ia32_shufps (__r0, __r1, 0x44);		\
  __v4sf __t2 = __builtin_ia32_shufps (__r0, __r1, 0xEE);		\
  __v4sf __t1 = __builtin_ia32_shufps (__r2, __r3, 0x44);		\
  __v4sf __t3 = __builtin_ia32_shufps (__r2, __r3, 0xEE);		\
  (row0) = __builtin_ia32_shufps (__t0, __t1, 0x88);			\
  (row1) = __builtin_ia32_shufps (__t0, __t1, 0xDD);			\
  (row2) = __builtin_ia32_shufps (__t2, __t3, 0x88);			\
  (row3) = __builtin_ia32_shufps (__t2, __t3, 0xDD);			\
} while (0)
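
/* Illustrative example (not part of the original header): a hypothetical
   helper, a sketch only, showing how _MM_TRANSPOSE4_PS is typically
   invoked on four row vectors; it assumes MAT points to 16 floats in
   row-major order at a 16-byte aligned address.  */
static __inline void __attribute__((__always_inline__))
__example_transpose4x4 (float *__MAT)
{
  __m128 __row0 = _mm_load_ps (__MAT + 0);
  __m128 __row1 = _mm_load_ps (__MAT + 4);
  __m128 __row2 = _mm_load_ps (__MAT + 8);
  __m128 __row3 = _mm_load_ps (__MAT + 12);
  _MM_TRANSPOSE4_PS (__row0, __row1, __row2, __row3);
  _mm_store_ps (__MAT + 0, __row0);
  _mm_store_ps (__MAT + 4, __row1);
  _mm_store_ps (__MAT + 8, __row2);
  _mm_store_ps (__MAT + 12, __row3);
}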

/* For backward source compatibility.  */
#include <emmintrin.h>

#endif /* __SSE__ */
#endif /* _XMMINTRIN_H_INCLUDED */