/**************************************************************************
 *
 * Copyright 2008 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

/**
 * SSE intrinsics portability header.
 *
 * Although the SSE intrinsics are supported by all modern x86 and x86-64
 * compilers, there are some intrinsics missing in some implementations
 * (especially older MSVC versions). This header abstracts that away.
 */

#ifndef U_SSE_H_
#define U_SSE_H_

#include "pipe/p_config.h"
#include "pipe/p_compiler.h"   /* INLINE, ubyte, ushort, uint */
#include "util/u_debug.h"      /* debug_printf() */

#if defined(PIPE_ARCH_SSE)

#include <emmintrin.h>

/* MSVC before VC8 does not support the _mm_castxxx_yyy intrinsics. */
#if defined(_MSC_VER) && _MSC_VER < 1500

union __declspec(align(16)) m128_types {
   __m128i si;
   __m128 ps;
};

static __inline __m128
_mm_castsi128_ps(__m128i a)
{
   union m128_types u;
   u.si = a;
   return u.ps;
}

static __inline __m128i
_mm_castps_si128(__m128 a)
{
   union m128_types u;
   u.ps = a;
   return u.si;
}

#endif /* defined(_MSC_VER) && _MSC_VER < 1500 */

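/*
 * Illustrative note (not part of the original header): the cast intrinsics
 * reinterpret the same 128-bit register without emitting any instructions,
 * which allows integer bit tricks on float data.  A minimal sketch, with
 * arbitrarily chosen values, that clears the sign bits of four floats:
 *
 *    __m128  x    = _mm_set1_ps(-1.5f);
 *    __m128i mask = _mm_set1_epi32(0x7fffffff);
 *    __m128  absx = _mm_castsi128_ps(_mm_and_si128(_mm_castps_si128(x), mask));
 *    // absx now holds (1.5, 1.5, 1.5, 1.5)
 */
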
static INLINE void u_print_epi8(const char *name, __m128i r)
{
   union { __m128i m; ubyte ub[16]; } u;
   u.m = r;

   debug_printf("%s: "
                "%02x/%02x/%02x/%02x/"
                "%02x/%02x/%02x/%02x/"
                "%02x/%02x/%02x/%02x/"
                "%02x/%02x/%02x/%02x\n",
                name,
                u.ub[0],  u.ub[1],  u.ub[2],  u.ub[3],
                u.ub[4],  u.ub[5],  u.ub[6],  u.ub[7],
                u.ub[8],  u.ub[9],  u.ub[10], u.ub[11],
                u.ub[12], u.ub[13], u.ub[14], u.ub[15]);
}

static INLINE void u_print_epi16(const char *name, __m128i r)
{
   union { __m128i m; ushort us[8]; } u;
   u.m = r;

   debug_printf("%s: "
                "%04x/%04x/%04x/%04x/"
                "%04x/%04x/%04x/%04x\n",
                name,
                u.us[0], u.us[1], u.us[2], u.us[3],
                u.us[4], u.us[5], u.us[6], u.us[7]);
}

static INLINE void u_print_epi32(const char *name, __m128i r)
{
   union { __m128i m; uint ui[4]; } u;
   u.m = r;

   debug_printf("%s: %08x/%08x/%08x/%08x\n",
                name,
                u.ui[0], u.ui[1], u.ui[2], u.ui[3]);
}

static INLINE void u_print_ps(const char *name, __m128 r)
{
   union { __m128 m; float f[4]; } u;
   u.m = r;

   debug_printf("%s: %f/%f/%f/%f\n",
                name,
                u.f[0], u.f[1], u.f[2], u.f[3]);
}

#define U_DUMP_EPI32(a) u_print_epi32(#a, a)
#define U_DUMP_EPI16(a) u_print_epi16(#a, a)
#define U_DUMP_EPI8(a)  u_print_epi8(#a, a)
#define U_DUMP_PS(a)    u_print_ps(#a, a)

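/*
 * Illustrative usage (not part of the original header): the U_DUMP_*
 * macros stringify their argument, so a register is printed together with
 * its variable name while debugging SIMD code:
 *
 *    __m128i v = _mm_setr_epi32(1, 2, 3, 4);
 *    U_DUMP_EPI32(v);   // expands to u_print_epi32("v", v)
 */
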
#if defined(PIPE_ARCH_SSSE3)

#include <tmmintrin.h>

#else /* !PIPE_ARCH_SSSE3 */

/**
 * Describe _mm_shuffle_epi8() with gcc extended inline assembly, for cases
 * where -mssse3 is not supported/enabled.
 *
 * MSVC will never get in here as its intrinsics support does not rely on
 * compiler command line options.
 */
static __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_shuffle_epi8(__m128i a, __m128i mask)
{
   __m128i result;
   __asm__("pshufb %1, %0"
           : "=x" (result)
           : "xm" (mask), "0" (a));
   return result;
}

#endif /* !PIPE_ARCH_SSSE3 */

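/*
 * Illustrative example (not part of the original header): _mm_shuffle_epi8()
 * picks each output byte from the input according to the control mask, so a
 * descending index vector reverses the byte order.  The constants below are
 * arbitrary values chosen for the sketch:
 *
 *    __m128i bytes   = _mm_setr_epi8(0, 1, 2, 3, 4, 5, 6, 7,
 *                                    8, 9, 10, 11, 12, 13, 14, 15);
 *    __m128i reverse = _mm_setr_epi8(15, 14, 13, 12, 11, 10, 9, 8,
 *                                    7, 6, 5, 4, 3, 2, 1, 0);
 *    __m128i r       = _mm_shuffle_epi8(bytes, reverse);   // bytes 15..0
 */
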
/* Provide an SSE2 implementation of _mm_mullo_epi32() in terms of
 * _mm_mul_epu32().
 *
 * I suspect this works fine for us because one of our operands is
 * always positive, but I am not sure that this can be used for general
 * signed integer multiplication.
 *
 * This seems close enough to the speed of SSE4 and the real
 * _mm_mullo_epi32() intrinsic as to not justify adding an SSE4
 * dependency at this point.
 */
static INLINE __m128i
mm_mullo_epi32(const __m128i a, const __m128i b)
{
   __m128i a4   = _mm_srli_epi64(a, 32);  /* shift by one dword */
   __m128i b4   = _mm_srli_epi64(b, 32);  /* shift by one dword */
   __m128i ba   = _mm_mul_epu32(b, a);    /* multiply dwords 0, 2 */
   __m128i b4a4 = _mm_mul_epu32(b4, a4);  /* multiply dwords 1, 3 */

   /* Interleave the results, either with shuffles or (slightly
    * faster) direct bit operations:
    */
#if 0
   __m128i ba8             = _mm_shuffle_epi32(ba, 8);
   __m128i b4a48           = _mm_shuffle_epi32(b4a4, 8);
   __m128i result          = _mm_unpacklo_epi32(ba8, b4a48);
#else
   __m128i mask            = _mm_setr_epi32(~0, 0, ~0, 0);
   __m128i ba_mask         = _mm_and_si128(ba, mask);
   __m128i b4a4_mask_shift = _mm_slli_epi64(b4a4, 32);
   __m128i result          = _mm_or_si128(ba_mask, b4a4_mask_shift);
#endif

   return result;
}

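/*
 * Worked example (illustrative, values chosen arbitrarily): for
 * a = (3, 5, 7, 9) and b = (2, 4, 6, 8), _mm_mul_epu32(b, a) produces the
 * 64-bit products of dwords 0 and 2 (6 and 42), while the dword-shifted
 * copies produce the products of dwords 1 and 3 (20 and 72); interleaving
 * the low halves yields the per-lane 32-bit products:
 *
 *    __m128i a = _mm_setr_epi32(3, 5, 7, 9);
 *    __m128i b = _mm_setr_epi32(2, 4, 6, 8);
 *    __m128i r = mm_mullo_epi32(a, b);   // (6, 20, 42, 72)
 */
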
/* Transpose the 4x4 dword matrix given by the rows *a, *b, *c, *d into
 * the columns *o, *p, *q, *r.
 */
static INLINE void
transpose4_epi32(const __m128i * restrict a,
                 const __m128i * restrict b,
                 const __m128i * restrict c,
                 const __m128i * restrict d,
                 __m128i * restrict o,
                 __m128i * restrict p,
                 __m128i * restrict q,
                 __m128i * restrict r)
{
   __m128i t0 = _mm_unpacklo_epi32(*a, *b);
   __m128i t1 = _mm_unpacklo_epi32(*c, *d);
   __m128i t2 = _mm_unpackhi_epi32(*a, *b);
   __m128i t3 = _mm_unpackhi_epi32(*c, *d);

   *o = _mm_unpacklo_epi64(t0, t1);
   *p = _mm_unpackhi_epi64(t0, t1);
   *q = _mm_unpacklo_epi64(t2, t3);
   *r = _mm_unpackhi_epi64(t2, t3);
}

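/*
 * Illustrative example (not part of the original header): transposing the
 * rows of a 4x4 integer matrix into columns:
 *
 *    __m128i r0 = _mm_setr_epi32( 0,  1,  2,  3);
 *    __m128i r1 = _mm_setr_epi32( 4,  5,  6,  7);
 *    __m128i r2 = _mm_setr_epi32( 8,  9, 10, 11);
 *    __m128i r3 = _mm_setr_epi32(12, 13, 14, 15);
 *    __m128i c0, c1, c2, c3;
 *    transpose4_epi32(&r0, &r1, &r2, &r3, &c0, &c1, &c2, &c3);
 *    // c0 = (0, 4, 8, 12), c1 = (1, 5, 9, 13),
 *    // c2 = (2, 6, 10, 14), c3 = (3, 7, 11, 15)
 */
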
#define SCALAR_EPI32(m, i) _mm_shuffle_epi32((m), _MM_SHUFFLE(i,i,i,i))

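/*
 * Illustrative usage (not part of the original header): SCALAR_EPI32
 * broadcasts dword lane i (a compile-time constant 0..3) to all four lanes:
 *
 *    __m128i v  = _mm_setr_epi32(10, 20, 30, 40);
 *    __m128i v2 = SCALAR_EPI32(v, 2);   // (30, 30, 30, 30)
 */
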
#endif /* PIPE_ARCH_SSE */

#endif /* U_SSE_H_ */