Fix compilation on GCC 7.
[vkmodelviewer.git] / DirectXMath / DirectXMath.h
blob b53ee586d8ef5505e022f8b9dcc26b0ecb0f3107
1 //-------------------------------------------------------------------------------------
2 // DirectXMath.h -- SIMD C++ Math library
3 //
4 // THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
5 // ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
6 // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
7 // PARTICULAR PURPOSE.
8 //
9 // Copyright (c) Microsoft Corporation. All rights reserved.
11 // http://go.microsoft.com/fwlink/?LinkID=615560
12 //-------------------------------------------------------------------------------------
14 #pragma once
16 #ifdef __x86_64__
17 #define _M_X64 100
18 #endif
19 #ifdef __i386__
20 #define _M_IX86 600
21 #endif
23 #ifndef __cplusplus
24 #error DirectX Math requires C++
25 #endif
27 #define DIRECTX_MATH_VERSION 311
29 #if defined(_MSC_VER) && (_MSC_VER < 1800)
30 #error DirectX Math Visual C++ 2013 or later.
31 #endif
33 #if defined(_MSC_VER) && !defined(_M_ARM) && !defined(_M_ARM64) && !defined(_M_HYBRID_X86_ARM64) && (!_MANAGED) && (!_M_CEE) && (!defined(_M_IX86_FP) || (_M_IX86_FP > 1)) && !defined(_XM_NO_INTRINSICS_) && !defined(_XM_VECTORCALL_)
34 #if ((_MSC_FULL_VER >= 170065501) && (_MSC_VER < 1800)) || (_MSC_FULL_VER >= 180020418)
35 #define _XM_VECTORCALL_ 1
36 #endif
37 #endif
39 #if _XM_VECTORCALL_
40 #define XM_CALLCONV __vectorcall
41 #else
42 #define XM_CALLCONV __fastcall
43 #endif
45 #if defined(_MSC_VER) && (_MSC_VER < 1800)
46 #define XM_CTOR_DEFAULT {}
47 #else
48 #define XM_CTOR_DEFAULT =default;
49 #endif
51 #if defined(_MSC_VER) && (_MSC_FULL_VER < 190023506)
52 #define XM_CONST const
53 #define XM_CONSTEXPR
54 #else
55 #define XM_CONST constexpr
56 #define XM_CONSTEXPR constexpr
57 #endif
59 #ifndef XM_DEPRECATED
60 #define XM_DEPRECATED __declspec(deprecated("This is deprecated and will be removed in a future version."))
61 #endif
63 #if !defined(_XM_AVX2_INTRINSICS_) && defined(__AVX2__) && !defined(_XM_NO_INTRINSICS_)
64 #define _XM_AVX2_INTRINSICS_
65 #endif
67 #if !defined(_XM_FMA3_INTRINSICS_) && defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
68 #define _XM_FMA3_INTRINSICS_
69 #endif
71 #if !defined(_XM_F16C_INTRINSICS_) && defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
72 #define _XM_F16C_INTRINSICS_
73 #endif
75 #if defined(_XM_FMA3_INTRINSICS_) && !defined(_XM_AVX_INTRINSICS_)
76 #define _XM_AVX_INTRINSICS_
77 #endif
79 #if defined(_XM_F16C_INTRINSICS_) && !defined(_XM_AVX_INTRINSICS_)
80 #define _XM_AVX_INTRINSICS_
81 #endif
83 #if !defined(_XM_AVX_INTRINSICS_) && defined(__AVX__) && !defined(_XM_NO_INTRINSICS_)
84 #define _XM_AVX_INTRINSICS_
85 #endif
87 #if defined(_XM_AVX_INTRINSICS_) && !defined(_XM_SSE4_INTRINSICS_)
88 #define _XM_SSE4_INTRINSICS_
89 #endif
91 #if defined(_XM_SSE4_INTRINSICS_) && !defined(_XM_SSE3_INTRINSICS_)
92 #define _XM_SSE3_INTRINSICS_
93 #endif
95 #if defined(_XM_SSE3_INTRINSICS_) && !defined(_XM_SSE_INTRINSICS_)
96 #define _XM_SSE_INTRINSICS_
97 #endif
99 #if !defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
100 #if (defined(_M_IX86) || defined(_M_X64)) && !defined(_M_HYBRID_X86_ARM64)
101 #define _XM_SSE_INTRINSICS_
102 #elif defined(_M_ARM) || defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
103 #define _XM_ARM_NEON_INTRINSICS_
104 #elif !defined(_XM_NO_INTRINSICS_)
105 #error DirectX Math does not support this target
106 #endif
107 #endif // !_XM_ARM_NEON_INTRINSICS_ && !_XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_
109 #if !defined(_XM_NO_XMVECTOR_OVERLOADS_) && (defined(__clang__) || defined(__GNUC__))
110 #define _XM_NO_XMVECTOR_OVERLOADS_
111 #endif
113 #pragma warning(push)
114 #pragma warning(disable:4514 4820)
115 // C4514/4820: Off by default noise
116 #include <math.h>
117 #include <float.h>
118 #include <malloc.h>
119 #pragma warning(pop)
121 #ifndef _XM_NO_INTRINSICS_
122 #ifdef _XM_SSE_INTRINSICS_
123 #include <xmmintrin.h>
124 #include <emmintrin.h>
126 #ifdef _XM_SSE3_INTRINSICS_
127 #include <pmmintrin.h>
128 #endif
130 #ifdef _XM_SSE4_INTRINSICS_
131 #include <smmintrin.h>
132 #endif
134 #ifdef _XM_AVX_INTRINSICS_
135 #include <immintrin.h>
136 #endif
138 #elif defined(_XM_ARM_NEON_INTRINSICS_)
139 #if defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64)
140 #include <arm64_neon.h>
141 #else
142 #include <arm_neon.h>
143 #endif
144 #endif
145 #endif // !_XM_NO_INTRINSICS_
147 #include <assert.h>
149 #pragma warning(push)
150 #pragma warning(disable : 4005 4668)
151 // C4005/4668: Old header issue
152 #include <stdint.h>
153 #pragma warning(pop)
155 /****************************************************************************
157 * Conditional intrinsics
159 ****************************************************************************/
161 #if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
163 #if defined(_XM_NO_MOVNT_)
164 #define XM_STREAM_PS( p, a ) _mm_store_ps( p, a )
165 #define XM_SFENCE()
166 #else
167 #define XM_STREAM_PS( p, a ) _mm_stream_ps( p, a )
168 #define XM_SFENCE() _mm_sfence()
169 #endif
171 #if defined(_XM_AVX_INTRINSICS_)
172 #define XM_PERMUTE_PS( v, c ) _mm_permute_ps( v, c )
173 #else
174 #define XM_PERMUTE_PS( v, c ) _mm_shuffle_ps( v, v, c )
175 #endif
177 #endif // _XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_
179 namespace DirectX
182 /****************************************************************************
184 * Constant definitions
186 ****************************************************************************/
188 #if defined(__XNAMATH_H__) && defined(XM_PI)
189 #undef XM_PI
190 #undef XM_2PI
191 #undef XM_1DIVPI
192 #undef XM_1DIV2PI
193 #undef XM_PIDIV2
194 #undef XM_PIDIV4
195 #undef XM_SELECT_0
196 #undef XM_SELECT_1
197 #undef XM_PERMUTE_0X
198 #undef XM_PERMUTE_0Y
199 #undef XM_PERMUTE_0Z
200 #undef XM_PERMUTE_0W
201 #undef XM_PERMUTE_1X
202 #undef XM_PERMUTE_1Y
203 #undef XM_PERMUTE_1Z
204 #undef XM_PERMUTE_1W
205 #undef XM_CRMASK_CR6
206 #undef XM_CRMASK_CR6TRUE
207 #undef XM_CRMASK_CR6FALSE
208 #undef XM_CRMASK_CR6BOUNDS
209 #undef XM_CACHE_LINE_SIZE
210 #endif
212 XM_CONST float XM_PI = 3.141592654f;
213 XM_CONST float XM_2PI = 6.283185307f;
214 XM_CONST float XM_1DIVPI = 0.318309886f;
215 XM_CONST float XM_1DIV2PI = 0.159154943f;
216 XM_CONST float XM_PIDIV2 = 1.570796327f;
217 XM_CONST float XM_PIDIV4 = 0.785398163f;
219 XM_CONST uint32_t XM_SELECT_0 = 0x00000000;
220 XM_CONST uint32_t XM_SELECT_1 = 0xFFFFFFFF;
222 XM_CONST uint32_t XM_PERMUTE_0X = 0;
223 XM_CONST uint32_t XM_PERMUTE_0Y = 1;
224 XM_CONST uint32_t XM_PERMUTE_0Z = 2;
225 XM_CONST uint32_t XM_PERMUTE_0W = 3;
226 XM_CONST uint32_t XM_PERMUTE_1X = 4;
227 XM_CONST uint32_t XM_PERMUTE_1Y = 5;
228 XM_CONST uint32_t XM_PERMUTE_1Z = 6;
229 XM_CONST uint32_t XM_PERMUTE_1W = 7;
231 XM_CONST uint32_t XM_SWIZZLE_X = 0;
232 XM_CONST uint32_t XM_SWIZZLE_Y = 1;
233 XM_CONST uint32_t XM_SWIZZLE_Z = 2;
234 XM_CONST uint32_t XM_SWIZZLE_W = 3;
236 XM_CONST uint32_t XM_CRMASK_CR6 = 0x000000F0;
237 XM_CONST uint32_t XM_CRMASK_CR6TRUE = 0x00000080;
238 XM_CONST uint32_t XM_CRMASK_CR6FALSE = 0x00000020;
239 XM_CONST uint32_t XM_CRMASK_CR6BOUNDS = XM_CRMASK_CR6FALSE;
241 XM_CONST size_t XM_CACHE_LINE_SIZE = 64;
244 /****************************************************************************
246 * Macros
248 ****************************************************************************/
250 #if defined(__XNAMATH_H__) && defined(XMComparisonAllTrue)
251 #undef XMComparisonAllTrue
252 #undef XMComparisonAnyTrue
253 #undef XMComparisonAllFalse
254 #undef XMComparisonAnyFalse
255 #undef XMComparisonMixed
256 #undef XMComparisonAllInBounds
257 #undef XMComparisonAnyOutOfBounds
258 #endif
260 // Unit conversion
// Converts an angle measured in degrees into radians (scales by XM_PI/180).
262 inline XM_CONSTEXPR float XMConvertToRadians(float fDegrees) { return fDegrees * (XM_PI / 180.0f); }
// Converts an angle measured in radians into degrees (scales by 180/XM_PI).
263 inline XM_CONSTEXPR float XMConvertToDegrees(float fRadians) { return fRadians * (180.0f / XM_PI); }
265 // Condition register evaluation following a recording (R) comparison
// Decode the CR6-style result mask produced by the *R comparison functions.
// XM_CRMASK_CR6TRUE / XM_CRMASK_CR6FALSE / XM_CRMASK_CR6BOUNDS are the bit
// masks declared earlier in this header.
267 inline bool XMComparisonAllTrue(uint32_t CR) { return (((CR) & XM_CRMASK_CR6TRUE) == XM_CRMASK_CR6TRUE); }
268 inline bool XMComparisonAnyTrue(uint32_t CR) { return (((CR) & XM_CRMASK_CR6FALSE) != XM_CRMASK_CR6FALSE); }
269 inline bool XMComparisonAllFalse(uint32_t CR) { return (((CR) & XM_CRMASK_CR6FALSE) == XM_CRMASK_CR6FALSE); }
270 inline bool XMComparisonAnyFalse(uint32_t CR) { return (((CR) & XM_CRMASK_CR6TRUE) != XM_CRMASK_CR6TRUE); }
// Neither the all-true nor the all-false bit set => mixed results.
271 inline bool XMComparisonMixed(uint32_t CR) { return (((CR) & XM_CRMASK_CR6) == 0); }
272 inline bool XMComparisonAllInBounds(uint32_t CR) { return (((CR) & XM_CRMASK_CR6BOUNDS) == XM_CRMASK_CR6BOUNDS); }
273 inline bool XMComparisonAnyOutOfBounds(uint32_t CR) { return (((CR) & XM_CRMASK_CR6BOUNDS) != XM_CRMASK_CR6BOUNDS); }
276 /****************************************************************************
278 * Data types
280 ****************************************************************************/
282 #pragma warning(push)
283 #pragma warning(disable:4068 4201 4365 4324 4820)
284 // C4068: ignore unknown pragmas
285 // C4201: nonstandard extension used : nameless struct/union
286 // C4365: Off by default noise
287 // C4324/4820: padding warnings
289 #ifdef _PREFAST_
290 #pragma prefast(push)
291 #pragma prefast(disable : 25000, "FXMVECTOR is 16 bytes")
292 #endif
294 //------------------------------------------------------------------------------
// Scalar fallback vector type used when all SIMD intrinsics are disabled:
// four floats overlaid with four uint32_t for bitwise access.
// NOTE(review): the brace-only lines of this struct appear to have been lost
// in extraction; the tokens below are otherwise unmodified.
295 #if defined(_XM_NO_INTRINSICS_)
296 struct __vector4
298 union
300 float vector4_f32[4];
301 uint32_t vector4_u32[4];
304 #endif // _XM_NO_INTRINSICS_
306 //------------------------------------------------------------------------------
307 // Vector intrinsic: Four 32 bit floating point components aligned on a 16 byte
308 // boundary and mapped to hardware vector registers
309 #if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
310 typedef __m128 XMVECTOR;
311 #elif defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
312 typedef float32x4_t XMVECTOR;
313 #else
314 typedef __vector4 XMVECTOR;
315 #endif
317 // Fix-up for (1st-3rd) XMVECTOR parameters that are pass-in-register for x86, ARM, ARM64, and vector call; by reference otherwise
318 #if ( defined(_M_IX86) || defined(_M_ARM) || defined(_M_ARM64) || _XM_VECTORCALL_ ) && !defined(_XM_NO_INTRINSICS_)
319 typedef const XMVECTOR FXMVECTOR;
320 #else
321 typedef const XMVECTOR& FXMVECTOR;
322 #endif
324 // Fix-up for (4th) XMVECTOR parameter to pass in-register for ARM, ARM64, and x64 vector call; by reference otherwise
325 #if ( defined(_M_ARM) || defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || (_XM_VECTORCALL_ && !defined(_M_IX86) ) ) && !defined(_XM_NO_INTRINSICS_)
326 typedef const XMVECTOR GXMVECTOR;
327 #else
328 typedef const XMVECTOR& GXMVECTOR;
329 #endif
331 // Fix-up for (5th & 6th) XMVECTOR parameter to pass in-register for ARM64 and vector call; by reference otherwise
332 #if ( defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || _XM_VECTORCALL_ ) && !defined(_XM_NO_INTRINSICS_)
333 typedef const XMVECTOR HXMVECTOR;
334 #else
335 typedef const XMVECTOR& HXMVECTOR;
336 #endif
338 // Fix-up for (7th+) XMVECTOR parameters to pass by reference
339 typedef const XMVECTOR& CXMVECTOR;
341 //------------------------------------------------------------------------------
342 // Conversion types for constants
// 16-byte aligned union of four floats and an XMVECTOR; lets vector
// constants be declared with ordinary float initializers.
343 struct DECLSPEC_ALIGN(16) XMVECTORF32
345 union
347 float f[4];
348 XMVECTOR v;
// Implicit conversions so the constant can be used wherever an XMVECTOR
// or a const float* is expected.
351 inline operator XMVECTOR() const { return v; }
352 inline operator const float*() const { return f; }
353 #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
// SSE builds also allow bitwise reinterpretation as integer/double lanes.
354 inline operator __m128i() const { return _mm_castps_si128(v); }
355 inline operator __m128d() const { return _mm_castps_pd(v); }
356 #endif
// 16-byte aligned union of four int32_t and an XMVECTOR; used to declare
// vector constants from integer initializers.
// Fix: added the missing space after DECLSPEC_ALIGN(16) for consistency
// with XMVECTORF32/XMVECTORU8/XMVECTORU32 above.
359 struct DECLSPEC_ALIGN(16) XMVECTORI32
361 union
363 int32_t i[4];
364 XMVECTOR v;
367 inline operator XMVECTOR() const { return v; }
368 #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
// Bitwise casts only — no value conversion is performed.
369 inline operator __m128i() const { return _mm_castps_si128(v); }
370 inline operator __m128d() const { return _mm_castps_pd(v); }
371 #endif
// 16-byte aligned union of sixteen bytes and an XMVECTOR; used to declare
// vector constants from per-byte initializers.
374 struct DECLSPEC_ALIGN(16) XMVECTORU8
376 union
378 uint8_t u[16];
379 XMVECTOR v;
382 inline operator XMVECTOR() const { return v; }
383 #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
// Bitwise casts only — no value conversion is performed.
384 inline operator __m128i() const { return _mm_castps_si128(v); }
385 inline operator __m128d() const { return _mm_castps_pd(v); }
386 #endif
// 16-byte aligned union of four uint32_t and an XMVECTOR; used to declare
// vector constants (e.g. select/permute masks) from unsigned initializers.
389 struct DECLSPEC_ALIGN(16) XMVECTORU32
391 union
393 uint32_t u[4];
394 XMVECTOR v;
397 inline operator XMVECTOR() const { return v; }
398 #if !defined(_XM_NO_INTRINSICS_) && defined(_XM_SSE_INTRINSICS_)
// Bitwise casts only — no value conversion is performed.
399 inline operator __m128i() const { return _mm_castps_si128(v); }
400 inline operator __m128d() const { return _mm_castps_pd(v); }
401 #endif
404 //------------------------------------------------------------------------------
405 // Vector operators
// Component-wise arithmetic operator overloads for XMVECTOR. These are
// declarations only (definitions live in the .inl files). They are compiled
// out on clang/GCC, where _XM_NO_XMVECTOR_OVERLOADS_ is force-defined above
// because XMVECTOR maps to a native vector type that already has operators.
407 #ifndef _XM_NO_XMVECTOR_OVERLOADS_
408 XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V);
409 XMVECTOR XM_CALLCONV operator- (FXMVECTOR V);
411 XMVECTOR& XM_CALLCONV operator+= (XMVECTOR& V1, FXMVECTOR V2);
412 XMVECTOR& XM_CALLCONV operator-= (XMVECTOR& V1, FXMVECTOR V2);
413 XMVECTOR& XM_CALLCONV operator*= (XMVECTOR& V1, FXMVECTOR V2);
414 XMVECTOR& XM_CALLCONV operator/= (XMVECTOR& V1, FXMVECTOR V2);
// Scalar compound forms (note: no XM_CALLCONV — they take a reference).
416 XMVECTOR& operator*= (XMVECTOR& V, float S);
417 XMVECTOR& operator/= (XMVECTOR& V, float S);
419 XMVECTOR XM_CALLCONV operator+ (FXMVECTOR V1, FXMVECTOR V2);
420 XMVECTOR XM_CALLCONV operator- (FXMVECTOR V1, FXMVECTOR V2);
421 XMVECTOR XM_CALLCONV operator* (FXMVECTOR V1, FXMVECTOR V2);
422 XMVECTOR XM_CALLCONV operator/ (FXMVECTOR V1, FXMVECTOR V2);
423 XMVECTOR XM_CALLCONV operator* (FXMVECTOR V, float S);
424 XMVECTOR XM_CALLCONV operator* (float S, FXMVECTOR V);
425 XMVECTOR XM_CALLCONV operator/ (FXMVECTOR V, float S);
426 #endif /* !_XM_NO_XMVECTOR_OVERLOADS_ */
428 //------------------------------------------------------------------------------
429 // Matrix type: Sixteen 32 bit floating point components aligned on a
430 // 16 byte boundary and mapped to four hardware vector registers
432 struct XMMATRIX;
434 // Fix-up for (1st) XMMATRIX parameter to pass in-register for ARM64 and vector call; by reference otherwise
435 #if ( defined(_M_ARM64) || defined(_M_HYBRID_X86_ARM64) || _XM_VECTORCALL_ ) && !defined(_XM_NO_INTRINSICS_)
436 typedef const XMMATRIX FXMMATRIX;
437 #else
438 typedef const XMMATRIX& FXMMATRIX;
439 #endif
441 // Fix-up for (2nd+) XMMATRIX parameters to pass by reference
442 typedef const XMMATRIX& CXMMATRIX;
// 4x4 row-major matrix stored as four XMVECTOR rows. With intrinsics enabled
// it is 16-byte aligned and exposes only r[4]; in the no-intrinsics build it
// additionally overlays named elements (_11.._44) and m[4][4].
// NOTE(review): the brace-only lines of this struct appear to have been lost
// in extraction; the tokens below are otherwise unmodified.
444 #ifdef _XM_NO_INTRINSICS_
445 struct XMMATRIX
446 #else
447 struct DECLSPEC_ALIGN(16) XMMATRIX
448 #endif
450 #ifdef _XM_NO_INTRINSICS_
451 union
453 XMVECTOR r[4];
454 struct
456 float _11, _12, _13, _14;
457 float _21, _22, _23, _24;
458 float _31, _32, _33, _34;
459 float _41, _42, _43, _44;
461 float m[4][4];
463 #else
464 XMVECTOR r[4];
465 #endif
// Default ctor: '=default' on VS2013+, '{}' on older MSVC (see XM_CTOR_DEFAULT).
467 XMMATRIX() XM_CTOR_DEFAULT
468 #if defined(_MSC_VER) && _MSC_VER >= 1900
// VS2015+ can initialize the row array in a constexpr member initializer.
469 constexpr XMMATRIX(FXMVECTOR R0, FXMVECTOR R1, FXMVECTOR R2, CXMVECTOR R3) : r{ R0,R1,R2,R3 } {}
470 #else
471 XMMATRIX(FXMVECTOR R0, FXMVECTOR R1, FXMVECTOR R2, CXMVECTOR R3) { r[0] = R0; r[1] = R1; r[2] = R2; r[3] = R3; }
472 #endif
// Element-wise and array constructors are defined out of line (.inl).
473 XMMATRIX(float m00, float m01, float m02, float m03,
474 float m10, float m11, float m12, float m13,
475 float m20, float m21, float m22, float m23,
476 float m30, float m31, float m32, float m33);
477 explicit XMMATRIX(_In_reads_(16) const float *pArray);
479 #ifdef _XM_NO_INTRINSICS_
// Element access by (row, column); only available when m[][] exists.
480 float operator() (size_t Row, size_t Column) const { return m[Row][Column]; }
481 float& operator() (size_t Row, size_t Column) { return m[Row][Column]; }
482 #endif
484 XMMATRIX& operator= (const XMMATRIX& M) { r[0] = M.r[0]; r[1] = M.r[1]; r[2] = M.r[2]; r[3] = M.r[3]; return *this; }
486 XMMATRIX operator+ () const { return *this; }
487 XMMATRIX operator- () const;
489 XMMATRIX& XM_CALLCONV operator+= (FXMMATRIX M);
490 XMMATRIX& XM_CALLCONV operator-= (FXMMATRIX M);
491 XMMATRIX& XM_CALLCONV operator*= (FXMMATRIX M);
492 XMMATRIX& operator*= (float S);
493 XMMATRIX& operator/= (float S);
495 XMMATRIX XM_CALLCONV operator+ (FXMMATRIX M) const;
496 XMMATRIX XM_CALLCONV operator- (FXMMATRIX M) const;
497 XMMATRIX XM_CALLCONV operator* (FXMMATRIX M) const;
498 XMMATRIX operator* (float S) const;
499 XMMATRIX operator/ (float S) const;
501 friend XMMATRIX XM_CALLCONV operator* (float S, FXMMATRIX M);
504 //------------------------------------------------------------------------------
505 // 2D Vector; 32 bit floating point components
// Unaligned 2-float storage type; XMFLOAT2A below is the 16-byte aligned
// variant used with the faster aligned load/store paths.
506 struct XMFLOAT2
508 float x;
509 float y;
511 XMFLOAT2() XM_CTOR_DEFAULT
512 XM_CONSTEXPR XMFLOAT2(float _x, float _y) : x(_x), y(_y) {}
513 explicit XMFLOAT2(_In_reads_(2) const float *pArray) : x(pArray[0]), y(pArray[1]) {}
515 XMFLOAT2& operator= (const XMFLOAT2& Float2) { x = Float2.x; y = Float2.y; return *this; }
518 // 2D Vector; 32 bit floating point components aligned on a 16 byte boundary
519 struct DECLSPEC_ALIGN(16) XMFLOAT2A : public XMFLOAT2
521 XMFLOAT2A() XM_CTOR_DEFAULT
522 XM_CONSTEXPR XMFLOAT2A(float _x, float _y) : XMFLOAT2(_x, _y) {}
523 explicit XMFLOAT2A(_In_reads_(2) const float *pArray) : XMFLOAT2(pArray) {}
525 XMFLOAT2A& operator= (const XMFLOAT2A& Float2) { x = Float2.x; y = Float2.y; return *this; }
528 //------------------------------------------------------------------------------
529 // 2D Vector; 32 bit signed integer components
// 2-component signed-integer storage type (no aligned variant exists).
530 struct XMINT2
532 int32_t x;
533 int32_t y;
535 XMINT2() XM_CTOR_DEFAULT
536 XM_CONSTEXPR XMINT2(int32_t _x, int32_t _y) : x(_x), y(_y) {}
537 explicit XMINT2(_In_reads_(2) const int32_t *pArray) : x(pArray[0]), y(pArray[1]) {}
539 XMINT2& operator= (const XMINT2& Int2) { x = Int2.x; y = Int2.y; return *this; }
542 // 2D Vector; 32 bit unsigned integer components
543 struct XMUINT2
545 uint32_t x;
546 uint32_t y;
548 XMUINT2() XM_CTOR_DEFAULT
549 XM_CONSTEXPR XMUINT2(uint32_t _x, uint32_t _y) : x(_x), y(_y) {}
550 explicit XMUINT2(_In_reads_(2) const uint32_t *pArray) : x(pArray[0]), y(pArray[1]) {}
552 XMUINT2& operator= (const XMUINT2& UInt2) { x = UInt2.x; y = UInt2.y; return *this; }
555 //------------------------------------------------------------------------------
556 // 3D Vector; 32 bit floating point components
// Unaligned 3-float storage type; XMFLOAT3A below adds 16-byte alignment.
557 struct XMFLOAT3
559 float x;
560 float y;
561 float z;
563 XMFLOAT3() XM_CTOR_DEFAULT
564 XM_CONSTEXPR XMFLOAT3(float _x, float _y, float _z) : x(_x), y(_y), z(_z) {}
565 explicit XMFLOAT3(_In_reads_(3) const float *pArray) : x(pArray[0]), y(pArray[1]), z(pArray[2]) {}
567 XMFLOAT3& operator= (const XMFLOAT3& Float3) { x = Float3.x; y = Float3.y; z = Float3.z; return *this; }
570 // 3D Vector; 32 bit floating point components aligned on a 16 byte boundary
571 struct DECLSPEC_ALIGN(16) XMFLOAT3A : public XMFLOAT3
573 XMFLOAT3A() XM_CTOR_DEFAULT
574 XM_CONSTEXPR XMFLOAT3A(float _x, float _y, float _z) : XMFLOAT3(_x, _y, _z) {}
575 explicit XMFLOAT3A(_In_reads_(3) const float *pArray) : XMFLOAT3(pArray) {}
577 XMFLOAT3A& operator= (const XMFLOAT3A& Float3) { x = Float3.x; y = Float3.y; z = Float3.z; return *this; }
580 //------------------------------------------------------------------------------
581 // 3D Vector; 32 bit signed integer components
// 3-component signed-integer storage type (no aligned variant exists).
582 struct XMINT3
584 int32_t x;
585 int32_t y;
586 int32_t z;
588 XMINT3() XM_CTOR_DEFAULT
589 XM_CONSTEXPR XMINT3(int32_t _x, int32_t _y, int32_t _z) : x(_x), y(_y), z(_z) {}
590 explicit XMINT3(_In_reads_(3) const int32_t *pArray) : x(pArray[0]), y(pArray[1]), z(pArray[2]) {}
592 XMINT3& operator= (const XMINT3& i3) { x = i3.x; y = i3.y; z = i3.z; return *this; }
595 // 3D Vector; 32 bit unsigned integer components
596 struct XMUINT3
598 uint32_t x;
599 uint32_t y;
600 uint32_t z;
602 XMUINT3() XM_CTOR_DEFAULT
603 XM_CONSTEXPR XMUINT3(uint32_t _x, uint32_t _y, uint32_t _z) : x(_x), y(_y), z(_z) {}
604 explicit XMUINT3(_In_reads_(3) const uint32_t *pArray) : x(pArray[0]), y(pArray[1]), z(pArray[2]) {}
606 XMUINT3& operator= (const XMUINT3& u3) { x = u3.x; y = u3.y; z = u3.z; return *this; }
609 //------------------------------------------------------------------------------
610 // 4D Vector; 32 bit floating point components
// Unaligned 4-float storage type; XMFLOAT4A below adds 16-byte alignment.
611 struct XMFLOAT4
613 float x;
614 float y;
615 float z;
616 float w;
618 XMFLOAT4() XM_CTOR_DEFAULT
619 XM_CONSTEXPR XMFLOAT4(float _x, float _y, float _z, float _w) : x(_x), y(_y), z(_z), w(_w) {}
620 explicit XMFLOAT4(_In_reads_(4) const float *pArray) : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {}
622 XMFLOAT4& operator= (const XMFLOAT4& Float4) { x = Float4.x; y = Float4.y; z = Float4.z; w = Float4.w; return *this; }
625 // 4D Vector; 32 bit floating point components aligned on a 16 byte boundary
626 struct DECLSPEC_ALIGN(16) XMFLOAT4A : public XMFLOAT4
628 XMFLOAT4A() XM_CTOR_DEFAULT
629 XM_CONSTEXPR XMFLOAT4A(float _x, float _y, float _z, float _w) : XMFLOAT4(_x, _y, _z, _w) {}
630 explicit XMFLOAT4A(_In_reads_(4) const float *pArray) : XMFLOAT4(pArray) {}
632 XMFLOAT4A& operator= (const XMFLOAT4A& Float4) { x = Float4.x; y = Float4.y; z = Float4.z; w = Float4.w; return *this; }
635 //------------------------------------------------------------------------------
636 // 4D Vector; 32 bit signed integer components
// 4-component signed-integer storage type (no aligned variant exists).
637 struct XMINT4
639 int32_t x;
640 int32_t y;
641 int32_t z;
642 int32_t w;
644 XMINT4() XM_CTOR_DEFAULT
645 XM_CONSTEXPR XMINT4(int32_t _x, int32_t _y, int32_t _z, int32_t _w) : x(_x), y(_y), z(_z), w(_w) {}
646 explicit XMINT4(_In_reads_(4) const int32_t *pArray) : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {}
648 XMINT4& operator= (const XMINT4& Int4) { x = Int4.x; y = Int4.y; z = Int4.z; w = Int4.w; return *this; }
651 // 4D Vector; 32 bit unsigned integer components
652 struct XMUINT4
654 uint32_t x;
655 uint32_t y;
656 uint32_t z;
657 uint32_t w;
659 XMUINT4() XM_CTOR_DEFAULT
660 XM_CONSTEXPR XMUINT4(uint32_t _x, uint32_t _y, uint32_t _z, uint32_t _w) : x(_x), y(_y), z(_z), w(_w) {}
661 explicit XMUINT4(_In_reads_(4) const uint32_t *pArray) : x(pArray[0]), y(pArray[1]), z(pArray[2]), w(pArray[3]) {}
663 XMUINT4& operator= (const XMUINT4& UInt4) { x = UInt4.x; y = UInt4.y; z = UInt4.z; w = UInt4.w; return *this; }
666 //------------------------------------------------------------------------------
667 // 3x3 Matrix: 32 bit floating point components
// Row-major 3x3 float matrix storage type; elements are addressable both as
// named members (_RC, 1-based row/column) and as m[3][3] (0-based).
668 struct XMFLOAT3X3
670 union
672 struct
674 float _11, _12, _13;
675 float _21, _22, _23;
676 float _31, _32, _33;
678 float m[3][3];
681 XMFLOAT3X3() XM_CTOR_DEFAULT
682 XM_CONSTEXPR XMFLOAT3X3(float m00, float m01, float m02,
683 float m10, float m11, float m12,
684 float m20, float m21, float m22)
685 : _11(m00), _12(m01), _13(m02),
686 _21(m10), _22(m11), _23(m12),
687 _31(m20), _32(m21), _33(m22) {}
688 explicit XMFLOAT3X3(_In_reads_(9) const float *pArray);
// 0-based (row, column) element access.
690 float operator() (size_t Row, size_t Column) const { return m[Row][Column]; }
691 float& operator() (size_t Row, size_t Column) { return m[Row][Column]; }
693 XMFLOAT3X3& operator= (const XMFLOAT3X3& Float3x3);
696 //------------------------------------------------------------------------------
697 // 4x3 Matrix: 32 bit floating point components
// Row-major 4x3 float matrix storage type; XMFLOAT4X3A below adds 16-byte
// alignment. Elements are addressable as named members or as m[4][3].
698 struct XMFLOAT4X3
700 union
702 struct
704 float _11, _12, _13;
705 float _21, _22, _23;
706 float _31, _32, _33;
707 float _41, _42, _43;
709 float m[4][3];
712 XMFLOAT4X3() XM_CTOR_DEFAULT
713 XM_CONSTEXPR XMFLOAT4X3(float m00, float m01, float m02,
714 float m10, float m11, float m12,
715 float m20, float m21, float m22,
716 float m30, float m31, float m32)
717 : _11(m00), _12(m01), _13(m02),
718 _21(m10), _22(m11), _23(m12),
719 _31(m20), _32(m21), _33(m22),
720 _41(m30), _42(m31), _43(m32) {}
721 explicit XMFLOAT4X3(_In_reads_(12) const float *pArray);
// 0-based (row, column) element access.
723 float operator() (size_t Row, size_t Column) const { return m[Row][Column]; }
724 float& operator() (size_t Row, size_t Column) { return m[Row][Column]; }
726 XMFLOAT4X3& operator= (const XMFLOAT4X3& Float4x3);
730 // 4x3 Matrix: 32 bit floating point components aligned on a 16 byte boundary
731 struct DECLSPEC_ALIGN(16) XMFLOAT4X3A : public XMFLOAT4X3
733 XMFLOAT4X3A() XM_CTOR_DEFAULT
734 XM_CONSTEXPR XMFLOAT4X3A(float m00, float m01, float m02,
735 float m10, float m11, float m12,
736 float m20, float m21, float m22,
737 float m30, float m31, float m32) :
738 XMFLOAT4X3(m00,m01,m02,m10,m11,m12,m20,m21,m22,m30,m31,m32) {}
739 explicit XMFLOAT4X3A(_In_reads_(12) const float *pArray) : XMFLOAT4X3(pArray) {}
741 float operator() (size_t Row, size_t Column) const { return m[Row][Column]; }
742 float& operator() (size_t Row, size_t Column) { return m[Row][Column]; }
744 XMFLOAT4X3A& operator= (const XMFLOAT4X3A& Float4x3);
747 //------------------------------------------------------------------------------
748 // 4x4 Matrix: 32 bit floating point components
// Row-major 4x4 float matrix storage type; XMFLOAT4X4A below adds 16-byte
// alignment. Elements are addressable as named members or as m[4][4].
749 struct XMFLOAT4X4
751 union
753 struct
755 float _11, _12, _13, _14;
756 float _21, _22, _23, _24;
757 float _31, _32, _33, _34;
758 float _41, _42, _43, _44;
760 float m[4][4];
763 XMFLOAT4X4() XM_CTOR_DEFAULT
764 XM_CONSTEXPR XMFLOAT4X4(float m00, float m01, float m02, float m03,
765 float m10, float m11, float m12, float m13,
766 float m20, float m21, float m22, float m23,
767 float m30, float m31, float m32, float m33)
768 : _11(m00), _12(m01), _13(m02), _14(m03),
769 _21(m10), _22(m11), _23(m12), _24(m13),
770 _31(m20), _32(m21), _33(m22), _34(m23),
771 _41(m30), _42(m31), _43(m32), _44(m33) {}
772 explicit XMFLOAT4X4(_In_reads_(16) const float *pArray);
// 0-based (row, column) element access.
774 float operator() (size_t Row, size_t Column) const { return m[Row][Column]; }
775 float& operator() (size_t Row, size_t Column) { return m[Row][Column]; }
777 XMFLOAT4X4& operator= (const XMFLOAT4X4& Float4x4);
780 // 4x4 Matrix: 32 bit floating point components aligned on a 16 byte boundary
781 struct DECLSPEC_ALIGN(16) XMFLOAT4X4A : public XMFLOAT4X4
783 XMFLOAT4X4A() XM_CTOR_DEFAULT
784 XM_CONSTEXPR XMFLOAT4X4A(float m00, float m01, float m02, float m03,
785 float m10, float m11, float m12, float m13,
786 float m20, float m21, float m22, float m23,
787 float m30, float m31, float m32, float m33)
788 : XMFLOAT4X4(m00,m01,m02,m03,m10,m11,m12,m13,m20,m21,m22,m23,m30,m31,m32,m33) {}
789 explicit XMFLOAT4X4A(_In_reads_(16) const float *pArray) : XMFLOAT4X4(pArray) {}
791 float operator() (size_t Row, size_t Column) const { return m[Row][Column]; }
792 float& operator() (size_t Row, size_t Column) { return m[Row][Column]; }
794 XMFLOAT4X4A& operator= (const XMFLOAT4X4A& Float4x4);
797 ////////////////////////////////////////////////////////////////////////////////
799 #ifdef _PREFAST_
800 #pragma prefast(pop)
801 #endif
803 #pragma warning(pop)
805 /****************************************************************************
807 * Data conversion operations
809 ****************************************************************************/
811 XMVECTOR XM_CALLCONV XMConvertVectorIntToFloat(FXMVECTOR VInt, uint32_t DivExponent);
812 XMVECTOR XM_CALLCONV XMConvertVectorFloatToInt(FXMVECTOR VFloat, uint32_t MulExponent);
813 XMVECTOR XM_CALLCONV XMConvertVectorUIntToFloat(FXMVECTOR VUInt, uint32_t DivExponent);
814 XMVECTOR XM_CALLCONV XMConvertVectorFloatToUInt(FXMVECTOR VFloat, uint32_t MulExponent);
816 #if defined(__XNAMATH_H__) && defined(XMVectorSetBinaryConstant)
817 #undef XMVectorSetBinaryConstant
818 #undef XMVectorSplatConstant
819 #undef XMVectorSplatConstantInt
820 #endif
822 XMVECTOR XM_CALLCONV XMVectorSetBinaryConstant(uint32_t C0, uint32_t C1, uint32_t C2, uint32_t C3);
823 XMVECTOR XM_CALLCONV XMVectorSplatConstant(int32_t IntConstant, uint32_t DivExponent);
824 XMVECTOR XM_CALLCONV XMVectorSplatConstantInt(int32_t IntConstant);
826 /****************************************************************************
828 * Load operations
830 ****************************************************************************/
832 XMVECTOR XM_CALLCONV XMLoadInt(_In_ const uint32_t* pSource);
833 XMVECTOR XM_CALLCONV XMLoadFloat(_In_ const float* pSource);
// 2-component loads into an XMVECTOR. The 'A'-suffixed variant presumably
// requires a 16-byte aligned source — TODO confirm against the .inl
// implementation.
// Fix: renamed 'PSource' to 'pSource' in XMLoadInt2A for consistency with
// every other declaration in this header.
835 XMVECTOR XM_CALLCONV XMLoadInt2(_In_reads_(2) const uint32_t* pSource);
836 XMVECTOR XM_CALLCONV XMLoadInt2A(_In_reads_(2) const uint32_t* pSource);
837 XMVECTOR XM_CALLCONV XMLoadFloat2(_In_ const XMFLOAT2* pSource);
838 XMVECTOR XM_CALLCONV XMLoadFloat2A(_In_ const XMFLOAT2A* pSource);
839 XMVECTOR XM_CALLCONV XMLoadSInt2(_In_ const XMINT2* pSource);
840 XMVECTOR XM_CALLCONV XMLoadUInt2(_In_ const XMUINT2* pSource);
842 XMVECTOR XM_CALLCONV XMLoadInt3(_In_reads_(3) const uint32_t* pSource);
843 XMVECTOR XM_CALLCONV XMLoadInt3A(_In_reads_(3) const uint32_t* pSource);
844 XMVECTOR XM_CALLCONV XMLoadFloat3(_In_ const XMFLOAT3* pSource);
845 XMVECTOR XM_CALLCONV XMLoadFloat3A(_In_ const XMFLOAT3A* pSource);
846 XMVECTOR XM_CALLCONV XMLoadSInt3(_In_ const XMINT3* pSource);
847 XMVECTOR XM_CALLCONV XMLoadUInt3(_In_ const XMUINT3* pSource);
849 XMVECTOR XM_CALLCONV XMLoadInt4(_In_reads_(4) const uint32_t* pSource);
850 XMVECTOR XM_CALLCONV XMLoadInt4A(_In_reads_(4) const uint32_t* pSource);
851 XMVECTOR XM_CALLCONV XMLoadFloat4(_In_ const XMFLOAT4* pSource);
852 XMVECTOR XM_CALLCONV XMLoadFloat4A(_In_ const XMFLOAT4A* pSource);
853 XMVECTOR XM_CALLCONV XMLoadSInt4(_In_ const XMINT4* pSource);
854 XMVECTOR XM_CALLCONV XMLoadUInt4(_In_ const XMUINT4* pSource);
856 XMMATRIX XM_CALLCONV XMLoadFloat3x3(_In_ const XMFLOAT3X3* pSource);
857 XMMATRIX XM_CALLCONV XMLoadFloat4x3(_In_ const XMFLOAT4X3* pSource);
858 XMMATRIX XM_CALLCONV XMLoadFloat4x3A(_In_ const XMFLOAT4X3A* pSource);
859 XMMATRIX XM_CALLCONV XMLoadFloat4x4(_In_ const XMFLOAT4X4* pSource);
860 XMMATRIX XM_CALLCONV XMLoadFloat4x4A(_In_ const XMFLOAT4X4A* pSource);
862 /****************************************************************************
864 * Store operations
866 ****************************************************************************/
868 void XM_CALLCONV XMStoreInt(_Out_ uint32_t* pDestination, _In_ FXMVECTOR V);
869 void XM_CALLCONV XMStoreFloat(_Out_ float* pDestination, _In_ FXMVECTOR V);
871 void XM_CALLCONV XMStoreInt2(_Out_writes_(2) uint32_t* pDestination, _In_ FXMVECTOR V);
872 void XM_CALLCONV XMStoreInt2A(_Out_writes_(2) uint32_t* pDestination, _In_ FXMVECTOR V);
873 void XM_CALLCONV XMStoreFloat2(_Out_ XMFLOAT2* pDestination, _In_ FXMVECTOR V);
874 void XM_CALLCONV XMStoreFloat2A(_Out_ XMFLOAT2A* pDestination, _In_ FXMVECTOR V);
875 void XM_CALLCONV XMStoreSInt2(_Out_ XMINT2* pDestination, _In_ FXMVECTOR V);
876 void XM_CALLCONV XMStoreUInt2(_Out_ XMUINT2* pDestination, _In_ FXMVECTOR V);
878 void XM_CALLCONV XMStoreInt3(_Out_writes_(3) uint32_t* pDestination, _In_ FXMVECTOR V);
879 void XM_CALLCONV XMStoreInt3A(_Out_writes_(3) uint32_t* pDestination, _In_ FXMVECTOR V);
880 void XM_CALLCONV XMStoreFloat3(_Out_ XMFLOAT3* pDestination, _In_ FXMVECTOR V);
881 void XM_CALLCONV XMStoreFloat3A(_Out_ XMFLOAT3A* pDestination, _In_ FXMVECTOR V);
882 void XM_CALLCONV XMStoreSInt3(_Out_ XMINT3* pDestination, _In_ FXMVECTOR V);
883 void XM_CALLCONV XMStoreUInt3(_Out_ XMUINT3* pDestination, _In_ FXMVECTOR V);
885 void XM_CALLCONV XMStoreInt4(_Out_writes_(4) uint32_t* pDestination, _In_ FXMVECTOR V);
886 void XM_CALLCONV XMStoreInt4A(_Out_writes_(4) uint32_t* pDestination, _In_ FXMVECTOR V);
887 void XM_CALLCONV XMStoreFloat4(_Out_ XMFLOAT4* pDestination, _In_ FXMVECTOR V);
888 void XM_CALLCONV XMStoreFloat4A(_Out_ XMFLOAT4A* pDestination, _In_ FXMVECTOR V);
889 void XM_CALLCONV XMStoreSInt4(_Out_ XMINT4* pDestination, _In_ FXMVECTOR V);
890 void XM_CALLCONV XMStoreUInt4(_Out_ XMUINT4* pDestination, _In_ FXMVECTOR V);
892 void XM_CALLCONV XMStoreFloat3x3(_Out_ XMFLOAT3X3* pDestination, _In_ FXMMATRIX M);
893 void XM_CALLCONV XMStoreFloat4x3(_Out_ XMFLOAT4X3* pDestination, _In_ FXMMATRIX M);
894 void XM_CALLCONV XMStoreFloat4x3A(_Out_ XMFLOAT4X3A* pDestination, _In_ FXMMATRIX M);
895 void XM_CALLCONV XMStoreFloat4x4(_Out_ XMFLOAT4X4* pDestination, _In_ FXMMATRIX M);
896 void XM_CALLCONV XMStoreFloat4x4A(_Out_ XMFLOAT4X4A* pDestination, _In_ FXMMATRIX M);
898 /****************************************************************************
900 * General vector operations
902 ****************************************************************************/
// Constructors / constant generators.
904 XMVECTOR XM_CALLCONV XMVectorZero();
905 XMVECTOR XM_CALLCONV XMVectorSet(float x, float y, float z, float w);
906 XMVECTOR XM_CALLCONV XMVectorSetInt(uint32_t x, uint32_t y, uint32_t z, uint32_t w);
907 XMVECTOR XM_CALLCONV XMVectorReplicate(float Value);
908 XMVECTOR XM_CALLCONV XMVectorReplicatePtr(_In_ const float *pValue);
909 XMVECTOR XM_CALLCONV XMVectorReplicateInt(uint32_t Value);
910 XMVECTOR XM_CALLCONV XMVectorReplicateIntPtr(_In_ const uint32_t *pValue);
911 XMVECTOR XM_CALLCONV XMVectorTrueInt();
912 XMVECTOR XM_CALLCONV XMVectorFalseInt();
// Broadcast one lane (or a common constant) across all four lanes.
913 XMVECTOR XM_CALLCONV XMVectorSplatX(FXMVECTOR V);
914 XMVECTOR XM_CALLCONV XMVectorSplatY(FXMVECTOR V);
915 XMVECTOR XM_CALLCONV XMVectorSplatZ(FXMVECTOR V);
916 XMVECTOR XM_CALLCONV XMVectorSplatW(FXMVECTOR V);
917 XMVECTOR XM_CALLCONV XMVectorSplatOne();
918 XMVECTOR XM_CALLCONV XMVectorSplatInfinity();
919 XMVECTOR XM_CALLCONV XMVectorSplatQNaN();
920 XMVECTOR XM_CALLCONV XMVectorSplatEpsilon();
921 XMVECTOR XM_CALLCONV XMVectorSplatSignMask();
// Component accessors (float view).
923 float XM_CALLCONV XMVectorGetByIndex(FXMVECTOR V, size_t i);
924 float XM_CALLCONV XMVectorGetX(FXMVECTOR V);
925 float XM_CALLCONV XMVectorGetY(FXMVECTOR V);
926 float XM_CALLCONV XMVectorGetZ(FXMVECTOR V);
927 float XM_CALLCONV XMVectorGetW(FXMVECTOR V);
929 void XM_CALLCONV XMVectorGetByIndexPtr(_Out_ float *f, _In_ FXMVECTOR V, _In_ size_t i);
930 void XM_CALLCONV XMVectorGetXPtr(_Out_ float *x, _In_ FXMVECTOR V);
931 void XM_CALLCONV XMVectorGetYPtr(_Out_ float *y, _In_ FXMVECTOR V);
932 void XM_CALLCONV XMVectorGetZPtr(_Out_ float *z, _In_ FXMVECTOR V);
933 void XM_CALLCONV XMVectorGetWPtr(_Out_ float *w, _In_ FXMVECTOR V);
// Component accessors (integer bit-pattern view of each lane).
935 uint32_t XM_CALLCONV XMVectorGetIntByIndex(FXMVECTOR V, size_t i);
936 uint32_t XM_CALLCONV XMVectorGetIntX(FXMVECTOR V);
937 uint32_t XM_CALLCONV XMVectorGetIntY(FXMVECTOR V);
938 uint32_t XM_CALLCONV XMVectorGetIntZ(FXMVECTOR V);
939 uint32_t XM_CALLCONV XMVectorGetIntW(FXMVECTOR V);
941 void XM_CALLCONV XMVectorGetIntByIndexPtr(_Out_ uint32_t *x, _In_ FXMVECTOR V, _In_ size_t i);
942 void XM_CALLCONV XMVectorGetIntXPtr(_Out_ uint32_t *x, _In_ FXMVECTOR V);
943 void XM_CALLCONV XMVectorGetIntYPtr(_Out_ uint32_t *y, _In_ FXMVECTOR V);
944 void XM_CALLCONV XMVectorGetIntZPtr(_Out_ uint32_t *z, _In_ FXMVECTOR V);
945 void XM_CALLCONV XMVectorGetIntWPtr(_Out_ uint32_t *w, _In_ FXMVECTOR V);
// Component setters: return a copy of V with the named lane replaced.
947 XMVECTOR XM_CALLCONV XMVectorSetByIndex(FXMVECTOR V,float f, size_t i);
948 XMVECTOR XM_CALLCONV XMVectorSetX(FXMVECTOR V, float x);
949 XMVECTOR XM_CALLCONV XMVectorSetY(FXMVECTOR V, float y);
950 XMVECTOR XM_CALLCONV XMVectorSetZ(FXMVECTOR V, float z);
951 XMVECTOR XM_CALLCONV XMVectorSetW(FXMVECTOR V, float w);
953 XMVECTOR XM_CALLCONV XMVectorSetByIndexPtr(_In_ FXMVECTOR V, _In_ const float *f, _In_ size_t i);
954 XMVECTOR XM_CALLCONV XMVectorSetXPtr(_In_ FXMVECTOR V, _In_ const float *x);
955 XMVECTOR XM_CALLCONV XMVectorSetYPtr(_In_ FXMVECTOR V, _In_ const float *y);
956 XMVECTOR XM_CALLCONV XMVectorSetZPtr(_In_ FXMVECTOR V, _In_ const float *z);
957 XMVECTOR XM_CALLCONV XMVectorSetWPtr(_In_ FXMVECTOR V, _In_ const float *w);
// Integer-lane setters.
959 XMVECTOR XM_CALLCONV XMVectorSetIntByIndex(FXMVECTOR V, uint32_t x, size_t i);
960 XMVECTOR XM_CALLCONV XMVectorSetIntX(FXMVECTOR V, uint32_t x);
961 XMVECTOR XM_CALLCONV XMVectorSetIntY(FXMVECTOR V, uint32_t y);
962 XMVECTOR XM_CALLCONV XMVectorSetIntZ(FXMVECTOR V, uint32_t z);
963 XMVECTOR XM_CALLCONV XMVectorSetIntW(FXMVECTOR V, uint32_t w);
965 XMVECTOR XM_CALLCONV XMVectorSetIntByIndexPtr(_In_ FXMVECTOR V, _In_ const uint32_t *x, _In_ size_t i);
966 XMVECTOR XM_CALLCONV XMVectorSetIntXPtr(_In_ FXMVECTOR V, _In_ const uint32_t *x);
967 XMVECTOR XM_CALLCONV XMVectorSetIntYPtr(_In_ FXMVECTOR V, _In_ const uint32_t *y);
968 XMVECTOR XM_CALLCONV XMVectorSetIntZPtr(_In_ FXMVECTOR V, _In_ const uint32_t *z);
969 XMVECTOR XM_CALLCONV XMVectorSetIntWPtr(_In_ FXMVECTOR V, _In_ const uint32_t *w);
// Remove any legacy XNAMath macro so the function declaration below wins.
971 #if defined(__XNAMATH_H__) && defined(XMVectorSwizzle)
972 #undef XMVectorSwizzle
973 #endif
// Lane rearrangement and per-lane selection.
975 XMVECTOR XM_CALLCONV XMVectorSwizzle(FXMVECTOR V, uint32_t E0, uint32_t E1, uint32_t E2, uint32_t E3);
976 XMVECTOR XM_CALLCONV XMVectorPermute(FXMVECTOR V1, FXMVECTOR V2, uint32_t PermuteX, uint32_t PermuteY, uint32_t PermuteZ, uint32_t PermuteW);
977 XMVECTOR XM_CALLCONV XMVectorSelectControl(uint32_t VectorIndex0, uint32_t VectorIndex1, uint32_t VectorIndex2, uint32_t VectorIndex3);
978 XMVECTOR XM_CALLCONV XMVectorSelect(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Control);
979 XMVECTOR XM_CALLCONV XMVectorMergeXY(FXMVECTOR V1, FXMVECTOR V2);
980 XMVECTOR XM_CALLCONV XMVectorMergeZW(FXMVECTOR V1, FXMVECTOR V2);
// Same macro-shadowing guard for the element shift/rotate/insert group.
982 #if defined(__XNAMATH_H__) && defined(XMVectorShiftLeft)
983 #undef XMVectorShiftLeft
984 #undef XMVectorRotateLeft
985 #undef XMVectorRotateRight
986 #undef XMVectorInsert
987 #endif
989 XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2, uint32_t Elements);
990 XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V, uint32_t Elements);
991 XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V, uint32_t Elements);
992 XMVECTOR XM_CALLCONV XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS, uint32_t VSLeftRotateElements,
993 uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3);
// Per-component comparisons; the *R forms also return a comparison record
// through pCR.
995 XMVECTOR XM_CALLCONV XMVectorEqual(FXMVECTOR V1, FXMVECTOR V2);
996 XMVECTOR XM_CALLCONV XMVectorEqualR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2);
997 XMVECTOR XM_CALLCONV XMVectorEqualInt(FXMVECTOR V1, FXMVECTOR V2);
998 XMVECTOR XM_CALLCONV XMVectorEqualIntR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V, _In_ FXMVECTOR V2);
999 XMVECTOR XM_CALLCONV XMVectorNearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
1000 XMVECTOR XM_CALLCONV XMVectorNotEqual(FXMVECTOR V1, FXMVECTOR V2);
1001 XMVECTOR XM_CALLCONV XMVectorNotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
1002 XMVECTOR XM_CALLCONV XMVectorGreater(FXMVECTOR V1, FXMVECTOR V2);
1003 XMVECTOR XM_CALLCONV XMVectorGreaterR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2);
1004 XMVECTOR XM_CALLCONV XMVectorGreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1005 XMVECTOR XM_CALLCONV XMVectorGreaterOrEqualR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V1, _In_ FXMVECTOR V2);
1006 XMVECTOR XM_CALLCONV XMVectorLess(FXMVECTOR V1, FXMVECTOR V2);
1007 XMVECTOR XM_CALLCONV XMVectorLessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1008 XMVECTOR XM_CALLCONV XMVectorInBounds(FXMVECTOR V, FXMVECTOR Bounds);
1009 XMVECTOR XM_CALLCONV XMVectorInBoundsR(_Out_ uint32_t* pCR, _In_ FXMVECTOR V, _In_ FXMVECTOR Bounds);
1011 XMVECTOR XM_CALLCONV XMVectorIsNaN(FXMVECTOR V);
1012 XMVECTOR XM_CALLCONV XMVectorIsInfinite(FXMVECTOR V);
// Min/max, rounding and clamping.
1014 XMVECTOR XM_CALLCONV XMVectorMin(FXMVECTOR V1,FXMVECTOR V2);
1015 XMVECTOR XM_CALLCONV XMVectorMax(FXMVECTOR V1, FXMVECTOR V2);
1016 XMVECTOR XM_CALLCONV XMVectorRound(FXMVECTOR V);
1017 XMVECTOR XM_CALLCONV XMVectorTruncate(FXMVECTOR V);
1018 XMVECTOR XM_CALLCONV XMVectorFloor(FXMVECTOR V);
1019 XMVECTOR XM_CALLCONV XMVectorCeiling(FXMVECTOR V);
1020 XMVECTOR XM_CALLCONV XMVectorClamp(FXMVECTOR V, FXMVECTOR Min, FXMVECTOR Max);
1021 XMVECTOR XM_CALLCONV XMVectorSaturate(FXMVECTOR V);
// Bitwise operations on the integer view of each lane.
1023 XMVECTOR XM_CALLCONV XMVectorAndInt(FXMVECTOR V1, FXMVECTOR V2);
1024 XMVECTOR XM_CALLCONV XMVectorAndCInt(FXMVECTOR V1, FXMVECTOR V2);
1025 XMVECTOR XM_CALLCONV XMVectorOrInt(FXMVECTOR V1, FXMVECTOR V2);
1026 XMVECTOR XM_CALLCONV XMVectorNorInt(FXMVECTOR V1, FXMVECTOR V2);
1027 XMVECTOR XM_CALLCONV XMVectorXorInt(FXMVECTOR V1, FXMVECTOR V2);
// Component-wise arithmetic.
1029 XMVECTOR XM_CALLCONV XMVectorNegate(FXMVECTOR V);
1030 XMVECTOR XM_CALLCONV XMVectorAdd(FXMVECTOR V1, FXMVECTOR V2);
1031 XMVECTOR XM_CALLCONV XMVectorSum(FXMVECTOR V);
1032 XMVECTOR XM_CALLCONV XMVectorAddAngles(FXMVECTOR V1, FXMVECTOR V2);
1033 XMVECTOR XM_CALLCONV XMVectorSubtract(FXMVECTOR V1, FXMVECTOR V2);
1034 XMVECTOR XM_CALLCONV XMVectorSubtractAngles(FXMVECTOR V1, FXMVECTOR V2);
1035 XMVECTOR XM_CALLCONV XMVectorMultiply(FXMVECTOR V1, FXMVECTOR V2);
1036 XMVECTOR XM_CALLCONV XMVectorMultiplyAdd(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3);
1037 XMVECTOR XM_CALLCONV XMVectorDivide(FXMVECTOR V1, FXMVECTOR V2);
1038 XMVECTOR XM_CALLCONV XMVectorNegativeMultiplySubtract(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3);
1039 XMVECTOR XM_CALLCONV XMVectorScale(FXMVECTOR V, float ScaleFactor);
// Component-wise transcendental functions; the 'Est' variants are the
// estimated forms (lower precision per library convention -- see docs).
1040 XMVECTOR XM_CALLCONV XMVectorReciprocalEst(FXMVECTOR V);
1041 XMVECTOR XM_CALLCONV XMVectorReciprocal(FXMVECTOR V);
1042 XMVECTOR XM_CALLCONV XMVectorSqrtEst(FXMVECTOR V);
1043 XMVECTOR XM_CALLCONV XMVectorSqrt(FXMVECTOR V);
1044 XMVECTOR XM_CALLCONV XMVectorReciprocalSqrtEst(FXMVECTOR V);
1045 XMVECTOR XM_CALLCONV XMVectorReciprocalSqrt(FXMVECTOR V);
1046 XMVECTOR XM_CALLCONV XMVectorExp2(FXMVECTOR V);
1047 XMVECTOR XM_CALLCONV XMVectorExpE(FXMVECTOR V);
1048 XMVECTOR XM_CALLCONV XMVectorExp(FXMVECTOR V);
1049 XMVECTOR XM_CALLCONV XMVectorLog2(FXMVECTOR V);
1050 XMVECTOR XM_CALLCONV XMVectorLogE(FXMVECTOR V);
1051 XMVECTOR XM_CALLCONV XMVectorLog(FXMVECTOR V);
1052 XMVECTOR XM_CALLCONV XMVectorPow(FXMVECTOR V1, FXMVECTOR V2);
1053 XMVECTOR XM_CALLCONV XMVectorAbs(FXMVECTOR V);
1054 XMVECTOR XM_CALLCONV XMVectorMod(FXMVECTOR V1, FXMVECTOR V2);
1055 XMVECTOR XM_CALLCONV XMVectorModAngles(FXMVECTOR Angles);
1056 XMVECTOR XM_CALLCONV XMVectorSin(FXMVECTOR V);
1057 XMVECTOR XM_CALLCONV XMVectorSinEst(FXMVECTOR V);
1058 XMVECTOR XM_CALLCONV XMVectorCos(FXMVECTOR V);
1059 XMVECTOR XM_CALLCONV XMVectorCosEst(FXMVECTOR V);
1060 void XM_CALLCONV XMVectorSinCos(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, _In_ FXMVECTOR V);
1061 void XM_CALLCONV XMVectorSinCosEst(_Out_ XMVECTOR* pSin, _Out_ XMVECTOR* pCos, _In_ FXMVECTOR V);
1062 XMVECTOR XM_CALLCONV XMVectorTan(FXMVECTOR V);
1063 XMVECTOR XM_CALLCONV XMVectorTanEst(FXMVECTOR V);
1064 XMVECTOR XM_CALLCONV XMVectorSinH(FXMVECTOR V);
1065 XMVECTOR XM_CALLCONV XMVectorCosH(FXMVECTOR V);
1066 XMVECTOR XM_CALLCONV XMVectorTanH(FXMVECTOR V);
1067 XMVECTOR XM_CALLCONV XMVectorASin(FXMVECTOR V);
1068 XMVECTOR XM_CALLCONV XMVectorASinEst(FXMVECTOR V);
1069 XMVECTOR XM_CALLCONV XMVectorACos(FXMVECTOR V);
1070 XMVECTOR XM_CALLCONV XMVectorACosEst(FXMVECTOR V);
1071 XMVECTOR XM_CALLCONV XMVectorATan(FXMVECTOR V);
1072 XMVECTOR XM_CALLCONV XMVectorATanEst(FXMVECTOR V);
1073 XMVECTOR XM_CALLCONV XMVectorATan2(FXMVECTOR Y, FXMVECTOR X);
1074 XMVECTOR XM_CALLCONV XMVectorATan2Est(FXMVECTOR Y, FXMVECTOR X);
// Interpolation and spline evaluation.
1075 XMVECTOR XM_CALLCONV XMVectorLerp(FXMVECTOR V0, FXMVECTOR V1, float t);
1076 XMVECTOR XM_CALLCONV XMVectorLerpV(FXMVECTOR V0, FXMVECTOR V1, FXMVECTOR T);
1077 XMVECTOR XM_CALLCONV XMVectorHermite(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, GXMVECTOR Tangent1, float t);
1078 XMVECTOR XM_CALLCONV XMVectorHermiteV(FXMVECTOR Position0, FXMVECTOR Tangent0, FXMVECTOR Position1, GXMVECTOR Tangent1, HXMVECTOR T);
1079 XMVECTOR XM_CALLCONV XMVectorCatmullRom(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR Position3, float t);
1080 XMVECTOR XM_CALLCONV XMVectorCatmullRomV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR Position3, HXMVECTOR T);
1081 XMVECTOR XM_CALLCONV XMVectorBaryCentric(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, float f, float g);
1082 XMVECTOR XM_CALLCONV XMVectorBaryCentricV(FXMVECTOR Position0, FXMVECTOR Position1, FXMVECTOR Position2, GXMVECTOR F, HXMVECTOR G);
1084 /****************************************************************************
1086 * 2D vector operations
1088 ****************************************************************************/
// Comparisons considering only the x and y components.
1090 bool XM_CALLCONV XMVector2Equal(FXMVECTOR V1, FXMVECTOR V2);
1091 uint32_t XM_CALLCONV XMVector2EqualR(FXMVECTOR V1, FXMVECTOR V2);
1092 bool XM_CALLCONV XMVector2EqualInt(FXMVECTOR V1, FXMVECTOR V2);
1093 uint32_t XM_CALLCONV XMVector2EqualIntR(FXMVECTOR V1, FXMVECTOR V2);
1094 bool XM_CALLCONV XMVector2NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
1095 bool XM_CALLCONV XMVector2NotEqual(FXMVECTOR V1, FXMVECTOR V2);
1096 bool XM_CALLCONV XMVector2NotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
1097 bool XM_CALLCONV XMVector2Greater(FXMVECTOR V1, FXMVECTOR V2);
1098 uint32_t XM_CALLCONV XMVector2GreaterR(FXMVECTOR V1, FXMVECTOR V2);
1099 bool XM_CALLCONV XMVector2GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1100 uint32_t XM_CALLCONV XMVector2GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2);
1101 bool XM_CALLCONV XMVector2Less(FXMVECTOR V1, FXMVECTOR V2);
1102 bool XM_CALLCONV XMVector2LessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1103 bool XM_CALLCONV XMVector2InBounds(FXMVECTOR V, FXMVECTOR Bounds);
1105 bool XM_CALLCONV XMVector2IsNaN(FXMVECTOR V);
1106 bool XM_CALLCONV XMVector2IsInfinite(FXMVECTOR V);
// 2D geometric operations (dot, cross, length, normalize, reflect, ...).
1108 XMVECTOR XM_CALLCONV XMVector2Dot(FXMVECTOR V1, FXMVECTOR V2);
1109 XMVECTOR XM_CALLCONV XMVector2Cross(FXMVECTOR V1, FXMVECTOR V2);
1110 XMVECTOR XM_CALLCONV XMVector2LengthSq(FXMVECTOR V);
1111 XMVECTOR XM_CALLCONV XMVector2ReciprocalLengthEst(FXMVECTOR V);
1112 XMVECTOR XM_CALLCONV XMVector2ReciprocalLength(FXMVECTOR V);
1113 XMVECTOR XM_CALLCONV XMVector2LengthEst(FXMVECTOR V);
1114 XMVECTOR XM_CALLCONV XMVector2Length(FXMVECTOR V);
1115 XMVECTOR XM_CALLCONV XMVector2NormalizeEst(FXMVECTOR V);
1116 XMVECTOR XM_CALLCONV XMVector2Normalize(FXMVECTOR V);
1117 XMVECTOR XM_CALLCONV XMVector2ClampLength(FXMVECTOR V, float LengthMin, float LengthMax);
1118 XMVECTOR XM_CALLCONV XMVector2ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax);
1119 XMVECTOR XM_CALLCONV XMVector2Reflect(FXMVECTOR Incident, FXMVECTOR Normal);
1120 XMVECTOR XM_CALLCONV XMVector2Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex);
1121 XMVECTOR XM_CALLCONV XMVector2RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex);
1122 XMVECTOR XM_CALLCONV XMVector2Orthogonal(FXMVECTOR V);
1123 XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2);
1124 XMVECTOR XM_CALLCONV XMVector2AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2);
1125 XMVECTOR XM_CALLCONV XMVector2AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2);
1126 XMVECTOR XM_CALLCONV XMVector2LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point);
1127 XMVECTOR XM_CALLCONV XMVector2IntersectLine(FXMVECTOR Line1Point1, FXMVECTOR Line1Point2, FXMVECTOR Line2Point1, GXMVECTOR Line2Point2);
// Matrix transforms; the *Stream forms process a strided array of vectors,
// with SAL annotations describing the byte extents of each stream.
1128 XMVECTOR XM_CALLCONV XMVector2Transform(FXMVECTOR V, FXMMATRIX M);
1129 XMFLOAT4* XM_CALLCONV XMVector2TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
1130 _In_ size_t OutputStride,
1131 _In_reads_bytes_(sizeof(XMFLOAT2)+InputStride*(VectorCount-1)) const XMFLOAT2* pInputStream,
1132 _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M);
1133 XMVECTOR XM_CALLCONV XMVector2TransformCoord(FXMVECTOR V, FXMMATRIX M);
1134 XMFLOAT2* XM_CALLCONV XMVector2TransformCoordStream(_Out_writes_bytes_(sizeof(XMFLOAT2)+OutputStride*(VectorCount-1)) XMFLOAT2* pOutputStream,
1135 _In_ size_t OutputStride,
1136 _In_reads_bytes_(sizeof(XMFLOAT2)+InputStride*(VectorCount-1)) const XMFLOAT2* pInputStream,
1137 _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M);
1138 XMVECTOR XM_CALLCONV XMVector2TransformNormal(FXMVECTOR V, FXMMATRIX M);
1139 XMFLOAT2* XM_CALLCONV XMVector2TransformNormalStream(_Out_writes_bytes_(sizeof(XMFLOAT2)+OutputStride*(VectorCount-1)) XMFLOAT2* pOutputStream,
1140 _In_ size_t OutputStride,
1141 _In_reads_bytes_(sizeof(XMFLOAT2)+InputStride*(VectorCount-1)) const XMFLOAT2* pInputStream,
1142 _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M);
1144 /****************************************************************************
1146 * 3D vector operations
1148 ****************************************************************************/
// Comparisons considering only the x, y and z components.
1150 bool XM_CALLCONV XMVector3Equal(FXMVECTOR V1, FXMVECTOR V2);
1151 uint32_t XM_CALLCONV XMVector3EqualR(FXMVECTOR V1, FXMVECTOR V2);
1152 bool XM_CALLCONV XMVector3EqualInt(FXMVECTOR V1, FXMVECTOR V2);
1153 uint32_t XM_CALLCONV XMVector3EqualIntR(FXMVECTOR V1, FXMVECTOR V2);
1154 bool XM_CALLCONV XMVector3NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
1155 bool XM_CALLCONV XMVector3NotEqual(FXMVECTOR V1, FXMVECTOR V2);
1156 bool XM_CALLCONV XMVector3NotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
1157 bool XM_CALLCONV XMVector3Greater(FXMVECTOR V1, FXMVECTOR V2);
1158 uint32_t XM_CALLCONV XMVector3GreaterR(FXMVECTOR V1, FXMVECTOR V2);
1159 bool XM_CALLCONV XMVector3GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1160 uint32_t XM_CALLCONV XMVector3GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2);
1161 bool XM_CALLCONV XMVector3Less(FXMVECTOR V1, FXMVECTOR V2);
1162 bool XM_CALLCONV XMVector3LessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1163 bool XM_CALLCONV XMVector3InBounds(FXMVECTOR V, FXMVECTOR Bounds);
1165 bool XM_CALLCONV XMVector3IsNaN(FXMVECTOR V);
1166 bool XM_CALLCONV XMVector3IsInfinite(FXMVECTOR V);
// 3D geometric operations.
1168 XMVECTOR XM_CALLCONV XMVector3Dot(FXMVECTOR V1, FXMVECTOR V2);
1169 XMVECTOR XM_CALLCONV XMVector3Cross(FXMVECTOR V1, FXMVECTOR V2);
1170 XMVECTOR XM_CALLCONV XMVector3LengthSq(FXMVECTOR V);
1171 XMVECTOR XM_CALLCONV XMVector3ReciprocalLengthEst(FXMVECTOR V);
1172 XMVECTOR XM_CALLCONV XMVector3ReciprocalLength(FXMVECTOR V);
1173 XMVECTOR XM_CALLCONV XMVector3LengthEst(FXMVECTOR V);
1174 XMVECTOR XM_CALLCONV XMVector3Length(FXMVECTOR V);
1175 XMVECTOR XM_CALLCONV XMVector3NormalizeEst(FXMVECTOR V);
1176 XMVECTOR XM_CALLCONV XMVector3Normalize(FXMVECTOR V);
1177 XMVECTOR XM_CALLCONV XMVector3ClampLength(FXMVECTOR V, float LengthMin, float LengthMax);
1178 XMVECTOR XM_CALLCONV XMVector3ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax);
1179 XMVECTOR XM_CALLCONV XMVector3Reflect(FXMVECTOR Incident, FXMVECTOR Normal);
1180 XMVECTOR XM_CALLCONV XMVector3Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex);
1181 XMVECTOR XM_CALLCONV XMVector3RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex);
1182 XMVECTOR XM_CALLCONV XMVector3Orthogonal(FXMVECTOR V);
1183 XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2);
1184 XMVECTOR XM_CALLCONV XMVector3AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2);
1185 XMVECTOR XM_CALLCONV XMVector3AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2);
1186 XMVECTOR XM_CALLCONV XMVector3LinePointDistance(FXMVECTOR LinePoint1, FXMVECTOR LinePoint2, FXMVECTOR Point);
1187 void XM_CALLCONV XMVector3ComponentsFromNormal(_Out_ XMVECTOR* pParallel, _Out_ XMVECTOR* pPerpendicular, _In_ FXMVECTOR V, _In_ FXMVECTOR Normal);
// Quaternion rotation of a 3D vector.
1188 XMVECTOR XM_CALLCONV XMVector3Rotate(FXMVECTOR V, FXMVECTOR RotationQuaternion);
1189 XMVECTOR XM_CALLCONV XMVector3InverseRotate(FXMVECTOR V, FXMVECTOR RotationQuaternion);
// Matrix transforms; the *Stream forms process a strided array of vectors.
1190 XMVECTOR XM_CALLCONV XMVector3Transform(FXMVECTOR V, FXMMATRIX M);
1191 XMFLOAT4* XM_CALLCONV XMVector3TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
1192 _In_ size_t OutputStride,
1193 _In_reads_bytes_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) const XMFLOAT3* pInputStream,
1194 _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M);
1195 XMVECTOR XM_CALLCONV XMVector3TransformCoord(FXMVECTOR V, FXMMATRIX M);
1196 XMFLOAT3* XM_CALLCONV XMVector3TransformCoordStream(_Out_writes_bytes_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
1197 _In_ size_t OutputStride,
1198 _In_reads_bytes_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) const XMFLOAT3* pInputStream,
1199 _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M);
1200 XMVECTOR XM_CALLCONV XMVector3TransformNormal(FXMVECTOR V, FXMMATRIX M);
1201 XMFLOAT3* XM_CALLCONV XMVector3TransformNormalStream(_Out_writes_bytes_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
1202 _In_ size_t OutputStride,
1203 _In_reads_bytes_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) const XMFLOAT3* pInputStream,
1204 _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M);
// Viewport project / unproject between world space and screen space.
1205 XMVECTOR XM_CALLCONV XMVector3Project(FXMVECTOR V, float ViewportX, float ViewportY, float ViewportWidth, float ViewportHeight, float ViewportMinZ, float ViewportMaxZ,
1206 FXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World);
1207 XMFLOAT3* XM_CALLCONV XMVector3ProjectStream(_Out_writes_bytes_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
1208 _In_ size_t OutputStride,
1209 _In_reads_bytes_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) const XMFLOAT3* pInputStream,
1210 _In_ size_t InputStride, _In_ size_t VectorCount,
1211 _In_ float ViewportX, _In_ float ViewportY, _In_ float ViewportWidth, _In_ float ViewportHeight, _In_ float ViewportMinZ, _In_ float ViewportMaxZ,
1212 _In_ FXMMATRIX Projection, _In_ CXMMATRIX View, _In_ CXMMATRIX World);
1213 XMVECTOR XM_CALLCONV XMVector3Unproject(FXMVECTOR V, float ViewportX, float ViewportY, float ViewportWidth, float ViewportHeight, float ViewportMinZ, float ViewportMaxZ,
1214 FXMMATRIX Projection, CXMMATRIX View, CXMMATRIX World);
1215 XMFLOAT3* XM_CALLCONV XMVector3UnprojectStream(_Out_writes_bytes_(sizeof(XMFLOAT3)+OutputStride*(VectorCount-1)) XMFLOAT3* pOutputStream,
1216 _In_ size_t OutputStride,
1217 _In_reads_bytes_(sizeof(XMFLOAT3)+InputStride*(VectorCount-1)) const XMFLOAT3* pInputStream,
1218 _In_ size_t InputStride, _In_ size_t VectorCount,
1219 _In_ float ViewportX, _In_ float ViewportY, _In_ float ViewportWidth, _In_ float ViewportHeight, _In_ float ViewportMinZ, _In_ float ViewportMaxZ,
1220 _In_ FXMMATRIX Projection, _In_ CXMMATRIX View, _In_ CXMMATRIX World);
1222 /****************************************************************************
1224 * 4D vector operations
1226 ****************************************************************************/
// Comparisons over all four components.
1228 bool XM_CALLCONV XMVector4Equal(FXMVECTOR V1, FXMVECTOR V2);
1229 uint32_t XM_CALLCONV XMVector4EqualR(FXMVECTOR V1, FXMVECTOR V2);
1230 bool XM_CALLCONV XMVector4EqualInt(FXMVECTOR V1, FXMVECTOR V2);
1231 uint32_t XM_CALLCONV XMVector4EqualIntR(FXMVECTOR V1, FXMVECTOR V2);
1232 bool XM_CALLCONV XMVector4NearEqual(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR Epsilon);
1233 bool XM_CALLCONV XMVector4NotEqual(FXMVECTOR V1, FXMVECTOR V2);
1234 bool XM_CALLCONV XMVector4NotEqualInt(FXMVECTOR V1, FXMVECTOR V2);
1235 bool XM_CALLCONV XMVector4Greater(FXMVECTOR V1, FXMVECTOR V2);
1236 uint32_t XM_CALLCONV XMVector4GreaterR(FXMVECTOR V1, FXMVECTOR V2);
1237 bool XM_CALLCONV XMVector4GreaterOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1238 uint32_t XM_CALLCONV XMVector4GreaterOrEqualR(FXMVECTOR V1, FXMVECTOR V2);
1239 bool XM_CALLCONV XMVector4Less(FXMVECTOR V1, FXMVECTOR V2);
1240 bool XM_CALLCONV XMVector4LessOrEqual(FXMVECTOR V1, FXMVECTOR V2);
1241 bool XM_CALLCONV XMVector4InBounds(FXMVECTOR V, FXMVECTOR Bounds);
1243 bool XM_CALLCONV XMVector4IsNaN(FXMVECTOR V);
1244 bool XM_CALLCONV XMVector4IsInfinite(FXMVECTOR V);
// 4D geometric operations.
1246 XMVECTOR XM_CALLCONV XMVector4Dot(FXMVECTOR V1, FXMVECTOR V2);
1247 XMVECTOR XM_CALLCONV XMVector4Cross(FXMVECTOR V1, FXMVECTOR V2, FXMVECTOR V3);
1248 XMVECTOR XM_CALLCONV XMVector4LengthSq(FXMVECTOR V);
1249 XMVECTOR XM_CALLCONV XMVector4ReciprocalLengthEst(FXMVECTOR V);
1250 XMVECTOR XM_CALLCONV XMVector4ReciprocalLength(FXMVECTOR V);
1251 XMVECTOR XM_CALLCONV XMVector4LengthEst(FXMVECTOR V);
1252 XMVECTOR XM_CALLCONV XMVector4Length(FXMVECTOR V);
1253 XMVECTOR XM_CALLCONV XMVector4NormalizeEst(FXMVECTOR V);
1254 XMVECTOR XM_CALLCONV XMVector4Normalize(FXMVECTOR V);
1255 XMVECTOR XM_CALLCONV XMVector4ClampLength(FXMVECTOR V, float LengthMin, float LengthMax);
1256 XMVECTOR XM_CALLCONV XMVector4ClampLengthV(FXMVECTOR V, FXMVECTOR LengthMin, FXMVECTOR LengthMax);
1257 XMVECTOR XM_CALLCONV XMVector4Reflect(FXMVECTOR Incident, FXMVECTOR Normal);
1258 XMVECTOR XM_CALLCONV XMVector4Refract(FXMVECTOR Incident, FXMVECTOR Normal, float RefractionIndex);
1259 XMVECTOR XM_CALLCONV XMVector4RefractV(FXMVECTOR Incident, FXMVECTOR Normal, FXMVECTOR RefractionIndex);
1260 XMVECTOR XM_CALLCONV XMVector4Orthogonal(FXMVECTOR V);
1261 XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormalsEst(FXMVECTOR N1, FXMVECTOR N2);
1262 XMVECTOR XM_CALLCONV XMVector4AngleBetweenNormals(FXMVECTOR N1, FXMVECTOR N2);
1263 XMVECTOR XM_CALLCONV XMVector4AngleBetweenVectors(FXMVECTOR V1, FXMVECTOR V2);
// Matrix transforms; the *Stream form processes a strided array of vectors.
1264 XMVECTOR XM_CALLCONV XMVector4Transform(FXMVECTOR V, FXMMATRIX M);
1265 XMFLOAT4* XM_CALLCONV XMVector4TransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4)+OutputStride*(VectorCount-1)) XMFLOAT4* pOutputStream,
1266 _In_ size_t OutputStride,
1267 _In_reads_bytes_(sizeof(XMFLOAT4)+InputStride*(VectorCount-1)) const XMFLOAT4* pInputStream,
1268 _In_ size_t InputStride, _In_ size_t VectorCount, _In_ FXMMATRIX M);
1270 /****************************************************************************
1272 * Matrix operations
1274 ****************************************************************************/
// Matrix predicates.
1276 bool XM_CALLCONV XMMatrixIsNaN(FXMMATRIX M);
1277 bool XM_CALLCONV XMMatrixIsInfinite(FXMMATRIX M);
1278 bool XM_CALLCONV XMMatrixIsIdentity(FXMMATRIX M);
// Composition, transpose, inversion and decomposition.
1280 XMMATRIX XM_CALLCONV XMMatrixMultiply(FXMMATRIX M1, CXMMATRIX M2);
1281 XMMATRIX XM_CALLCONV XMMatrixMultiplyTranspose(FXMMATRIX M1, CXMMATRIX M2);
1282 XMMATRIX XM_CALLCONV XMMatrixTranspose(FXMMATRIX M);
1283 XMMATRIX XM_CALLCONV XMMatrixInverse(_Out_opt_ XMVECTOR* pDeterminant, _In_ FXMMATRIX M);
1284 XMVECTOR XM_CALLCONV XMMatrixDeterminant(FXMMATRIX M);
// _Success_ marks the out-parameters as valid only when the call returns true.
1285 _Success_(return)
1286 bool XM_CALLCONV XMMatrixDecompose(_Out_ XMVECTOR *outScale, _Out_ XMVECTOR *outRotQuat, _Out_ XMVECTOR *outTrans, _In_ FXMMATRIX M);
// Factory functions for affine transforms.
1288 XMMATRIX XM_CALLCONV XMMatrixIdentity();
1289 XMMATRIX XM_CALLCONV XMMatrixSet(float m00, float m01, float m02, float m03,
1290 float m10, float m11, float m12, float m13,
1291 float m20, float m21, float m22, float m23,
1292 float m30, float m31, float m32, float m33);
1293 XMMATRIX XM_CALLCONV XMMatrixTranslation(float OffsetX, float OffsetY, float OffsetZ);
1294 XMMATRIX XM_CALLCONV XMMatrixTranslationFromVector(FXMVECTOR Offset);
1295 XMMATRIX XM_CALLCONV XMMatrixScaling(float ScaleX, float ScaleY, float ScaleZ);
1296 XMMATRIX XM_CALLCONV XMMatrixScalingFromVector(FXMVECTOR Scale);
1297 XMMATRIX XM_CALLCONV XMMatrixRotationX(float Angle);
1298 XMMATRIX XM_CALLCONV XMMatrixRotationY(float Angle);
1299 XMMATRIX XM_CALLCONV XMMatrixRotationZ(float Angle);
1300 XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYaw(float Pitch, float Yaw, float Roll);
1301 XMMATRIX XM_CALLCONV XMMatrixRotationRollPitchYawFromVector(FXMVECTOR Angles);
1302 XMMATRIX XM_CALLCONV XMMatrixRotationNormal(FXMVECTOR NormalAxis, float Angle);
1303 XMMATRIX XM_CALLCONV XMMatrixRotationAxis(FXMVECTOR Axis, float Angle);
1304 XMMATRIX XM_CALLCONV XMMatrixRotationQuaternion(FXMVECTOR Quaternion);
1305 XMMATRIX XM_CALLCONV XMMatrixTransformation2D(FXMVECTOR ScalingOrigin, float ScalingOrientation, FXMVECTOR Scaling,
1306 FXMVECTOR RotationOrigin, float Rotation, GXMVECTOR Translation);
1307 XMMATRIX XM_CALLCONV XMMatrixTransformation(FXMVECTOR ScalingOrigin, FXMVECTOR ScalingOrientationQuaternion, FXMVECTOR Scaling,
1308 GXMVECTOR RotationOrigin, HXMVECTOR RotationQuaternion, HXMVECTOR Translation);
1309 XMMATRIX XM_CALLCONV XMMatrixAffineTransformation2D(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, float Rotation, FXMVECTOR Translation);
1310 XMMATRIX XM_CALLCONV XMMatrixAffineTransformation(FXMVECTOR Scaling, FXMVECTOR RotationOrigin, FXMVECTOR RotationQuaternion, GXMVECTOR Translation);
1311 XMMATRIX XM_CALLCONV XMMatrixReflect(FXMVECTOR ReflectionPlane);
1312 XMMATRIX XM_CALLCONV XMMatrixShadow(FXMVECTOR ShadowPlane, FXMVECTOR LightPosition);
// View and projection matrix builders (LH = left-handed, RH = right-handed).
1314 XMMATRIX XM_CALLCONV XMMatrixLookAtLH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection);
1315 XMMATRIX XM_CALLCONV XMMatrixLookAtRH(FXMVECTOR EyePosition, FXMVECTOR FocusPosition, FXMVECTOR UpDirection);
1316 XMMATRIX XM_CALLCONV XMMatrixLookToLH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection);
1317 XMMATRIX XM_CALLCONV XMMatrixLookToRH(FXMVECTOR EyePosition, FXMVECTOR EyeDirection, FXMVECTOR UpDirection);
1318 XMMATRIX XM_CALLCONV XMMatrixPerspectiveLH(float ViewWidth, float ViewHeight, float NearZ, float FarZ);
1319 XMMATRIX XM_CALLCONV XMMatrixPerspectiveRH(float ViewWidth, float ViewHeight, float NearZ, float FarZ);
1320 XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovLH(float FovAngleY, float AspectRatio, float NearZ, float FarZ);
1321 XMMATRIX XM_CALLCONV XMMatrixPerspectiveFovRH(float FovAngleY, float AspectRatio, float NearZ, float FarZ);
1322 XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterLH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ);
1323 XMMATRIX XM_CALLCONV XMMatrixPerspectiveOffCenterRH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ);
1324 XMMATRIX XM_CALLCONV XMMatrixOrthographicLH(float ViewWidth, float ViewHeight, float NearZ, float FarZ);
1325 XMMATRIX XM_CALLCONV XMMatrixOrthographicRH(float ViewWidth, float ViewHeight, float NearZ, float FarZ);
1326 XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterLH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ);
1327 XMMATRIX XM_CALLCONV XMMatrixOrthographicOffCenterRH(float ViewLeft, float ViewRight, float ViewBottom, float ViewTop, float NearZ, float FarZ);
1330 /****************************************************************************
1332 * Quaternion operations
1334 ****************************************************************************/
// Quaternion predicates.
1336 bool XM_CALLCONV XMQuaternionEqual(FXMVECTOR Q1, FXMVECTOR Q2);
1337 bool XM_CALLCONV XMQuaternionNotEqual(FXMVECTOR Q1, FXMVECTOR Q2);
1339 bool XM_CALLCONV XMQuaternionIsNaN(FXMVECTOR Q);
1340 bool XM_CALLCONV XMQuaternionIsInfinite(FXMVECTOR Q);
1341 bool XM_CALLCONV XMQuaternionIsIdentity(FXMVECTOR Q);
// Quaternion algebra (product, norm, conjugate, inverse, log/exp).
1343 XMVECTOR XM_CALLCONV XMQuaternionDot(FXMVECTOR Q1, FXMVECTOR Q2);
1344 XMVECTOR XM_CALLCONV XMQuaternionMultiply(FXMVECTOR Q1, FXMVECTOR Q2);
1345 XMVECTOR XM_CALLCONV XMQuaternionLengthSq(FXMVECTOR Q);
1346 XMVECTOR XM_CALLCONV XMQuaternionReciprocalLength(FXMVECTOR Q);
1347 XMVECTOR XM_CALLCONV XMQuaternionLength(FXMVECTOR Q);
1348 XMVECTOR XM_CALLCONV XMQuaternionNormalizeEst(FXMVECTOR Q);
1349 XMVECTOR XM_CALLCONV XMQuaternionNormalize(FXMVECTOR Q);
1350 XMVECTOR XM_CALLCONV XMQuaternionConjugate(FXMVECTOR Q);
1351 XMVECTOR XM_CALLCONV XMQuaternionInverse(FXMVECTOR Q);
1352 XMVECTOR XM_CALLCONV XMQuaternionLn(FXMVECTOR Q);
1353 XMVECTOR XM_CALLCONV XMQuaternionExp(FXMVECTOR Q);
// Interpolation (slerp, squad, barycentric).
1354 XMVECTOR XM_CALLCONV XMQuaternionSlerp(FXMVECTOR Q0, FXMVECTOR Q1, float t);
1355 XMVECTOR XM_CALLCONV XMQuaternionSlerpV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR T);
1356 XMVECTOR XM_CALLCONV XMQuaternionSquad(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR Q3, float t);
1357 XMVECTOR XM_CALLCONV XMQuaternionSquadV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR Q3, HXMVECTOR T);
1358 void XM_CALLCONV XMQuaternionSquadSetup(_Out_ XMVECTOR* pA, _Out_ XMVECTOR* pB, _Out_ XMVECTOR* pC, _In_ FXMVECTOR Q0, _In_ FXMVECTOR Q1, _In_ FXMVECTOR Q2, _In_ GXMVECTOR Q3);
1359 XMVECTOR XM_CALLCONV XMQuaternionBaryCentric(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, float f, float g);
1360 XMVECTOR XM_CALLCONV XMQuaternionBaryCentricV(FXMVECTOR Q0, FXMVECTOR Q1, FXMVECTOR Q2, GXMVECTOR F, HXMVECTOR G);
// Construction from Euler angles, axis/angle and rotation matrices.
1362 XMVECTOR XM_CALLCONV XMQuaternionIdentity();
1363 XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYaw(float Pitch, float Yaw, float Roll);
1364 XMVECTOR XM_CALLCONV XMQuaternionRotationRollPitchYawFromVector(FXMVECTOR Angles);
1365 XMVECTOR XM_CALLCONV XMQuaternionRotationNormal(FXMVECTOR NormalAxis, float Angle);
1366 XMVECTOR XM_CALLCONV XMQuaternionRotationAxis(FXMVECTOR Axis, float Angle);
1367 XMVECTOR XM_CALLCONV XMQuaternionRotationMatrix(FXMMATRIX M);
// Extraction back to axis/angle form.
1369 void XM_CALLCONV XMQuaternionToAxisAngle(_Out_ XMVECTOR* pAxis, _Out_ float* pAngle, _In_ FXMVECTOR Q);
1371 /****************************************************************************
1373 * Plane operations
1375 ****************************************************************************/
1377 bool XM_CALLCONV XMPlaneEqual(FXMVECTOR P1, FXMVECTOR P2);
1378 bool XM_CALLCONV XMPlaneNearEqual(FXMVECTOR P1, FXMVECTOR P2, FXMVECTOR Epsilon);
1379 bool XM_CALLCONV XMPlaneNotEqual(FXMVECTOR P1, FXMVECTOR P2);
1381 bool XM_CALLCONV XMPlaneIsNaN(FXMVECTOR P);
1382 bool XM_CALLCONV XMPlaneIsInfinite(FXMVECTOR P);
1384 XMVECTOR XM_CALLCONV XMPlaneDot(FXMVECTOR P, FXMVECTOR V);
1385 XMVECTOR XM_CALLCONV XMPlaneDotCoord(FXMVECTOR P, FXMVECTOR V);
1386 XMVECTOR XM_CALLCONV XMPlaneDotNormal(FXMVECTOR P, FXMVECTOR V);
1387 XMVECTOR XM_CALLCONV XMPlaneNormalizeEst(FXMVECTOR P);
1388 XMVECTOR XM_CALLCONV XMPlaneNormalize(FXMVECTOR P);
1389 XMVECTOR XM_CALLCONV XMPlaneIntersectLine(FXMVECTOR P, FXMVECTOR LinePoint1, FXMVECTOR LinePoint2);
1390 void XM_CALLCONV XMPlaneIntersectPlane(_Out_ XMVECTOR* pLinePoint1, _Out_ XMVECTOR* pLinePoint2, _In_ FXMVECTOR P1, _In_ FXMVECTOR P2);
1391 XMVECTOR XM_CALLCONV XMPlaneTransform(FXMVECTOR P, FXMMATRIX M);
1392 XMFLOAT4* XM_CALLCONV XMPlaneTransformStream(_Out_writes_bytes_(sizeof(XMFLOAT4)+OutputStride*(PlaneCount-1)) XMFLOAT4* pOutputStream,
1393 _In_ size_t OutputStride,
1394 _In_reads_bytes_(sizeof(XMFLOAT4)+InputStride*(PlaneCount-1)) const XMFLOAT4* pInputStream,
1395 _In_ size_t InputStride, _In_ size_t PlaneCount, _In_ FXMMATRIX M);
1397 XMVECTOR XM_CALLCONV XMPlaneFromPointNormal(FXMVECTOR Point, FXMVECTOR Normal);
1398 XMVECTOR XM_CALLCONV XMPlaneFromPoints(FXMVECTOR Point1, FXMVECTOR Point2, FXMVECTOR Point3);
1400 /****************************************************************************
1402 * Color operations
1404 ****************************************************************************/
1406 bool XM_CALLCONV XMColorEqual(FXMVECTOR C1, FXMVECTOR C2);
1407 bool XM_CALLCONV XMColorNotEqual(FXMVECTOR C1, FXMVECTOR C2);
1408 bool XM_CALLCONV XMColorGreater(FXMVECTOR C1, FXMVECTOR C2);
1409 bool XM_CALLCONV XMColorGreaterOrEqual(FXMVECTOR C1, FXMVECTOR C2);
1410 bool XM_CALLCONV XMColorLess(FXMVECTOR C1, FXMVECTOR C2);
1411 bool XM_CALLCONV XMColorLessOrEqual(FXMVECTOR C1, FXMVECTOR C2);
1413 bool XM_CALLCONV XMColorIsNaN(FXMVECTOR C);
1414 bool XM_CALLCONV XMColorIsInfinite(FXMVECTOR C);
1416 XMVECTOR XM_CALLCONV XMColorNegative(FXMVECTOR C);
1417 XMVECTOR XM_CALLCONV XMColorModulate(FXMVECTOR C1, FXMVECTOR C2);
1418 XMVECTOR XM_CALLCONV XMColorAdjustSaturation(FXMVECTOR C, float Saturation);
1419 XMVECTOR XM_CALLCONV XMColorAdjustContrast(FXMVECTOR C, float Contrast);
1421 XMVECTOR XM_CALLCONV XMColorRGBToHSL( FXMVECTOR rgb );
1422 XMVECTOR XM_CALLCONV XMColorHSLToRGB( FXMVECTOR hsl );
1424 XMVECTOR XM_CALLCONV XMColorRGBToHSV( FXMVECTOR rgb );
1425 XMVECTOR XM_CALLCONV XMColorHSVToRGB( FXMVECTOR hsv );
1427 XMVECTOR XM_CALLCONV XMColorRGBToYUV( FXMVECTOR rgb );
1428 XMVECTOR XM_CALLCONV XMColorYUVToRGB( FXMVECTOR yuv );
1430 XMVECTOR XM_CALLCONV XMColorRGBToYUV_HD( FXMVECTOR rgb );
1431 XMVECTOR XM_CALLCONV XMColorYUVToRGB_HD( FXMVECTOR yuv );
1433 XMVECTOR XM_CALLCONV XMColorRGBToXYZ( FXMVECTOR rgb );
1434 XMVECTOR XM_CALLCONV XMColorXYZToRGB( FXMVECTOR xyz );
1436 XMVECTOR XM_CALLCONV XMColorXYZToSRGB( FXMVECTOR xyz );
1437 XMVECTOR XM_CALLCONV XMColorSRGBToXYZ( FXMVECTOR srgb );
1439 XMVECTOR XM_CALLCONV XMColorRGBToSRGB( FXMVECTOR rgb );
1440 XMVECTOR XM_CALLCONV XMColorSRGBToRGB( FXMVECTOR srgb );
1443 /****************************************************************************
1445 * Miscellaneous operations
1447 ****************************************************************************/
1449 bool XMVerifyCPUSupport();
1451 XMVECTOR XM_CALLCONV XMFresnelTerm(FXMVECTOR CosIncidentAngle, FXMVECTOR RefractionIndex);
1453 bool XMScalarNearEqual(float S1, float S2, float Epsilon);
1454 float XMScalarModAngle(float Value);
1456 float XMScalarSin(float Value);
1457 float XMScalarSinEst(float Value);
1459 float XMScalarCos(float Value);
1460 float XMScalarCosEst(float Value);
1462 void XMScalarSinCos(_Out_ float* pSin, _Out_ float* pCos, float Value);
1463 void XMScalarSinCosEst(_Out_ float* pSin, _Out_ float* pCos, float Value);
1465 float XMScalarASin(float Value);
1466 float XMScalarASinEst(float Value);
1468 float XMScalarACos(float Value);
1469 float XMScalarACosEst(float Value);
1471 /****************************************************************************
1473 * Templates
1475 ****************************************************************************/
#if defined(__XNAMATH_H__) && defined(XMMin)
// Legacy XNAMath defined these as macros; remove them so the template
// versions below take effect.
#undef XMMin
#undef XMMax
#endif

// Returns the smaller of the two values.
template<class T> inline T XMMin(T a, T b) { return (b < a) ? b : a; }
// Returns the larger of the two values.
template<class T> inline T XMMax(T a, T b) { return (b > a) ? b : a; }
1485 //------------------------------------------------------------------------------
#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)

// PermuteHelper internal template (SSE only)
namespace Internal
{
    // Slow path fallback for permutes that do not map to a single SSE shuffle opcode:
    // shuffle both inputs into position, then blend them with a constant mask.
    template<uint32_t Shuffle, bool WhichX, bool WhichY, bool WhichZ, bool WhichW> struct PermuteHelper
    {
        static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2)
        {
            // WhichN == true means component N is taken from v2, else from v1.
            static const XMVECTORU32 selectMask =
            { { {
                    WhichX ? 0xFFFFFFFF : 0,
                    WhichY ? 0xFFFFFFFF : 0,
                    WhichZ ? 0xFFFFFFFF : 0,
                    WhichW ? 0xFFFFFFFF : 0,
            } } };

            XMVECTOR shuffled1 = XM_PERMUTE_PS(v1, Shuffle);
            XMVECTOR shuffled2 = XM_PERMUTE_PS(v2, Shuffle);

            XMVECTOR masked1 = _mm_andnot_ps(selectMask, shuffled1);
            XMVECTOR masked2 = _mm_and_ps(selectMask, shuffled2);

            return _mm_or_ps(masked1, masked2);
        }
    };

    // Fast path for permutes that only read from the first vector.
    template<uint32_t Shuffle> struct PermuteHelper<Shuffle, false, false, false, false>
    {
        static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR) { return XM_PERMUTE_PS(v1, Shuffle); }
    };

    // Fast path for permutes that only read from the second vector.
    template<uint32_t Shuffle> struct PermuteHelper<Shuffle, true, true, true, true>
    {
        static XMVECTOR XM_CALLCONV Permute(FXMVECTOR, FXMVECTOR v2) { return XM_PERMUTE_PS(v2, Shuffle); }
    };

    // Fast path for permutes that read XY from the first vector, ZW from the second.
    template<uint32_t Shuffle> struct PermuteHelper<Shuffle, false, false, true, true>
    {
        static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) { return _mm_shuffle_ps(v1, v2, Shuffle); }
    };

    // Fast path for permutes that read XY from the second vector, ZW from the first.
    template<uint32_t Shuffle> struct PermuteHelper<Shuffle, true, true, false, false>
    {
        static XMVECTOR XM_CALLCONV Permute(FXMVECTOR v1, FXMVECTOR v2) { return _mm_shuffle_ps(v2, v1, Shuffle); }
    };
}

#endif // _XM_SSE_INTRINSICS_ && !_XM_NO_INTRINSICS_
1542 // General permute template
1543 template<uint32_t PermuteX, uint32_t PermuteY, uint32_t PermuteZ, uint32_t PermuteW>
1544 inline XMVECTOR XM_CALLCONV XMVectorPermute(FXMVECTOR V1, FXMVECTOR V2)
1546 static_assert(PermuteX <= 7, "PermuteX template parameter out of range");
1547 static_assert(PermuteY <= 7, "PermuteY template parameter out of range");
1548 static_assert(PermuteZ <= 7, "PermuteZ template parameter out of range");
1549 static_assert(PermuteW <= 7, "PermuteW template parameter out of range");
1551 #if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
1552 const uint32_t Shuffle = _MM_SHUFFLE(PermuteW & 3, PermuteZ & 3, PermuteY & 3, PermuteX & 3);
1554 const bool WhichX = PermuteX > 3;
1555 const bool WhichY = PermuteY > 3;
1556 const bool WhichZ = PermuteZ > 3;
1557 const bool WhichW = PermuteW > 3;
1559 return Internal::PermuteHelper<Shuffle, WhichX, WhichY, WhichZ, WhichW>::Permute(V1, V2);
1560 #else
1562 return XMVectorPermute( V1, V2, PermuteX, PermuteY, PermuteZ, PermuteW );
1564 #endif
// Special-case permute templates

// Identity cases: all components come from a single source vector.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,2,3>(FXMVECTOR V1, FXMVECTOR) { return V1; }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,5,6,7>(FXMVECTOR, FXMVECTOR V2) { return V2; }

#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
// Index patterns that map directly onto a single SSE move/unpack instruction.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,4,5>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_movelh_ps(V1,V2); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<6,7,2,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_movehl_ps(V1,V2); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,4,1,5>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_unpacklo_ps(V1,V2); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2,6,3,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_unpackhi_ps(V1,V2); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2,3,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(V1), _mm_castps_pd(V2))); }
#endif

#if defined(_XM_SSE4_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
// Lane-preserving mixes of V1/V2 map onto _mm_blend_ps; each set bit of the
// immediate takes the corresponding component from V2.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,1,2,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x1); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,5,2,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x2); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,5,2,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x3); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,6,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x4); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,1,6,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x5); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,5,6,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x6); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,5,6,3>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x7); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,2,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x8); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,1,2,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0x9); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,5,2,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0xA); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,5,2,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0xB); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0xC); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<4,1,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0xD); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,5,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return _mm_blend_ps(V1,V2,0xE); }
#endif
#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)

// If the indices are all in the range 0-3 or 4-7, then use XMVectorSwizzle instead
// The mirror cases are not spelled out here as the programmer can always swap the arguments
// (i.e. prefer permutes where the X element comes from the V1 vector instead of the V2 vector)

// Combinations of the low halves of V1 and V2 (with optional pair reversal).
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,4,5>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vget_low_f32(V1), vget_low_f32(V2) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1,0,4,5>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_low_f32(V1) ), vget_low_f32(V2) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,5,4>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vget_low_f32(V1), vrev64_f32( vget_low_f32(V2) ) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1,0,5,4>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_low_f32(V1) ), vrev64_f32( vget_low_f32(V2) ) ); }

// Combinations of the high halves of V1 and V2.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2,3,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vget_high_f32(V1), vget_high_f32(V2) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3,2,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_high_f32(V1) ), vget_high_f32(V2) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2,3,7,6>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vget_high_f32(V1), vrev64_f32( vget_high_f32(V2) ) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3,2,7,6>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_high_f32(V1) ), vrev64_f32( vget_high_f32(V2) ) ); }

// Low half of V1 combined with the high half of V2.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vget_low_f32(V1), vget_high_f32(V2) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1,0,6,7>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_low_f32(V1) ), vget_high_f32(V2) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,1,7,6>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vget_low_f32(V1), vrev64_f32( vget_high_f32(V2) ) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1,0,7,6>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_low_f32(V1) ), vrev64_f32( vget_high_f32(V2) ) ); }

// High half of V1 combined with the low half of V2.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3,2,4,5>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_high_f32(V1) ), vget_low_f32(V2) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2,3,5,4>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vget_high_f32(V1), vrev64_f32( vget_low_f32(V2) ) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3,2,5,4>(FXMVECTOR V1, FXMVECTOR V2) { return vcombine_f32( vrev64_f32( vget_high_f32(V1) ), vrev64_f32( vget_low_f32(V2) ) ); }

// Transpose / zip / unzip patterns map onto single NEON permute instructions.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,4,2,6>(FXMVECTOR V1, FXMVECTOR V2) { return vtrnq_f32(V1,V2).val[0]; }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1,5,3,7>(FXMVECTOR V1, FXMVECTOR V2) { return vtrnq_f32(V1,V2).val[1]; }

template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,4,1,5>(FXMVECTOR V1, FXMVECTOR V2) { return vzipq_f32(V1,V2).val[0]; }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2,6,3,7>(FXMVECTOR V1, FXMVECTOR V2) { return vzipq_f32(V1,V2).val[1]; }

template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<0,2,4,6>(FXMVECTOR V1, FXMVECTOR V2) { return vuzpq_f32(V1,V2).val[0]; }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1,3,5,7>(FXMVECTOR V1, FXMVECTOR V2) { return vuzpq_f32(V1,V2).val[1]; }

// Sliding windows across the concatenation of V1:V2 map onto vextq_f32.
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<1,2,3,4>(FXMVECTOR V1, FXMVECTOR V2) { return vextq_f32(V1, V2, 1); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<2,3,4,5>(FXMVECTOR V1, FXMVECTOR V2) { return vextq_f32(V1, V2, 2); }
template<> inline XMVECTOR XM_CALLCONV XMVectorPermute<3,4,5,6>(FXMVECTOR V1, FXMVECTOR V2) { return vextq_f32(V1, V2, 3); }

#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_
1636 //------------------------------------------------------------------------------
1638 // General swizzle template
1639 template<uint32_t SwizzleX, uint32_t SwizzleY, uint32_t SwizzleZ, uint32_t SwizzleW>
1640 inline XMVECTOR XM_CALLCONV XMVectorSwizzle(FXMVECTOR V)
1642 static_assert(SwizzleX <= 3, "SwizzleX template parameter out of range");
1643 static_assert(SwizzleY <= 3, "SwizzleY template parameter out of range");
1644 static_assert(SwizzleZ <= 3, "SwizzleZ template parameter out of range");
1645 static_assert(SwizzleW <= 3, "SwizzleW template parameter out of range");
1647 #if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
1648 return XM_PERMUTE_PS( V, _MM_SHUFFLE( SwizzleW, SwizzleZ, SwizzleY, SwizzleX ) );
1649 #else
1651 return XMVectorSwizzle( V, SwizzleX, SwizzleY, SwizzleZ, SwizzleW );
1653 #endif
// Specialized swizzles

// Identity swizzle: nothing to do.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,1,2,3>(FXMVECTOR V) { return V; }

#if defined(_XM_SSE_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
// Patterns that map directly onto a single SSE move/unpack instruction.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,1,0,1>(FXMVECTOR V) { return _mm_movelh_ps(V,V); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2,3,2,3>(FXMVECTOR V) { return _mm_movehl_ps(V,V); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,0,1,1>(FXMVECTOR V) { return _mm_unpacklo_ps(V,V); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2,2,3,3>(FXMVECTOR V) { return _mm_unpackhi_ps(V,V); }
#endif

#if defined(_XM_SSE3_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
// SSE3 duplicate-even / duplicate-odd instructions.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,0,2,2>(FXMVECTOR V) { return _mm_moveldup_ps(V); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,1,3,3>(FXMVECTOR V) { return _mm_movehdup_ps(V); }
#endif

#if defined(_XM_AVX2_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)
// AVX2 single-instruction broadcast of component X.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,0,0,0>(FXMVECTOR V) { return _mm_broadcastss_ps( V ); }
#endif

#if defined(_XM_ARM_NEON_INTRINSICS_) && !defined(_XM_NO_INTRINSICS_)

// Broadcast a single lane to all four components.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,0,0,0>(FXMVECTOR V) { return vdupq_lane_f32( vget_low_f32(V), 0); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,1,1,1>(FXMVECTOR V) { return vdupq_lane_f32( vget_low_f32(V), 1); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2,2,2,2>(FXMVECTOR V) { return vdupq_lane_f32( vget_high_f32(V), 0); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3,3,3,3>(FXMVECTOR V) { return vdupq_lane_f32( vget_high_f32(V), 1); }

// Swap within each 64-bit pair.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,0,3,2>(FXMVECTOR V) { return vrev64q_f32(V); }

// Duplicate or reverse-and-duplicate a 64-bit half.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,1,0,1>(FXMVECTOR V) { float32x2_t vt = vget_low_f32(V); return vcombine_f32( vt, vt ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2,3,2,3>(FXMVECTOR V) { float32x2_t vt = vget_high_f32(V); return vcombine_f32( vt, vt ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,0,1,0>(FXMVECTOR V) { float32x2_t vt = vrev64_f32( vget_low_f32(V) ); return vcombine_f32( vt, vt ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3,2,3,2>(FXMVECTOR V) { float32x2_t vt = vrev64_f32( vget_high_f32(V) ); return vcombine_f32( vt, vt ); }

// Recombine low/high halves with optional per-pair reversal.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,1,3,2>(FXMVECTOR V) { return vcombine_f32( vget_low_f32(V), vrev64_f32( vget_high_f32(V) ) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,0,2,3>(FXMVECTOR V) { return vcombine_f32( vrev64_f32( vget_low_f32(V) ), vget_high_f32(V) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2,3,1,0>(FXMVECTOR V) { return vcombine_f32( vget_high_f32(V), vrev64_f32( vget_low_f32(V) ) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3,2,0,1>(FXMVECTOR V) { return vcombine_f32( vrev64_f32( vget_high_f32(V) ), vget_low_f32(V) ); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3,2,1,0>(FXMVECTOR V) { return vcombine_f32( vrev64_f32( vget_high_f32(V) ), vrev64_f32( vget_low_f32(V) ) ); }

// Transpose / zip / unzip patterns (single NEON instruction each).
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,0,2,2>(FXMVECTOR V) { return vtrnq_f32(V,V).val[0]; }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,1,3,3>(FXMVECTOR V) { return vtrnq_f32(V,V).val[1]; }

template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,0,1,1>(FXMVECTOR V) { return vzipq_f32(V,V).val[0]; }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2,2,3,3>(FXMVECTOR V) { return vzipq_f32(V,V).val[1]; }

template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<0,2,0,2>(FXMVECTOR V) { return vuzpq_f32(V,V).val[0]; }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,3,1,3>(FXMVECTOR V) { return vuzpq_f32(V,V).val[1]; }

// Rotations map onto vextq_f32 of V with itself.
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<1,2,3,0>(FXMVECTOR V) { return vextq_f32(V, V, 1); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<2,3,0,1>(FXMVECTOR V) { return vextq_f32(V, V, 2); }
template<> inline XMVECTOR XM_CALLCONV XMVectorSwizzle<3,0,1,2>(FXMVECTOR V) { return vextq_f32(V, V, 3); }

#endif // _XM_ARM_NEON_INTRINSICS_ && !_XM_NO_INTRINSICS_
1710 //------------------------------------------------------------------------------
1712 template<uint32_t Elements>
1713 inline XMVECTOR XM_CALLCONV XMVectorShiftLeft(FXMVECTOR V1, FXMVECTOR V2)
1715 static_assert( Elements < 4, "Elements template parameter out of range" );
1716 return XMVectorPermute<Elements, (Elements + 1), (Elements + 2), (Elements + 3)>(V1, V2);
1719 template<uint32_t Elements>
1720 inline XMVECTOR XM_CALLCONV XMVectorRotateLeft(FXMVECTOR V)
1722 static_assert( Elements < 4, "Elements template parameter out of range" );
1723 return XMVectorSwizzle<Elements & 3, (Elements + 1) & 3, (Elements + 2) & 3, (Elements + 3) & 3>(V);
1726 template<uint32_t Elements>
1727 inline XMVECTOR XM_CALLCONV XMVectorRotateRight(FXMVECTOR V)
1729 static_assert( Elements < 4, "Elements template parameter out of range" );
1730 return XMVectorSwizzle<(4 - Elements) & 3, (5 - Elements) & 3, (6 - Elements) & 3, (7 - Elements) & 3>(V);
1733 template<uint32_t VSLeftRotateElements, uint32_t Select0, uint32_t Select1, uint32_t Select2, uint32_t Select3>
1734 inline XMVECTOR XM_CALLCONV XMVectorInsert(FXMVECTOR VD, FXMVECTOR VS)
1736 XMVECTOR Control = XMVectorSelectControl(Select0&1, Select1&1, Select2&1, Select3&1);
1737 return XMVectorSelect( VD, XMVectorRotateLeft<VSLeftRotateElements>(VS), Control );
/****************************************************************************
 *
 * Globals
 *
 ****************************************************************************/

// The purpose of the following global constants is to prevent redundant
// reloading of the constants when they are referenced by more than one
// separate inline math routine called within the same function. Declaring
// a constant locally within a routine is sufficient to prevent redundant
// reloads of that constant when that single routine is called multiple
// times in a function, but if the constant is used (and declared) in a
// separate math routine it would be reloaded.

#ifndef XMGLOBALCONST
#if defined(__GNUC__) && !defined(__MINGW32__)
// GCC (e.g. GCC 7 on Linux) has no __declspec(selectany)/DECLSPEC_SELECTANY;
// a weak symbol gives the same fold-duplicate-definitions linkage for these
// header-defined constants.
#define XMGLOBALCONST extern const __attribute__((weak))
#else
#define XMGLOBALCONST extern const DECLSPEC_SELECTANY
#endif
#endif
// Minimax polynomial coefficients for the trig approximations; the /*EstN*/
// entries feed the faster, lower-precision *Est variants.
XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients0 = { { { -0.16666667f, +0.0083333310f, -0.00019840874f, +2.7525562e-06f } } };
XMGLOBALCONST XMVECTORF32 g_XMSinCoefficients1 = { { { -2.3889859e-08f, -0.16665852f /*Est1*/, +0.0083139502f /*Est2*/, -0.00018524670f /*Est3*/ } } };
XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients0 = { { { -0.5f, +0.041666638f, -0.0013888378f, +2.4760495e-05f } } };
XMGLOBALCONST XMVECTORF32 g_XMCosCoefficients1 = { { { -2.6051615e-07f, -0.49992746f /*Est1*/, +0.041493919f /*Est2*/, -0.0012712436f /*Est3*/ } } };
XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients0 = { { { 1.0f, 0.333333333f, 0.133333333f, 5.396825397e-2f } } };
XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients1 = { { { 2.186948854e-2f, 8.863235530e-3f, 3.592128167e-3f, 1.455834485e-3f } } };
XMGLOBALCONST XMVECTORF32 g_XMTanCoefficients2 = { { { 5.900274264e-4f, 2.391290764e-4f, 9.691537707e-5f, 3.927832950e-5f } } };
// Inverse-trig polynomial coefficients.
XMGLOBALCONST XMVECTORF32 g_XMArcCoefficients0 = { { { +1.5707963050f, -0.2145988016f, +0.0889789874f, -0.0501743046f } } };
XMGLOBALCONST XMVECTORF32 g_XMArcCoefficients1 = { { { +0.0308918810f, -0.0170881256f, +0.0066700901f, -0.0012624911f } } };
XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients0 = { { { -0.3333314528f, +0.1999355085f, -0.1420889944f, +0.1065626393f } } };
XMGLOBALCONST XMVECTORF32 g_XMATanCoefficients1 = { { { -0.0752896400f, +0.0429096138f, -0.0161657367f, +0.0028662257f } } };
XMGLOBALCONST XMVECTORF32 g_XMATanEstCoefficients0 = { { { +0.999866f, +0.999866f, +0.999866f, +0.999866f } } };
XMGLOBALCONST XMVECTORF32 g_XMATanEstCoefficients1 = { { { -0.3302995f, +0.180141f, -0.085133f, +0.0208351f } } };
XMGLOBALCONST XMVECTORF32 g_XMTanEstCoefficients = { { { 2.484f, -1.954923183e-1f, 2.467401101f, XM_1DIVPI } } };
XMGLOBALCONST XMVECTORF32 g_XMArcEstCoefficients = { { { +1.5707288f, -0.2121144f, +0.0742610f, -0.0187293f } } };
XMGLOBALCONST XMVECTORF32 g_XMPiConstants0 = { { { XM_PI, XM_2PI, XM_1DIVPI, XM_1DIV2PI } } };
// Identity matrix rows and their negations.
XMGLOBALCONST XMVECTORF32 g_XMIdentityR0 = { { { 1.0f, 0.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMIdentityR1 = { { { 0.0f, 1.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMIdentityR2 = { { { 0.0f, 0.0f, 1.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMIdentityR3 = { { { 0.0f, 0.0f, 0.0f, 1.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR0 = { { { -1.0f, 0.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR1 = { { { 0.0f, -1.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR2 = { { { 0.0f, 0.0f, -1.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegIdentityR3 = { { { 0.0f, 0.0f, 0.0f, -1.0f } } };
// Bit masks for sign flipping and component selection.
XMGLOBALCONST XMVECTORU32 g_XMNegativeZero = { { { 0x80000000, 0x80000000, 0x80000000, 0x80000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMNegate3 = { { { 0x80000000, 0x80000000, 0x80000000, 0x00000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskXY = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMMask3 = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskX = { { { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskY = { { { 0x00000000, 0xFFFFFFFF, 0x00000000, 0x00000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskZ = { { { 0x00000000, 0x00000000, 0xFFFFFFFF, 0x00000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskW = { { { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF } } };
// Common scalar constants splatted across all four components.
XMGLOBALCONST XMVECTORF32 g_XMOne = { { { 1.0f, 1.0f, 1.0f, 1.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMOne3 = { { { 1.0f, 1.0f, 1.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMZero = { { { 0.0f, 0.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMTwo = { { { 2.f, 2.f, 2.f, 2.f } } };
XMGLOBALCONST XMVECTORF32 g_XMFour = { { { 4.f, 4.f, 4.f, 4.f } } };
XMGLOBALCONST XMVECTORF32 g_XMSix = { { { 6.f, 6.f, 6.f, 6.f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegativeOne = { { { -1.0f, -1.0f, -1.0f, -1.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMOneHalf = { { { 0.5f, 0.5f, 0.5f, 0.5f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegativeOneHalf = { { { -0.5f, -0.5f, -0.5f, -0.5f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegativeTwoPi = { { { -XM_2PI, -XM_2PI, -XM_2PI, -XM_2PI } } };
XMGLOBALCONST XMVECTORF32 g_XMNegativePi = { { { -XM_PI, -XM_PI, -XM_PI, -XM_PI } } };
XMGLOBALCONST XMVECTORF32 g_XMHalfPi = { { { XM_PIDIV2, XM_PIDIV2, XM_PIDIV2, XM_PIDIV2 } } };
XMGLOBALCONST XMVECTORF32 g_XMPi = { { { XM_PI, XM_PI, XM_PI, XM_PI } } };
XMGLOBALCONST XMVECTORF32 g_XMReciprocalPi = { { { XM_1DIVPI, XM_1DIVPI, XM_1DIVPI, XM_1DIVPI } } };
XMGLOBALCONST XMVECTORF32 g_XMTwoPi = { { { XM_2PI, XM_2PI, XM_2PI, XM_2PI } } };
XMGLOBALCONST XMVECTORF32 g_XMReciprocalTwoPi = { { { XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI, XM_1DIV2PI } } };
XMGLOBALCONST XMVECTORF32 g_XMEpsilon = { { { 1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f, 1.192092896e-7f } } };
// IEEE-754 single-precision bit patterns (infinity, quiet NaN, abs mask, limits).
XMGLOBALCONST XMVECTORI32 g_XMInfinity = { { { 0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000 } } };
XMGLOBALCONST XMVECTORI32 g_XMQNaN = { { { 0x7FC00000, 0x7FC00000, 0x7FC00000, 0x7FC00000 } } };
XMGLOBALCONST XMVECTORI32 g_XMQNaNTest = { { { 0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF } } };
XMGLOBALCONST XMVECTORI32 g_XMAbsMask = { { { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF } } };
XMGLOBALCONST XMVECTORI32 g_XMFltMin = { { { 0x00800000, 0x00800000, 0x00800000, 0x00800000 } } };
XMGLOBALCONST XMVECTORI32 g_XMFltMax = { { { 0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF, 0x7F7FFFFF } } };
XMGLOBALCONST XMVECTORU32 g_XMNegOneMask = { { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF } } };
// Packed-pixel format decode helpers (A8R8G8B8, A2B10G10R10, X16Y16 family).
XMGLOBALCONST XMVECTORU32 g_XMMaskA8R8G8B8 = { { { 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipA8R8G8B8 = { { { 0x00000000, 0x00000000, 0x00000000, 0x80000000 } } };
XMGLOBALCONST XMVECTORF32 g_XMFixAA8R8G8B8 = { { { 0.0f, 0.0f, 0.0f, (float) (0x80000000U) } } };
XMGLOBALCONST XMVECTORF32 g_XMNormalizeA8R8G8B8 = { { { 1.0f / (255.0f*(float) (0x10000)), 1.0f / (255.0f*(float) (0x100)), 1.0f / 255.0f, 1.0f / (255.0f*(float) (0x1000000)) } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskA2B10G10R10 = { { { 0x000003FF, 0x000FFC00, 0x3FF00000, 0xC0000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipA2B10G10R10 = { { { 0x00000200, 0x00080000, 0x20000000, 0x80000000 } } };
XMGLOBALCONST XMVECTORF32 g_XMFixAA2B10G10R10 = { { { -512.0f, -512.0f*(float) (0x400), -512.0f*(float) (0x100000), (float) (0x80000000U) } } };
XMGLOBALCONST XMVECTORF32 g_XMNormalizeA2B10G10R10 = { { { 1.0f / 511.0f, 1.0f / (511.0f*(float) (0x400)), 1.0f / (511.0f*(float) (0x100000)), 1.0f / (3.0f*(float) (0x40000000)) } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskX16Y16 = { { { 0x0000FFFF, 0xFFFF0000, 0x00000000, 0x00000000 } } };
XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16 = { { { 0x00008000, 0x00000000, 0x00000000, 0x00000000 } } };
XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16 = { { { -32768.0f, 0.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16 = { { { 1.0f / 32767.0f, 1.0f / (32767.0f*65536.0f), 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORU32 g_XMMaskX16Y16Z16W16 = { { { 0x0000FFFF, 0x0000FFFF, 0xFFFF0000, 0xFFFF0000 } } };
XMGLOBALCONST XMVECTORI32 g_XMFlipX16Y16Z16W16 = { { { 0x00008000, 0x00008000, 0x00000000, 0x00000000 } } };
XMGLOBALCONST XMVECTORF32 g_XMFixX16Y16Z16W16 = { { { -32768.0f, -32768.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNormalizeX16Y16Z16W16 = { { { 1.0f / 32767.0f, 1.0f / 32767.0f, 1.0f / (32767.0f*65536.0f), 1.0f / (32767.0f*65536.0f) } } };
// Rounding helper (2^23: floats at or above this have no fractional bits),
// byte masks, and single-component negation constants.
XMGLOBALCONST XMVECTORF32 g_XMNoFraction = { { { 8388608.0f, 8388608.0f, 8388608.0f, 8388608.0f } } };
XMGLOBALCONST XMVECTORI32 g_XMMaskByte = { { { 0x000000FF, 0x000000FF, 0x000000FF, 0x000000FF } } };
XMGLOBALCONST XMVECTORF32 g_XMNegateX = { { { -1.0f, 1.0f, 1.0f, 1.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegateY = { { { 1.0f, -1.0f, 1.0f, 1.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegateZ = { { { 1.0f, 1.0f, -1.0f, 1.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMNegateW = { { { 1.0f, 1.0f, 1.0f, -1.0f } } };
// Precomputed select-control masks for XMVectorSelect.
XMGLOBALCONST XMVECTORU32 g_XMSelect0101 = { { { XM_SELECT_0, XM_SELECT_1, XM_SELECT_0, XM_SELECT_1 } } };
XMGLOBALCONST XMVECTORU32 g_XMSelect1010 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_1, XM_SELECT_0 } } };
XMGLOBALCONST XMVECTORI32 g_XMOneHalfMinusEpsilon = { { { 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD, 0x3EFFFFFD } } };
XMGLOBALCONST XMVECTORU32 g_XMSelect1000 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_0, XM_SELECT_0 } } };
XMGLOBALCONST XMVECTORU32 g_XMSelect1100 = { { { XM_SELECT_1, XM_SELECT_1, XM_SELECT_0, XM_SELECT_0 } } };
XMGLOBALCONST XMVECTORU32 g_XMSelect1110 = { { { XM_SELECT_1, XM_SELECT_1, XM_SELECT_1, XM_SELECT_0 } } };
XMGLOBALCONST XMVECTORU32 g_XMSelect1011 = { { { XM_SELECT_1, XM_SELECT_0, XM_SELECT_1, XM_SELECT_1 } } };
// Scale/sign-flip helpers for 16-bit component formats.
XMGLOBALCONST XMVECTORF32 g_XMFixupY16 = { { { 1.0f, 1.0f / 65536.0f, 0.0f, 0.0f } } };
XMGLOBALCONST XMVECTORF32 g_XMFixupY16W16 = { { { 1.0f, 1.0f, 1.0f / 65536.0f, 1.0f / 65536.0f } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipY = { { { 0, 0x80000000, 0, 0 } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipZ = { { { 0, 0, 0x80000000, 0 } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipW = { { { 0, 0, 0, 0x80000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipYZ = { { { 0, 0x80000000, 0x80000000, 0 } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipZW = { { { 0, 0, 0x80000000, 0x80000000 } } };
XMGLOBALCONST XMVECTORU32 g_XMFlipYW = { { { 0, 0x80000000, 0, 0x80000000 } } };
1851 XMGLOBALCONST XMVECTORI32 g_XMMaskDec4 = { { { 0x3FF, 0x3FF << 10, 0x3FF << 20, 0x3 << 30 } } };
1852 XMGLOBALCONST XMVECTORI32 g_XMXorDec4 = { { { 0x200, 0x200 << 10, 0x200 << 20, 0 } } };
1853 XMGLOBALCONST XMVECTORF32 g_XMAddUDec4 = { { { 0, 0, 0, 32768.0f*65536.0f } } };
1854 XMGLOBALCONST XMVECTORF32 g_XMAddDec4 = { { { -512.0f, -512.0f*1024.0f, -512.0f*1024.0f*1024.0f, 0 } } };
1855 XMGLOBALCONST XMVECTORF32 g_XMMulDec4 = { { { 1.0f, 1.0f / 1024.0f, 1.0f / (1024.0f*1024.0f), 1.0f / (1024.0f*1024.0f*1024.0f) } } };
1856 XMGLOBALCONST XMVECTORU32 g_XMMaskByte4 = { { { 0xFF, 0xFF00, 0xFF0000, 0xFF000000 } } };
1857 XMGLOBALCONST XMVECTORI32 g_XMXorByte4 = { { { 0x80, 0x8000, 0x800000, 0x00000000 } } };
1858 XMGLOBALCONST XMVECTORF32 g_XMAddByte4 = { { { -128.0f, -128.0f*256.0f, -128.0f*65536.0f, 0 } } };
1859 XMGLOBALCONST XMVECTORF32 g_XMFixUnsigned = { { { 32768.0f*65536.0f, 32768.0f*65536.0f, 32768.0f*65536.0f, 32768.0f*65536.0f } } };
1860 XMGLOBALCONST XMVECTORF32 g_XMMaxInt = { { { 65536.0f*32768.0f - 128.0f, 65536.0f*32768.0f - 128.0f, 65536.0f*32768.0f - 128.0f, 65536.0f*32768.0f - 128.0f } } };
1861 XMGLOBALCONST XMVECTORF32 g_XMMaxUInt = { { { 65536.0f*65536.0f - 256.0f, 65536.0f*65536.0f - 256.0f, 65536.0f*65536.0f - 256.0f, 65536.0f*65536.0f - 256.0f } } };
1862 XMGLOBALCONST XMVECTORF32 g_XMUnsignedFix = { { { 32768.0f*65536.0f, 32768.0f*65536.0f, 32768.0f*65536.0f, 32768.0f*65536.0f } } };
1863 XMGLOBALCONST XMVECTORF32 g_XMsrgbScale = { { { 12.92f, 12.92f, 12.92f, 1.0f } } };
1864 XMGLOBALCONST XMVECTORF32 g_XMsrgbA = { { { 0.055f, 0.055f, 0.055f, 0.0f } } };
1865 XMGLOBALCONST XMVECTORF32 g_XMsrgbA1 = { { { 1.055f, 1.055f, 1.055f, 1.0f } } };
1866 XMGLOBALCONST XMVECTORI32 g_XMExponentBias = { { { 127, 127, 127, 127 } } };
1867 XMGLOBALCONST XMVECTORI32 g_XMSubnormalExponent = { { { -126, -126, -126, -126 } } };
1868 XMGLOBALCONST XMVECTORI32 g_XMNumTrailing = { { { 23, 23, 23, 23 } } };
1869 XMGLOBALCONST XMVECTORI32 g_XMMinNormal = { { { 0x00800000, 0x00800000, 0x00800000, 0x00800000 } } };
1870 XMGLOBALCONST XMVECTORU32 g_XMNegInfinity = { { { 0xFF800000, 0xFF800000, 0xFF800000, 0xFF800000 } } };
1871 XMGLOBALCONST XMVECTORU32 g_XMNegQNaN = { { { 0xFFC00000, 0xFFC00000, 0xFFC00000, 0xFFC00000 } } };
1872 XMGLOBALCONST XMVECTORI32 g_XMBin128 = { { { 0x43000000, 0x43000000, 0x43000000, 0x43000000 } } };
1873 XMGLOBALCONST XMVECTORU32 g_XMBinNeg150 = { { { 0xC3160000, 0xC3160000, 0xC3160000, 0xC3160000 } } };
1874 XMGLOBALCONST XMVECTORI32 g_XM253 = { { { 253, 253, 253, 253 } } };
1875 XMGLOBALCONST XMVECTORF32 g_XMExpEst1 = { { { -6.93147182e-1f, -6.93147182e-1f, -6.93147182e-1f, -6.93147182e-1f } } };
1876 XMGLOBALCONST XMVECTORF32 g_XMExpEst2 = { { { +2.40226462e-1f, +2.40226462e-1f, +2.40226462e-1f, +2.40226462e-1f } } };
1877 XMGLOBALCONST XMVECTORF32 g_XMExpEst3 = { { { -5.55036440e-2f, -5.55036440e-2f, -5.55036440e-2f, -5.55036440e-2f } } };
1878 XMGLOBALCONST XMVECTORF32 g_XMExpEst4 = { { { +9.61597636e-3f, +9.61597636e-3f, +9.61597636e-3f, +9.61597636e-3f } } };
1879 XMGLOBALCONST XMVECTORF32 g_XMExpEst5 = { { { -1.32823968e-3f, -1.32823968e-3f, -1.32823968e-3f, -1.32823968e-3f } } };
1880 XMGLOBALCONST XMVECTORF32 g_XMExpEst6 = { { { +1.47491097e-4f, +1.47491097e-4f, +1.47491097e-4f, +1.47491097e-4f } } };
1881 XMGLOBALCONST XMVECTORF32 g_XMExpEst7 = { { { -1.08635004e-5f, -1.08635004e-5f, -1.08635004e-5f, -1.08635004e-5f } } };
1882 XMGLOBALCONST XMVECTORF32 g_XMLogEst0 = { { { +1.442693f, +1.442693f, +1.442693f, +1.442693f } } };
1883 XMGLOBALCONST XMVECTORF32 g_XMLogEst1 = { { { -0.721242f, -0.721242f, -0.721242f, -0.721242f } } };
1884 XMGLOBALCONST XMVECTORF32 g_XMLogEst2 = { { { +0.479384f, +0.479384f, +0.479384f, +0.479384f } } };
1885 XMGLOBALCONST XMVECTORF32 g_XMLogEst3 = { { { -0.350295f, -0.350295f, -0.350295f, -0.350295f } } };
1886 XMGLOBALCONST XMVECTORF32 g_XMLogEst4 = { { { +0.248590f, +0.248590f, +0.248590f, +0.248590f } } };
1887 XMGLOBALCONST XMVECTORF32 g_XMLogEst5 = { { { -0.145700f, -0.145700f, -0.145700f, -0.145700f } } };
1888 XMGLOBALCONST XMVECTORF32 g_XMLogEst6 = { { { +0.057148f, +0.057148f, +0.057148f, +0.057148f } } };
1889 XMGLOBALCONST XMVECTORF32 g_XMLogEst7 = { { { -0.010578f, -0.010578f, -0.010578f, -0.010578f } } };
1890 XMGLOBALCONST XMVECTORF32 g_XMLgE = { { { +1.442695f, +1.442695f, +1.442695f, +1.442695f } } };
1891 XMGLOBALCONST XMVECTORF32 g_XMInvLgE = { { { +6.93147182e-1f, +6.93147182e-1f, +6.93147182e-1f, +6.93147182e-1f } } };
1892 XMGLOBALCONST XMVECTORF32 g_UByteMax = { { { 255.0f, 255.0f, 255.0f, 255.0f } } };
1893 XMGLOBALCONST XMVECTORF32 g_ByteMin = { { { -127.0f, -127.0f, -127.0f, -127.0f } } };
1894 XMGLOBALCONST XMVECTORF32 g_ByteMax = { { { 127.0f, 127.0f, 127.0f, 127.0f } } };
1895 XMGLOBALCONST XMVECTORF32 g_ShortMin = { { { -32767.0f, -32767.0f, -32767.0f, -32767.0f } } };
1896 XMGLOBALCONST XMVECTORF32 g_ShortMax = { { { 32767.0f, 32767.0f, 32767.0f, 32767.0f } } };
1897 XMGLOBALCONST XMVECTORF32 g_UShortMax = { { { 65535.0f, 65535.0f, 65535.0f, 65535.0f } } };
/****************************************************************************
 *
 * Implementation
 *
 ****************************************************************************/
1905 #pragma warning(push)
1906 #pragma warning(disable:4068 4214 4204 4365 4616 4640 6001 6101)
1907 // C4068/4616: ignore unknown pragmas
1908 // C4214/4204: nonstandard extension used
1909 // C4365/4640: Off by default noise
1910 // C6001/6101: False positives
1912 #ifdef _PREFAST_
1913 #pragma prefast(push)
1914 #pragma prefast(disable : 25000, "FXMVECTOR is 16 bytes")
1915 #endif
1917 //------------------------------------------------------------------------------
1919 inline XMVECTOR XM_CALLCONV XMVectorSetBinaryConstant(uint32_t C0, uint32_t C1, uint32_t C2, uint32_t C3)
1921 #if defined(_XM_NO_INTRINSICS_)
1922 XMVECTORU32 vResult;
1923 vResult.u[0] = (0-(C0&1)) & 0x3F800000;
1924 vResult.u[1] = (0-(C1&1)) & 0x3F800000;
1925 vResult.u[2] = (0-(C2&1)) & 0x3F800000;
1926 vResult.u[3] = (0-(C3&1)) & 0x3F800000;
1927 return vResult.v;
1928 #elif defined(_XM_ARM_NEON_INTRINSICS_)
1929 XMVECTORU32 vResult;
1930 vResult.u[0] = (0-(C0&1)) & 0x3F800000;
1931 vResult.u[1] = (0-(C1&1)) & 0x3F800000;
1932 vResult.u[2] = (0-(C2&1)) & 0x3F800000;
1933 vResult.u[3] = (0-(C3&1)) & 0x3F800000;
1934 return vResult.v;
1935 #else // XM_SSE_INTRINSICS_
1936 static const XMVECTORU32 g_vMask1 = { { { 1, 1, 1, 1 } } };
1937 // Move the parms to a vector
1938 __m128i vTemp = _mm_set_epi32(C3,C2,C1,C0);
1939 // Mask off the low bits
1940 vTemp = _mm_and_si128(vTemp,g_vMask1);
1941 // 0xFFFFFFFF on true bits
1942 vTemp = _mm_cmpeq_epi32(vTemp,g_vMask1);
1943 // 0xFFFFFFFF -> 1.0f, 0x00000000 -> 0.0f
1944 vTemp = _mm_and_si128(vTemp,g_XMOne);
1945 return _mm_castsi128_ps(vTemp);
1946 #endif
1949 //------------------------------------------------------------------------------
1951 inline XMVECTOR XM_CALLCONV XMVectorSplatConstant(int32_t IntConstant, uint32_t DivExponent)
1953 assert( IntConstant >= -16 && IntConstant <= 15 );
1954 assert( DivExponent < 32 );
1955 #if defined(_XM_NO_INTRINSICS_)
1957 using DirectX::XMConvertVectorIntToFloat;
1959 XMVECTORI32 V = { { { IntConstant, IntConstant, IntConstant, IntConstant } } };
1960 return XMConvertVectorIntToFloat( V.v, DivExponent);
1962 #elif defined(_XM_ARM_NEON_INTRINSICS_)
1963 // Splat the int
1964 int32x4_t vScale = vdupq_n_s32(IntConstant);
1965 // Convert to a float
1966 XMVECTOR vResult = vcvtq_f32_s32(vScale);
1967 // Convert DivExponent into 1.0f/(1<<DivExponent)
1968 uint32_t uScale = 0x3F800000U - (DivExponent << 23);
1969 // Splat the scalar value (It's really a float)
1970 vScale = vdupq_n_s32(uScale);
1971 // Multiply by the reciprocal (Perform a right shift by DivExponent)
1972 vResult = vmulq_f32(vResult,reinterpret_cast<const float32x4_t *>(&vScale)[0]);
1973 return vResult;
1974 #else // XM_SSE_INTRINSICS_
1975 // Splat the int
1976 __m128i vScale = _mm_set1_epi32(IntConstant);
1977 // Convert to a float
1978 XMVECTOR vResult = _mm_cvtepi32_ps(vScale);
1979 // Convert DivExponent into 1.0f/(1<<DivExponent)
1980 uint32_t uScale = 0x3F800000U - (DivExponent << 23);
1981 // Splat the scalar value (It's really a float)
1982 vScale = _mm_set1_epi32(uScale);
1983 // Multiply by the reciprocal (Perform a right shift by DivExponent)
1984 vResult = _mm_mul_ps(vResult,_mm_castsi128_ps(vScale));
1985 return vResult;
1986 #endif
1989 //------------------------------------------------------------------------------
1991 inline XMVECTOR XM_CALLCONV XMVectorSplatConstantInt(int32_t IntConstant)
1993 assert( IntConstant >= -16 && IntConstant <= 15 );
1994 #if defined(_XM_NO_INTRINSICS_)
1996 XMVECTORI32 V = { { { IntConstant, IntConstant, IntConstant, IntConstant } } };
1997 return V.v;
1999 #elif defined(_XM_ARM_NEON_INTRINSICS_)
2000 int32x4_t V = vdupq_n_s32( IntConstant );
2001 return reinterpret_cast<float32x4_t *>(&V)[0];
2002 #else // XM_SSE_INTRINSICS_
2003 __m128i V = _mm_set1_epi32( IntConstant );
2004 return _mm_castsi128_ps(V);
2005 #endif
2008 #include "DirectXMathConvert.inl"
2009 #include "DirectXMathVector.inl"
2010 #include "DirectXMathMatrix.inl"
2011 #include "DirectXMathMisc.inl"
2013 #ifdef _PREFAST_
2014 #pragma prefast(pop)
2015 #endif
2017 #pragma warning(pop)
2019 }; // namespace DirectX