/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __GFNIINTRIN_H
#define __GFNIINTRIN_H

/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("gfni,no-evex512"), __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx,gfni,no-evex512"), \
                 __min_vector_width__(256)))

/* Default attributes for ZMM unmasked forms. */
#define __DEFAULT_FN_ATTRS_Z \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512f,evex512,gfni"), \
                 __min_vector_width__(512)))

/* Default attributes for ZMM masked forms. */
#define __DEFAULT_FN_ATTRS_Z_MASK \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512bw,evex512,gfni"), \
                 __min_vector_width__(512)))
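
/* Note that the unmasked ZMM form only requires AVX512F, while the masked
 * ZMM forms require AVX512BW, since byte-granular masking (__mmask64 and the
 * byte select) is an AVX512BW feature. */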

/* Default attributes for VLX masked forms. */
#define __DEFAULT_FN_ATTRS_VL128 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"), \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256 \
  __attribute__((__always_inline__, __nodebug__, \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"), \
                 __min_vector_width__(256)))

#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
                                                   (__v16qi)(__m128i)(B), \
                                                   (char)(I)))

#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
                                                (__v16qi)(__m128i)(B), \
                                                (char)(I)))
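
/* The affine forms treat each 64-bit lane of B as an 8x8 bit matrix, apply
 * that matrix to each byte of A in the corresponding lane, and XOR the
 * immediate byte I into the result. The "inv" form first replaces each byte
 * of A with its multiplicative inverse in GF(2^8). One common use is
 * reversing the bits of every byte; a minimal sketch (illustrative, not part
 * of this header), assuming the usual bit-reversal matrix constant:
 *
 *   __m128i reverse_bits_epi8(__m128i v) {
 *     // 0x8040201008040201: output bit i takes input bit (7 - i).
 *     return _mm_gf2p8affine_epi64_epi8(
 *         v, _mm_set1_epi64x((long long)0x8040201008040201ULL), 0);
 *   }
 */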

static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
              (__v16qi) __B);
}
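
/* _mm_gf2p8mul_epi8 multiplies corresponding bytes of __A and __B as
 * elements of GF(2^8), reduced by the AES polynomial
 * x^8 + x^4 + x^3 + x + 1 (0x11B). For example, multiplying by 0x02 is the
 * AES "xtime" step; a minimal sketch (illustrative, not part of this header):
 *
 *   __m128i xtime(__m128i state) {
 *     return _mm_gf2p8mul_epi8(state, _mm_set1_epi8(0x02));
 *   }
 */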

#ifdef __AVXINTRIN_H
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
                                                   (__v32qi)(__m256i)(B), \
                                                   (char)(I)))

#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
                                                (__v32qi)(__m256i)(B), \
                                                (char)(I)))

static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
              (__v32qi) __B);
}
#endif /* __AVXINTRIN_H */

#ifdef __AVX512BWINTRIN_H
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
                                                   (__v64qi)(__m512i)(B), \
                                                   (char)(I)))

#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)
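
/* The mask/maskz pair above follows the usual AVX-512 convention: the mask
 * form keeps a computed byte where the corresponding bit of U is set and the
 * byte of S otherwise, and the maskz form substitutes zero for S. A minimal
 * sketch (illustrative, not part of this header; 0x63 is the AES S-box
 * affine constant):
 *
 *   // Bytes selected by m get the inverse-affine result; the rest become 0.
 *   __m512i inv_affine_selected(__m512i v, __m512i matrix, __mmask64 m) {
 *     return _mm512_maskz_gf2p8affineinv_epi64_epi8(m, v, matrix, 0x63);
 *   }
 */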

#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
                                                (__v64qi)(__m512i)(B), \
                                                (char)(I)))

#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
              (__v64qi) __B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_selectb_512(__U,
        (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
        (__v64qi) __S);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
  return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
        __U, __A, __B);
}
#endif /* __AVX512BWINTRIN_H */

#ifdef __AVX512VLBWINTRIN_H
#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
         U, A, B, I)

#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)

#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_selectb_128(__U,
        (__v16qi) _mm_gf2p8mul_epi8(__A, __B),
        (__v16qi) __S);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
  return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),
        __U, __A, __B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_selectb_256(__U,
        (__v32qi) _mm256_gf2p8mul_epi8(__A, __B),
        (__v32qi) __S);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
  return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
        __U, __A, __B);
}
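
/* Usage sketch for the VL forms (illustrative, not part of this header):
 * with AVX512VL, AVX512BW and GFNI enabled, the masked multiply merges into
 * an existing register, e.g. updating only the low eight bytes:
 *
 *   __m128i mul_low_half(__m128i acc, __m128i a, __m128i b) {
 *     return _mm_mask_gf2p8mul_epi8(acc, (__mmask16)0x00FF, a, b);
 *   }
 */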

#endif /* __AVX512VLBWINTRIN_H */

#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_Y
#undef __DEFAULT_FN_ATTRS_Z
#undef __DEFAULT_FN_ATTRS_Z_MASK
#undef __DEFAULT_FN_ATTRS_VL128
#undef __DEFAULT_FN_ATTRS_VL256

#endif /* __GFNIINTRIN_H */