/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __GFNIINTRIN_H
#define __GFNIINTRIN_H

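/* The GFNI extension operates on bytes as elements of the Galois field
 * GF(2^8):
 *  - vgf2p8mulb multiplies corresponding bytes in GF(2^8), reduced by the
 *    AES polynomial x^8 + x^4 + x^3 + x + 1 (0x11B).
 *  - vgf2p8affineqb multiplies each byte, viewed as a vector of 8 bits, by
 *    an 8x8 bit matrix taken from the corresponding 64-bit lane of the
 *    second operand, then XORs the result with the immediate byte I.
 *  - vgf2p8affineinvqb applies the same affine transform to the GF(2^8)
 *    multiplicative inverse of each input byte.
 */
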
#if defined(__EVEX512__) && !defined(__AVX10_1_512__)
/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("gfni,no-evex512"), __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y                                                   \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("avx,gfni,no-evex512"),                           \
                 __min_vector_width__(256)))

/* Default attributes for VLX masked forms. */
#define __DEFAULT_FN_ATTRS_VL128                                               \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),             \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256                                               \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),             \
                 __min_vector_width__(256)))
#else
/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("gfni"),          \
                 __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y                                                   \
  __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"),      \
                 __min_vector_width__(256)))

/* Default attributes for VLX masked forms. */
#define __DEFAULT_FN_ATTRS_VL128                                               \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("avx512bw,avx512vl,gfni"),                        \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256                                               \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("avx512bw,avx512vl,gfni"),                        \
                 __min_vector_width__(256)))
#endif

/* Default attributes for ZMM unmasked forms. */
#define __DEFAULT_FN_ATTRS_Z                                                   \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("avx512f,evex512,gfni"),                          \
                 __min_vector_width__(512)))
/* Default attributes for ZMM masked forms. */
#define __DEFAULT_FN_ATTRS_Z_MASK                                              \
  __attribute__((__always_inline__, __nodebug__,                              \
                 __target__("avx512bw,evex512,gfni"),                         \
                 __min_vector_width__(512)))

#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
                                                   (__v16qi)(__m128i)(B), \
                                                   (char)(I)))

#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
                                                (__v16qi)(__m128i)(B), \
                                                (char)(I)))

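/* Example: the AES S-box is an affine transform of the GF(2^8)
 * multiplicative inverse, so it maps directly onto
 * _mm_gf2p8affineinv_epi64_epi8. A minimal sketch with a hypothetical
 * helper name; the matrix constant encodes the AES affine matrix, one row
 * per byte with the first row in the most significant byte:
 *
 *   __m128i aes_sub_bytes(__m128i __X) {
 *     const __m128i __M = _mm_set1_epi64x(0xF1E3C78F1F3E7CF8ULL);
 *     return _mm_gf2p8affineinv_epi64_epi8(__X, __M, 0x63);
 *   }
 */
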
static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
              (__v16qi) __B);
}

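/* Example: in GF(2^8) reduced by 0x11B, {57} * {83} = {c1} (the worked
 * example from FIPS-197, section 4.2). Assuming a GFNI-enabled target, the
 * following yields 0xC1 in every byte of __r:
 *
 *   __m128i __r = _mm_gf2p8mul_epi8(_mm_set1_epi8(0x57),
 *                                   _mm_set1_epi8((char)0x83));
 */
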
#ifdef __AVXINTRIN_H
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
                                                   (__v32qi)(__m256i)(B), \
                                                   (char)(I)))

#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
                                                (__v32qi)(__m256i)(B), \
                                                (char)(I)))

static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
              (__v32qi) __B);
}
#endif /* __AVXINTRIN_H */

#ifdef __AVX512BWINTRIN_H
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
                                                   (__v64qi)(__m512i)(B), \
                                                   (char)(I)))

#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
                                        U, A, B, I)

#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
                                                (__v64qi)(__m512i)(B), \
                                                (char)(I)))

#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
                                     U, A, B, I)

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
              (__v64qi) __B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_selectb_512(__U,
              (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
              (__v64qi) __S);
}

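/* For the masked form above, byte lane i of the result is the GF(2^8)
 * product of __A and __B when bit i of __U is set, and is taken from the
 * pass-through operand __S otherwise; the maskz form below substitutes
 * zero for __S. */
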
static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
  return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
              __U, __A, __B);
}
#endif /* __AVX512BWINTRIN_H */

#ifdef __AVX512VLBWINTRIN_H
#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
                                     U, A, B, I)

#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
                                        U, A, B, I)

#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)

#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
                                     U, A, B, I)

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_selectb_128(__U,
              (__v16qi) _mm_gf2p8mul_epi8(__A, __B),
              (__v16qi) __S);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
  return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),
              __U, __A, __B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_selectb_256(__U,
              (__v32qi) _mm256_gf2p8mul_epi8(__A, __B),
              (__v32qi) __S);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
  return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
              __U, __A, __B);
}
#endif /* __AVX512VLBWINTRIN_H */

#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_Y
#undef __DEFAULT_FN_ATTRS_Z
#undef __DEFAULT_FN_ATTRS_Z_MASK
#undef __DEFAULT_FN_ATTRS_VL128
#undef __DEFAULT_FN_ATTRS_VL256

#endif /* __GFNIINTRIN_H */