clang/test/CodeGen/X86/fma-builtins-constrained.c
// REQUIRES: x86-registered-target
// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -O2 -emit-llvm -o - | FileCheck %s --check-prefixes=COMMON,COMMONIR,UNCONSTRAINED
// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -ffp-exception-behavior=maytrap -DSTRICT=1 -O2 -emit-llvm -o - | FileCheck %s --check-prefixes=COMMON,COMMONIR,CONSTRAINED
// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -O2 -S -o - | FileCheck %s --check-prefixes=COMMON,CHECK-ASM
// RUN: %clang_cc1 -ffreestanding %s -triple=x86_64-unknown-linux-gnu -target-feature +fma -O2 -ffp-exception-behavior=maytrap -DSTRICT=1 -S -o - | FileCheck %s --check-prefixes=COMMON,CHECK-ASM

#ifdef STRICT
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.

#pragma float_control(except, on)
#endif

#include <immintrin.h>
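
// Each intrinsic below is checked three ways: the generic @llvm.fma.* call
// emitted in the default mode (UNCONSTRAINED), the
// @llvm.experimental.constrained.fma.* call emitted when
// -ffp-exception-behavior=maytrap / #pragma float_control(except, on) is in
// effect (CONSTRAINED), and the FMA instruction selected by the -S runs
// (CHECK-ASM). COMMONIR lines apply to both IR runs.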

__m128 test_mm_fmadd_ps(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fmadd_ps
  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmadd213ps
  return _mm_fmadd_ps(a, b, c);
}

__m128d test_mm_fmadd_pd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fmadd_pd
  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmadd213pd
  return _mm_fmadd_pd(a, b, c);
}
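
// The scalar _ss/_sd variants only operate on element 0: the COMMONIR lines
// below expect the operands to be extracted from the source vectors, a scalar
// fma on those elements, and the result inserted back into element 0.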

__m128 test_mm_fmadd_ss(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fmadd_ss
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmadd213ss
  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
  return _mm_fmadd_ss(a, b, c);
}

__m128d test_mm_fmadd_sd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fmadd_sd
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmadd213sd
  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
  return _mm_fmadd_sd(a, b, c);
}
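
// The subtracting/negating forms are expressed as an fma on negated operands,
// so the COMMONIR lines additionally match the expected fneg instructions:
// one fneg for fmsub and fnmadd, two for fnmsub.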

__m128 test_mm_fmsub_ps(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fmsub_ps
  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmsub213ps
  return _mm_fmsub_ps(a, b, c);
}

__m128d test_mm_fmsub_pd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fmsub_pd
  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmsub213pd
  return _mm_fmsub_pd(a, b, c);
}

__m128 test_mm_fmsub_ss(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fmsub_ss
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: [[NEG:%.+]] = fneg float %{{.+}}
  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmsub213ss
  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
  return _mm_fmsub_ss(a, b, c);
}

__m128d test_mm_fmsub_sd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fmsub_sd
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: [[NEG:%.+]] = fneg double %{{.+}}
  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmsub213sd
  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
  return _mm_fmsub_sd(a, b, c);
}

__m128 test_mm_fnmadd_ps(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fnmadd_ps
  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmadd213ps
  return _mm_fnmadd_ps(a, b, c);
}

__m128d test_mm_fnmadd_pd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fnmadd_pd
  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmadd213pd
  return _mm_fnmadd_pd(a, b, c);
}

__m128 test_mm_fnmadd_ss(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fnmadd_ss
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: [[NEG:%.+]] = fneg float %{{.+}}
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmadd213ss
  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
  return _mm_fnmadd_ss(a, b, c);
}

__m128d test_mm_fnmadd_sd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fnmadd_sd
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: [[NEG:%.+]] = fneg double %{{.+}}
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmadd213sd
  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
  return _mm_fnmadd_sd(a, b, c);
}

__m128 test_mm_fnmsub_ps(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fnmsub_ps
  // COMMONIR: [[NEG:%.+]] = fneg <4 x float> %{{.+}}
  // COMMONIR: [[NEG2:%.+]] = fneg <4 x float> %{{.+}}
  // UNCONSTRAINED: call <4 x float> @llvm.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
  // CONSTRAINED: call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmsub213ps
  return _mm_fnmsub_ps(a, b, c);
}

__m128d test_mm_fnmsub_pd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fnmsub_pd
  // COMMONIR: [[NEG:%.+]] = fneg <2 x double> %{{.+}}
  // COMMONIR: [[NEG2:%.+]] = fneg <2 x double> %{{.+}}
  // UNCONSTRAINED: call <2 x double> @llvm.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CONSTRAINED: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmsub213pd
  return _mm_fnmsub_pd(a, b, c);
}

__m128 test_mm_fnmsub_ss(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fnmsub_ss
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: [[NEG:%.+]] = fneg float %{{.+}}
  // COMMONIR: extractelement <4 x float> %{{.*}}, i64 0
  // COMMONIR: [[NEG2:%.+]] = fneg float %{{.+}}
  // UNCONSTRAINED: call float @llvm.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}})
  // CONSTRAINED: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmsub213ss
  // COMMONIR: insertelement <4 x float> %{{.*}}, float %{{.*}}, i64 0
  return _mm_fnmsub_ss(a, b, c);
}

__m128d test_mm_fnmsub_sd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fnmsub_sd
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: [[NEG:%.+]] = fneg double %{{.+}}
  // COMMONIR: extractelement <2 x double> %{{.*}}, i64 0
  // COMMONIR: [[NEG2:%.+]] = fneg double %{{.+}}
  // UNCONSTRAINED: call double @llvm.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}})
  // CONSTRAINED: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmsub213sd
  // COMMONIR: insertelement <2 x double> %{{.*}}, double %{{.*}}, i64 0
  return _mm_fnmsub_sd(a, b, c);
}
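
// fmaddsub/fmsubadd are not expressed with the generic fma intrinsic here;
// they are checked against the target-specific @llvm.x86.fma.vfmaddsub.*
// intrinsics, with fmsubadd matched as fmaddsub of a negated third operand.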

__m128 test_mm_fmaddsub_ps(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fmaddsub_ps
  // COMMONIR-NOT: fneg
  // COMMONIR: tail call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> %{{.*}})
  // CHECK-ASM: vfmaddsub213ps
  return _mm_fmaddsub_ps(a, b, c);
}

__m128d test_mm_fmaddsub_pd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fmaddsub_pd
  // COMMONIR-NOT: fneg
  // COMMONIR: tail call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfmaddsub213pd
  return _mm_fmaddsub_pd(a, b, c);
}

__m128 test_mm_fmsubadd_ps(__m128 a, __m128 b, __m128 c) {
  // COMMON-LABEL: test_mm_fmsubadd_ps
  // COMMONIR: [[FNEG:%.+]] = fneg <4 x float> %{{.*}}
  // COMMONIR: tail call <4 x float> @llvm.x86.fma.vfmaddsub.ps(<4 x float> %{{.*}}, <4 x float> %{{.*}}, <4 x float> [[FNEG]])
  // CHECK-ASM: vfmsubadd213ps
  return _mm_fmsubadd_ps(a, b, c);
}

__m128d test_mm_fmsubadd_pd(__m128d a, __m128d b, __m128d c) {
  // COMMON-LABEL: test_mm_fmsubadd_pd
  // COMMONIR: [[FNEG:%.+]] = fneg <2 x double> %{{.*}}
  // COMMONIR: tail call <2 x double> @llvm.x86.fma.vfmaddsub.pd(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[FNEG]])
  // CHECK-ASM: vfmsubadd213pd
  return _mm_fmsubadd_pd(a, b, c);
}
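
// The 256-bit variants mirror the 128-bit tests above, using the
// <8 x float>/<4 x double> forms of the same intrinsics.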

__m256 test_mm256_fmadd_ps(__m256 a, __m256 b, __m256 c) {
  // COMMON-LABEL: test_mm256_fmadd_ps
  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmadd213ps
  return _mm256_fmadd_ps(a, b, c);
}

__m256d test_mm256_fmadd_pd(__m256d a, __m256d b, __m256d c) {
  // COMMON-LABEL: test_mm256_fmadd_pd
  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmadd213pd
  return _mm256_fmadd_pd(a, b, c);
}

__m256 test_mm256_fmsub_ps(__m256 a, __m256 b, __m256 c) {
  // COMMON-LABEL: test_mm256_fmsub_ps
  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmsub213ps
  return _mm256_fmsub_ps(a, b, c);
}

__m256d test_mm256_fmsub_pd(__m256d a, __m256d b, __m256d c) {
  // COMMON-LABEL: test_mm256_fmsub_pd
  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmsub213pd
  return _mm256_fmsub_pd(a, b, c);
}

__m256 test_mm256_fnmadd_ps(__m256 a, __m256 b, __m256 c) {
  // COMMON-LABEL: test_mm256_fnmadd_ps
  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmadd213ps
  return _mm256_fnmadd_ps(a, b, c);
}

__m256d test_mm256_fnmadd_pd(__m256d a, __m256d b, __m256d c) {
  // COMMON-LABEL: test_mm256_fnmadd_pd
  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmadd213pd
  return _mm256_fnmadd_pd(a, b, c);
}

__m256 test_mm256_fnmsub_ps(__m256 a, __m256 b, __m256 c) {
  // COMMON-LABEL: test_mm256_fnmsub_ps
  // COMMONIR: [[NEG:%.+]] = fneg <8 x float> %{{.*}}
  // COMMONIR: [[NEG2:%.+]] = fneg <8 x float> %{{.*}}
  // UNCONSTRAINED: call <8 x float> @llvm.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
  // CONSTRAINED: call <8 x float> @llvm.experimental.constrained.fma.v8f32(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmsub213ps
  return _mm256_fnmsub_ps(a, b, c);
}

__m256d test_mm256_fnmsub_pd(__m256d a, __m256d b, __m256d c) {
  // COMMON-LABEL: test_mm256_fnmsub_pd
  // COMMONIR: [[NEG:%.+]] = fneg <4 x double> %{{.+}}
  // COMMONIR: [[NEG2:%.+]] = fneg <4 x double> %{{.+}}
  // UNCONSTRAINED: call <4 x double> @llvm.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
  // CONSTRAINED: call <4 x double> @llvm.experimental.constrained.fma.v4f64(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfnmsub213pd
  return _mm256_fnmsub_pd(a, b, c);
}

__m256 test_mm256_fmaddsub_ps(__m256 a, __m256 b, __m256 c) {
  // COMMON-LABEL: test_mm256_fmaddsub_ps
  // COMMONIR-NOT: fneg
  // COMMONIR: tail call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> %{{.*}})
  // CHECK-ASM: vfmaddsub213ps
  return _mm256_fmaddsub_ps(a, b, c);
}

__m256d test_mm256_fmaddsub_pd(__m256d a, __m256d b, __m256d c) {
  // COMMON-LABEL: test_mm256_fmaddsub_pd
  // COMMONIR-NOT: fneg
  // COMMONIR: tail call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> %{{.*}})
  // CHECK-ASM: vfmaddsub213pd
  return _mm256_fmaddsub_pd(a, b, c);
}

__m256 test_mm256_fmsubadd_ps(__m256 a, __m256 b, __m256 c) {
  // COMMON-LABEL: test_mm256_fmsubadd_ps
  // COMMONIR: [[FNEG:%.+]] = fneg <8 x float> %{{.*}}
  // COMMONIR: tail call <8 x float> @llvm.x86.fma.vfmaddsub.ps.256(<8 x float> %{{.*}}, <8 x float> %{{.*}}, <8 x float> [[FNEG]])
  // CHECK-ASM: vfmsubadd213ps
  return _mm256_fmsubadd_ps(a, b, c);
}

__m256d test_mm256_fmsubadd_pd(__m256d a, __m256d b, __m256d c) {
  // COMMON-LABEL: test_mm256_fmsubadd_pd
  // COMMONIR: [[FNEG:%.+]] = fneg <4 x double> %{{.*}}
  // COMMONIR: tail call <4 x double> @llvm.x86.fma.vfmaddsub.pd.256(<4 x double> %{{.*}}, <4 x double> %{{.*}}, <4 x double> [[FNEG]])
  // CHECK-ASM: vfmsubadd213pd
  return _mm256_fmsubadd_pd(a, b, c);
}