// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=basic -o - | FileCheck %s --check-prefix=BASIC

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -fno-cx-limited-range -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=improved -o - | FileCheck %s --check-prefix=IMPRVD

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=promoted -o - | FileCheck %s --check-prefix=PRMTD

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -complex-range=full -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 -triple x86_64-windows-pc -complex-range=promoted \
// RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=X86WINPRMTD

// RUN: %clang_cc1 -triple=avr-unknown-unknown -mdouble=32 \
// RUN: -complex-range=promoted -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=AVRFP32

// RUN: %clang_cc1 -triple=avr-unknown-unknown -mdouble=64 \
// RUN: -complex-range=promoted -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=AVRFP64

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=basic -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=BASIC_FAST

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=full -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=FULL_FAST

// RUN: %clang_cc1 %s -O0 -emit-llvm -triple x86_64-unknown-unknown \
// RUN: -fno-cx-fortran-rules -o - | FileCheck %s --check-prefix=FULL

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=improved -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=IMPRVD_FAST

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu \
// RUN: -ffast-math -complex-range=promoted -emit-llvm -o - %s \
// RUN: | FileCheck %s --check-prefix=PRMTD_FAST

// RUN: %clang_cc1 -triple x86_64-windows-pc -complex-range=promoted \
// RUN: -ffp-contract=off -frounding-math -ffp-exception-behavior=strict \
// RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=X86WINPRMTD_STRICT

// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -complex-range=promoted \
// RUN: -ffp-contract=off -frounding-math -ffp-exception-behavior=strict \
// RUN: -emit-llvm -o - %s | FileCheck %s --check-prefix=PRMTD_STRICT

// FULL-LABEL: define dso_local <2 x float> @divf(
// FULL-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// FULL-NEXT: entry:
// FULL-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// FULL-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// FULL-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// FULL-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// FULL-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// FULL-NEXT: [[CALL:%.*]] = call <2 x float> @__divsc3(float noundef [[A_REAL]], float noundef [[A_IMAG]], float noundef [[B_REAL]], float noundef [[B_IMAG]]) #[[ATTR2:[0-9]+]]
// FULL-NEXT: store <2 x float> [[CALL]], ptr [[COERCE]], align 4
// FULL-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT: store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL-NEXT: store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL-NEXT: ret <2 x float> [[TMP0]]
//
// BASIC-LABEL: define dso_local <2 x float> @divf(
// BASIC-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// BASIC-NEXT: entry:
// BASIC-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// BASIC-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// BASIC-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// BASIC-NEXT: [[TMP0:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// BASIC-NEXT: [[TMP1:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT: [[TMP2:%.*]] = fadd float [[TMP0]], [[TMP1]]
// BASIC-NEXT: [[TMP3:%.*]] = fmul float [[B_REAL]], [[B_REAL]]
// BASIC-NEXT: [[TMP4:%.*]] = fmul float [[B_IMAG]], [[B_IMAG]]
// BASIC-NEXT: [[TMP5:%.*]] = fadd float [[TMP3]], [[TMP4]]
// BASIC-NEXT: [[TMP6:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT: [[TMP7:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT: [[TMP8:%.*]] = fsub float [[TMP6]], [[TMP7]]
// BASIC-NEXT: [[TMP9:%.*]] = fdiv float [[TMP2]], [[TMP5]]
// BASIC-NEXT: [[TMP10:%.*]] = fdiv float [[TMP8]], [[TMP5]]
// BASIC-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT: store float [[TMP9]], ptr [[RETVAL_REALP]], align 4
// BASIC-NEXT: store float [[TMP10]], ptr [[RETVAL_IMAGP]], align 4
// BASIC-NEXT: [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC-NEXT: ret <2 x float> [[TMP11]]
//
// IMPRVD-LABEL: define dso_local <2 x float> @divf(
// IMPRVD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// IMPRVD-NEXT: entry:
// IMPRVD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// IMPRVD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// IMPRVD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// IMPRVD-NEXT: [[TMP0:%.*]] = call float @llvm.fabs.f32(float [[B_REAL]])
// IMPRVD-NEXT: [[TMP1:%.*]] = call float @llvm.fabs.f32(float [[B_IMAG]])
// IMPRVD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt float [[TMP0]], [[TMP1]]
// IMPRVD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD: abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD-NEXT: [[TMP2:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
// IMPRVD-NEXT: [[TMP3:%.*]] = fmul float [[TMP2]], [[B_IMAG]]
// IMPRVD-NEXT: [[TMP4:%.*]] = fadd float [[B_REAL]], [[TMP3]]
// IMPRVD-NEXT: [[TMP5:%.*]] = fmul float [[A_IMAG]], [[TMP2]]
// IMPRVD-NEXT: [[TMP6:%.*]] = fadd float [[A_REAL]], [[TMP5]]
// IMPRVD-NEXT: [[TMP7:%.*]] = fdiv float [[TMP6]], [[TMP4]]
// IMPRVD-NEXT: [[TMP8:%.*]] = fmul float [[A_REAL]], [[TMP2]]
// IMPRVD-NEXT: [[TMP9:%.*]] = fsub float [[A_IMAG]], [[TMP8]]
// IMPRVD-NEXT: [[TMP10:%.*]] = fdiv float [[TMP9]], [[TMP4]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV:%.*]]
// IMPRVD: abs_rhsr_less_than_abs_rhsi:
// IMPRVD-NEXT: [[TMP11:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
// IMPRVD-NEXT: [[TMP12:%.*]] = fmul float [[TMP11]], [[B_REAL]]
// IMPRVD-NEXT: [[TMP13:%.*]] = fadd float [[B_IMAG]], [[TMP12]]
// IMPRVD-NEXT: [[TMP14:%.*]] = fmul float [[A_REAL]], [[TMP11]]
// IMPRVD-NEXT: [[TMP15:%.*]] = fadd float [[TMP14]], [[A_IMAG]]
// IMPRVD-NEXT: [[TMP16:%.*]] = fdiv float [[TMP15]], [[TMP13]]
// IMPRVD-NEXT: [[TMP17:%.*]] = fmul float [[A_IMAG]], [[TMP11]]
// IMPRVD-NEXT: [[TMP18:%.*]] = fsub float [[TMP17]], [[A_REAL]]
// IMPRVD-NEXT: [[TMP19:%.*]] = fdiv float [[TMP18]], [[TMP13]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV]]
// IMPRVD: complex_div:
// IMPRVD-NEXT: [[TMP20:%.*]] = phi float [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT: [[TMP21:%.*]] = phi float [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT: store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// IMPRVD-NEXT: store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD-NEXT: [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD-NEXT: ret <2 x float> [[TMP22]]
//
// PRMTD-LABEL: define dso_local <2 x float> @divf(
// PRMTD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// PRMTD-NEXT: entry:
// PRMTD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD-NEXT: [[EXT:%.*]] = fpext float [[A_REAL]] to double
// PRMTD-NEXT: [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD-NEXT: [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// PRMTD-NEXT: [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// PRMTD-NEXT: [[TMP0:%.*]] = fmul double [[EXT]], [[EXT2]]
// PRMTD-NEXT: [[TMP1:%.*]] = fmul double [[EXT1]], [[EXT3]]
// PRMTD-NEXT: [[TMP2:%.*]] = fadd double [[TMP0]], [[TMP1]]
// PRMTD-NEXT: [[TMP3:%.*]] = fmul double [[EXT2]], [[EXT2]]
// PRMTD-NEXT: [[TMP4:%.*]] = fmul double [[EXT3]], [[EXT3]]
// PRMTD-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
// PRMTD-NEXT: [[TMP6:%.*]] = fmul double [[EXT1]], [[EXT2]]
// PRMTD-NEXT: [[TMP7:%.*]] = fmul double [[EXT]], [[EXT3]]
// PRMTD-NEXT: [[TMP8:%.*]] = fsub double [[TMP6]], [[TMP7]]
// PRMTD-NEXT: [[TMP9:%.*]] = fdiv double [[TMP2]], [[TMP5]]
// PRMTD-NEXT: [[TMP10:%.*]] = fdiv double [[TMP8]], [[TMP5]]
// PRMTD-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP9]] to float
// PRMTD-NEXT: [[UNPROMOTION4:%.*]] = fptrunc double [[TMP10]] to float
// PRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD-NEXT: store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD-NEXT: [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD-NEXT: ret <2 x float> [[TMP11]]
//
// X86WINPRMTD-LABEL: define dso_local i64 @divf(
// X86WINPRMTD-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// X86WINPRMTD-NEXT: entry:
// X86WINPRMTD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT: store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD-NEXT: store i64 [[B_COERCE]], ptr [[B]], align 4
// X86WINPRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD-NEXT: [[EXT:%.*]] = fpext float [[A_REAL]] to double
// X86WINPRMTD-NEXT: [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// X86WINPRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// X86WINPRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// X86WINPRMTD-NEXT: [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// X86WINPRMTD-NEXT: [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// X86WINPRMTD-NEXT: [[TMP0:%.*]] = fmul double [[EXT]], [[EXT2]]
// X86WINPRMTD-NEXT: [[TMP1:%.*]] = fmul double [[EXT1]], [[EXT3]]
// X86WINPRMTD-NEXT: [[TMP2:%.*]] = fadd double [[TMP0]], [[TMP1]]
// X86WINPRMTD-NEXT: [[TMP3:%.*]] = fmul double [[EXT2]], [[EXT2]]
// X86WINPRMTD-NEXT: [[TMP4:%.*]] = fmul double [[EXT3]], [[EXT3]]
// X86WINPRMTD-NEXT: [[TMP5:%.*]] = fadd double [[TMP3]], [[TMP4]]
// X86WINPRMTD-NEXT: [[TMP6:%.*]] = fmul double [[EXT1]], [[EXT2]]
// X86WINPRMTD-NEXT: [[TMP7:%.*]] = fmul double [[EXT]], [[EXT3]]
// X86WINPRMTD-NEXT: [[TMP8:%.*]] = fsub double [[TMP6]], [[TMP7]]
// X86WINPRMTD-NEXT: [[TMP9:%.*]] = fdiv double [[TMP2]], [[TMP5]]
// X86WINPRMTD-NEXT: [[TMP10:%.*]] = fdiv double [[TMP8]], [[TMP5]]
// X86WINPRMTD-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP9]] to float
// X86WINPRMTD-NEXT: [[UNPROMOTION4:%.*]] = fptrunc double [[TMP10]] to float
// X86WINPRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD-NEXT: store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD-NEXT: [[TMP11:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD-NEXT: ret i64 [[TMP11]]
//
// AVRFP32-LABEL: define dso_local { float, float } @divf(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0:[0-9]+]] {
// AVRFP32-NEXT: entry:
// AVRFP32-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP4:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_REAL]])
// AVRFP32-NEXT: [[TMP5:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_IMAG]])
// AVRFP32-NEXT: [[ABS_CMP:%.*]] = fcmp ugt float [[TMP4]], [[TMP5]]
// AVRFP32-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP32: abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP32-NEXT: [[TMP6:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
// AVRFP32-NEXT: [[TMP7:%.*]] = fmul float [[TMP6]], [[B_IMAG]]
// AVRFP32-NEXT: [[TMP8:%.*]] = fadd float [[B_REAL]], [[TMP7]]
// AVRFP32-NEXT: [[TMP9:%.*]] = fmul float [[A_IMAG]], [[TMP6]]
// AVRFP32-NEXT: [[TMP10:%.*]] = fadd float [[A_REAL]], [[TMP9]]
// AVRFP32-NEXT: [[TMP11:%.*]] = fdiv float [[TMP10]], [[TMP8]]
// AVRFP32-NEXT: [[TMP12:%.*]] = fmul float [[A_REAL]], [[TMP6]]
// AVRFP32-NEXT: [[TMP13:%.*]] = fsub float [[A_IMAG]], [[TMP12]]
// AVRFP32-NEXT: [[TMP14:%.*]] = fdiv float [[TMP13]], [[TMP8]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV:%.*]]
// AVRFP32: abs_rhsr_less_than_abs_rhsi:
// AVRFP32-NEXT: [[TMP15:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
// AVRFP32-NEXT: [[TMP16:%.*]] = fmul float [[TMP15]], [[B_REAL]]
// AVRFP32-NEXT: [[TMP17:%.*]] = fadd float [[B_IMAG]], [[TMP16]]
// AVRFP32-NEXT: [[TMP18:%.*]] = fmul float [[A_REAL]], [[TMP15]]
// AVRFP32-NEXT: [[TMP19:%.*]] = fadd float [[TMP18]], [[A_IMAG]]
// AVRFP32-NEXT: [[TMP20:%.*]] = fdiv float [[TMP19]], [[TMP17]]
// AVRFP32-NEXT: [[TMP21:%.*]] = fmul float [[A_IMAG]], [[TMP15]]
// AVRFP32-NEXT: [[TMP22:%.*]] = fsub float [[TMP21]], [[A_REAL]]
// AVRFP32-NEXT: [[TMP23:%.*]] = fdiv float [[TMP22]], [[TMP17]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV]]
// AVRFP32: complex_div:
// AVRFP32-NEXT: [[TMP24:%.*]] = phi float [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT: [[TMP25:%.*]] = phi float [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT: store float [[TMP24]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT: store float [[TMP25]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP26:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT: ret { float, float } [[TMP26]]
//
// AVRFP64-LABEL: define dso_local { float, float } @divf(
// AVRFP64-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0:[0-9]+]] {
// AVRFP64-NEXT: entry:
// AVRFP64-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT: [[A:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT: [[B:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP64-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT: [[EXT:%.*]] = fpext float [[A_REAL]] to double
// AVRFP64-NEXT: [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// AVRFP64-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP64-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT: [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// AVRFP64-NEXT: [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// AVRFP64-NEXT: [[TMP4:%.*]] = fmul double [[EXT]], [[EXT2]]
// AVRFP64-NEXT: [[TMP5:%.*]] = fmul double [[EXT1]], [[EXT3]]
// AVRFP64-NEXT: [[TMP6:%.*]] = fadd double [[TMP4]], [[TMP5]]
// AVRFP64-NEXT: [[TMP7:%.*]] = fmul double [[EXT2]], [[EXT2]]
// AVRFP64-NEXT: [[TMP8:%.*]] = fmul double [[EXT3]], [[EXT3]]
// AVRFP64-NEXT: [[TMP9:%.*]] = fadd double [[TMP7]], [[TMP8]]
// AVRFP64-NEXT: [[TMP10:%.*]] = fmul double [[EXT1]], [[EXT2]]
// AVRFP64-NEXT: [[TMP11:%.*]] = fmul double [[EXT]], [[EXT3]]
// AVRFP64-NEXT: [[TMP12:%.*]] = fsub double [[TMP10]], [[TMP11]]
// AVRFP64-NEXT: [[TMP13:%.*]] = fdiv double [[TMP6]], [[TMP9]]
// AVRFP64-NEXT: [[TMP14:%.*]] = fdiv double [[TMP12]], [[TMP9]]
// AVRFP64-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP13]] to float
// AVRFP64-NEXT: [[UNPROMOTION4:%.*]] = fptrunc double [[TMP14]] to float
// AVRFP64-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP64-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP64-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 1
// AVRFP64-NEXT: store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP64-NEXT: [[TMP15:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP64-NEXT: ret { float, float } [[TMP15]]
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// BASIC_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// BASIC_FAST-NEXT: entry:
// BASIC_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// BASIC_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// BASIC_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// BASIC_FAST-NEXT: [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP0]], [[TMP1]]
// BASIC_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[B_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[B_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP3]], [[TMP4]]
// BASIC_FAST-NEXT: [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP6]], [[TMP7]]
// BASIC_FAST-NEXT: [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP2]], [[TMP5]]
// BASIC_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP8]], [[TMP5]]
// BASIC_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT: store float [[TMP9]], ptr [[RETVAL_REALP]], align 4
// BASIC_FAST-NEXT: store float [[TMP10]], ptr [[RETVAL_IMAGP]], align 4
// BASIC_FAST-NEXT: [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC_FAST-NEXT: ret <2 x float> [[TMP11]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// FULL_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// FULL_FAST-NEXT: entry:
// FULL_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// FULL_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// FULL_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// FULL_FAST-NEXT: [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__divsc3(float noundef nofpclass(nan inf) [[A_REAL]], float noundef nofpclass(nan inf) [[A_IMAG]], float noundef nofpclass(nan inf) [[B_REAL]], float noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2:[0-9]+]]
// FULL_FAST-NEXT: store <2 x float> [[CALL]], ptr [[COERCE]], align 4
// FULL_FAST-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL_FAST-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL_FAST-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL_FAST-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT: store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL_FAST-NEXT: store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL_FAST-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL_FAST-NEXT: ret <2 x float> [[TMP0]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// IMPRVD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// IMPRVD_FAST-NEXT: entry:
// IMPRVD_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// IMPRVD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// IMPRVD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// IMPRVD_FAST-NEXT: [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[B_REAL]])
// IMPRVD_FAST-NEXT: [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[B_IMAG]])
// IMPRVD_FAST-NEXT: [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt float [[TMP0]], [[TMP1]]
// IMPRVD_FAST-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST: abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[B_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP2]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[B_REAL]], [[TMP3]]
// IMPRVD_FAST-NEXT: [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP2]]
// IMPRVD_FAST-NEXT: [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP5]]
// IMPRVD_FAST-NEXT: [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP6]], [[TMP4]]
// IMPRVD_FAST-NEXT: [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP2]]
// IMPRVD_FAST-NEXT: [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP8]]
// IMPRVD_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP9]], [[TMP4]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST: abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[B_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP11]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[B_IMAG]], [[TMP12]]
// IMPRVD_FAST-NEXT: [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP11]]
// IMPRVD_FAST-NEXT: [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP14]], [[A_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP15]], [[TMP13]]
// IMPRVD_FAST-NEXT: [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP11]]
// IMPRVD_FAST-NEXT: [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP17]], [[A_REAL]]
// IMPRVD_FAST-NEXT: [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP18]], [[TMP13]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV]]
// IMPRVD_FAST: complex_div:
// IMPRVD_FAST-NEXT: [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// IMPRVD_FAST-NEXT: store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD_FAST-NEXT: [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD_FAST-NEXT: ret <2 x float> [[TMP22]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @divf(
// PRMTD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// PRMTD_FAST-NEXT: entry:
// PRMTD_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_FAST-NEXT: [[EXT:%.*]] = fpext float [[A_REAL]] to double
// PRMTD_FAST-NEXT: [[EXT1:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD_FAST-NEXT: [[EXT2:%.*]] = fpext float [[B_REAL]] to double
// PRMTD_FAST-NEXT: [[EXT3:%.*]] = fpext float [[B_IMAG]] to double
// PRMTD_FAST-NEXT: [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT2]]
// PRMTD_FAST-NEXT: [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT1]], [[EXT3]]
// PRMTD_FAST-NEXT: [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP0]], [[TMP1]]
// PRMTD_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT2]], [[EXT2]]
// PRMTD_FAST-NEXT: [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT3]], [[EXT3]]
// PRMTD_FAST-NEXT: [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP3]], [[TMP4]]
// PRMTD_FAST-NEXT: [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT1]], [[EXT2]]
// PRMTD_FAST-NEXT: [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT3]]
// PRMTD_FAST-NEXT: [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP6]], [[TMP7]]
// PRMTD_FAST-NEXT: [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP2]], [[TMP5]]
// PRMTD_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP8]], [[TMP5]]
// PRMTD_FAST-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP9]] to float
// PRMTD_FAST-NEXT: [[UNPROMOTION4:%.*]] = fptrunc double [[TMP10]] to float
// PRMTD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_FAST-NEXT: store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_FAST-NEXT: [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_FAST-NEXT: ret <2 x float> [[TMP11]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local i64 @divf(
// X86WINPRMTD_STRICT-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// X86WINPRMTD_STRICT-NEXT: entry:
// X86WINPRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT: store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD_STRICT-NEXT: store i64 [[B_COERCE]], ptr [[B]], align 4
// X86WINPRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR3:[0-9]+]]
// X86WINPRMTD_STRICT-NEXT: [[EXT1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[EXT2:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_REAL]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[EXT3:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP0]], double [[TMP1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT2]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP4:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT3]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP5:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP3]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP6:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP7:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP8:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP6]], double [[TMP7]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP9:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP2]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP8]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[UNPROMOTION4:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT: store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[TMP11:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD_STRICT-NEXT: ret i64 [[TMP11]]
//
// PRMTD_STRICT-LABEL: define dso_local <2 x float> @divf(
// PRMTD_STRICT-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0:[0-9]+]] {
// PRMTD_STRICT-NEXT: entry:
// PRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_STRICT-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// PRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_STRICT-NEXT: [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR4:[0-9]+]]
// PRMTD_STRICT-NEXT: [[EXT1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// PRMTD_STRICT-NEXT: [[EXT2:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[EXT3:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[B_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP0]], double [[TMP1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT2]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP4:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT3]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP5:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP3]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP6:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT1]], double [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP7:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP8:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP6]], double [[TMP7]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP9:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP2]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP8]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[UNPROMOTION4:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP10]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_STRICT-NEXT: store float [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_STRICT-NEXT: [[TMP11:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_STRICT-NEXT: ret <2 x float> [[TMP11]]
//
_Complex float divf(_Complex float a, _Complex float b) {
  return a / b;
}

// FULL-LABEL: define dso_local <2 x float> @mulf(
// FULL-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// FULL-NEXT: entry:
// FULL-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// FULL-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// FULL-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// FULL-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// FULL-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// FULL-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// FULL-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// FULL-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// FULL-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// FULL-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// FULL-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// FULL-NEXT: [[ISNAN_CMP:%.*]] = fcmp uno float [[MUL_R]], [[MUL_R]]
// FULL-NEXT: br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2:![0-9]+]]
// FULL: complex_mul_imag_nan:
// FULL-NEXT: [[ISNAN_CMP1:%.*]] = fcmp uno float [[MUL_I]], [[MUL_I]]
// FULL-NEXT: br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL: complex_mul_libcall:
// FULL-NEXT: [[CALL:%.*]] = call <2 x float> @__mulsc3(float noundef [[A_REAL]], float noundef [[A_IMAG]], float noundef [[B_REAL]], float noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT: store <2 x float> [[CALL]], ptr [[COERCE]], align 4
// FULL-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL-NEXT: br label [[COMPLEX_MUL_CONT]]
// FULL: complex_mul_cont:
// FULL-NEXT: [[REAL_MUL_PHI:%.*]] = phi float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT: [[IMAG_MUL_PHI:%.*]] = phi float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT: store float [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 4
// FULL-NEXT: store float [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 4
// FULL-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL-NEXT: ret <2 x float> [[TMP0]]
//
// BASIC-LABEL: define dso_local <2 x float> @mulf(
// BASIC-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// BASIC-NEXT: entry:
// BASIC-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// BASIC-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// BASIC-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// BASIC-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// BASIC-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// BASIC-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// BASIC-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// BASIC-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// BASIC-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC-NEXT: ret <2 x float> [[TMP0]]
//
// IMPRVD-LABEL: define dso_local <2 x float> @mulf(
// IMPRVD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
// IMPRVD-NEXT: entry:
// IMPRVD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: [[B:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
// IMPRVD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
// IMPRVD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
// IMPRVD-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// IMPRVD-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// IMPRVD-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// IMPRVD-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// IMPRVD-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// IMPRVD-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// IMPRVD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
// IMPRVD-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD-NEXT: ret <2 x float> [[TMP0]]
//
704 // PRMTD-LABEL: define dso_local <2 x float> @mulf(
705 // PRMTD-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
706 // PRMTD-NEXT: entry:
707 // PRMTD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
708 // PRMTD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
709 // PRMTD-NEXT: [[B:%.*]] = alloca { float, float }, align 4
710 // PRMTD-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
711 // PRMTD-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
712 // PRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
713 // PRMTD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
714 // PRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
715 // PRMTD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
716 // PRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
717 // PRMTD-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
718 // PRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
719 // PRMTD-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
720 // PRMTD-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
721 // PRMTD-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
722 // PRMTD-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
723 // PRMTD-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
724 // PRMTD-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
725 // PRMTD-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
726 // PRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
727 // PRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
728 // PRMTD-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
729 // PRMTD-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
730 // PRMTD-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
731 // PRMTD-NEXT: ret <2 x float> [[TMP0]]
733 // X86WINPRMTD-LABEL: define dso_local i64 @mulf(
734 // X86WINPRMTD-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
735 // X86WINPRMTD-NEXT: entry:
736 // X86WINPRMTD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
737 // X86WINPRMTD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
738 // X86WINPRMTD-NEXT: [[B:%.*]] = alloca { float, float }, align 4
739 // X86WINPRMTD-NEXT: store i64 [[A_COERCE]], ptr [[A]], align 4
740 // X86WINPRMTD-NEXT: store i64 [[B_COERCE]], ptr [[B]], align 4
741 // X86WINPRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
742 // X86WINPRMTD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
743 // X86WINPRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
744 // X86WINPRMTD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
745 // X86WINPRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
746 // X86WINPRMTD-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
747 // X86WINPRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
748 // X86WINPRMTD-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
749 // X86WINPRMTD-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
750 // X86WINPRMTD-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
751 // X86WINPRMTD-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
752 // X86WINPRMTD-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
753 // X86WINPRMTD-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
754 // X86WINPRMTD-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
755 // X86WINPRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
756 // X86WINPRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
757 // X86WINPRMTD-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
758 // X86WINPRMTD-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
759 // X86WINPRMTD-NEXT: [[TMP0:%.*]] = load i64, ptr [[RETVAL]], align 4
760 // X86WINPRMTD-NEXT: ret i64 [[TMP0]]
762 // AVRFP32-LABEL: define dso_local { float, float } @mulf(
763 // AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
764 // AVRFP32-NEXT: entry:
765 // AVRFP32-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
766 // AVRFP32-NEXT: [[A:%.*]] = alloca { float, float }, align 1
767 // AVRFP32-NEXT: [[B:%.*]] = alloca { float, float }, align 1
768 // AVRFP32-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
769 // AVRFP32-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
770 // AVRFP32-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
771 // AVRFP32-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
772 // AVRFP32-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
773 // AVRFP32-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
774 // AVRFP32-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
775 // AVRFP32-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
776 // AVRFP32-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
777 // AVRFP32-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
778 // AVRFP32-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
779 // AVRFP32-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
780 // AVRFP32-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
781 // AVRFP32-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
782 // AVRFP32-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
783 // AVRFP32-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
784 // AVRFP32-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
785 // AVRFP32-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
786 // AVRFP32-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
787 // AVRFP32-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
788 // AVRFP32-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
789 // AVRFP32-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
790 // AVRFP32-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
791 // AVRFP32-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
792 // AVRFP32-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
793 // AVRFP32-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
794 // AVRFP32-NEXT: [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
795 // AVRFP32-NEXT: ret { float, float } [[TMP4]]
797 // AVRFP64-LABEL: define dso_local { float, float } @mulf(
798 // AVRFP64-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
799 // AVRFP64-NEXT: entry:
800 // AVRFP64-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
801 // AVRFP64-NEXT: [[A:%.*]] = alloca { float, float }, align 1
802 // AVRFP64-NEXT: [[B:%.*]] = alloca { float, float }, align 1
803 // AVRFP64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
804 // AVRFP64-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
805 // AVRFP64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
806 // AVRFP64-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
807 // AVRFP64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
808 // AVRFP64-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
809 // AVRFP64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
810 // AVRFP64-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
811 // AVRFP64-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
812 // AVRFP64-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
813 // AVRFP64-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
814 // AVRFP64-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
815 // AVRFP64-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
816 // AVRFP64-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
817 // AVRFP64-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
818 // AVRFP64-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
819 // AVRFP64-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
820 // AVRFP64-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
821 // AVRFP64-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
822 // AVRFP64-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
823 // AVRFP64-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
824 // AVRFP64-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
825 // AVRFP64-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
826 // AVRFP64-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
827 // AVRFP64-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
828 // AVRFP64-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
829 // AVRFP64-NEXT: [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
830 // AVRFP64-NEXT: ret { float, float } [[TMP4]]
832 // BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
833 // BASIC_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
834 // BASIC_FAST-NEXT: entry:
835 // BASIC_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
836 // BASIC_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
837 // BASIC_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
838 // BASIC_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
839 // BASIC_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
840 // BASIC_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
841 // BASIC_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
842 // BASIC_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
843 // BASIC_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
844 // BASIC_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
845 // BASIC_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
846 // BASIC_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
847 // BASIC_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
848 // BASIC_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
849 // BASIC_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
850 // BASIC_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
851 // BASIC_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
852 // BASIC_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
853 // BASIC_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
854 // BASIC_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
855 // BASIC_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
856 // BASIC_FAST-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
857 // BASIC_FAST-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
858 // BASIC_FAST-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
859 // BASIC_FAST-NEXT: ret <2 x float> [[TMP0]]
861 // FULL_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
862 // FULL_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
863 // FULL_FAST-NEXT: entry:
864 // FULL_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
865 // FULL_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
866 // FULL_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
867 // FULL_FAST-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4
868 // FULL_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
869 // FULL_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
870 // FULL_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
871 // FULL_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
872 // FULL_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
873 // FULL_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
874 // FULL_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
875 // FULL_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
876 // FULL_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
877 // FULL_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
878 // FULL_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
879 // FULL_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
880 // FULL_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
881 // FULL_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
882 // FULL_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
883 // FULL_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
884 // FULL_FAST-NEXT: [[ISNAN_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno float [[MUL_R]], [[MUL_R]]
885 // FULL_FAST-NEXT: br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2:![0-9]+]]
886 // FULL_FAST: complex_mul_imag_nan:
887 // FULL_FAST-NEXT: [[ISNAN_CMP1:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno float [[MUL_I]], [[MUL_I]]
888 // FULL_FAST-NEXT: br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
889 // FULL_FAST: complex_mul_libcall:
890 // FULL_FAST-NEXT: [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__mulsc3(float noundef nofpclass(nan inf) [[A_REAL]], float noundef nofpclass(nan inf) [[A_IMAG]], float noundef nofpclass(nan inf) [[B_REAL]], float noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
891 // FULL_FAST-NEXT: store <2 x float> [[CALL]], ptr [[COERCE]], align 4
892 // FULL_FAST-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
893 // FULL_FAST-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
894 // FULL_FAST-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
895 // FULL_FAST-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
896 // FULL_FAST-NEXT: br label [[COMPLEX_MUL_CONT]]
897 // FULL_FAST: complex_mul_cont:
898 // FULL_FAST-NEXT: [[REAL_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_REAL]], [[COMPLEX_MUL_LIBCALL]] ]
899 // FULL_FAST-NEXT: [[IMAG_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[COERCE_IMAG]], [[COMPLEX_MUL_LIBCALL]] ]
900 // FULL_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
901 // FULL_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
902 // FULL_FAST-NEXT: store float [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 4
903 // FULL_FAST-NEXT: store float [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 4
904 // FULL_FAST-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
905 // FULL_FAST-NEXT: ret <2 x float> [[TMP0]]
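// Worth noting from the FULL_FAST block above: full range keeps the NaN
// test and the __mulsc3 fallback even under -ffast-math; fast math only
// adds the reassoc/nnan/ninf/nsz/arcp/afn flags to the individual
// operations.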
907 // IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
908 // IMPRVD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
909 // IMPRVD_FAST-NEXT: entry:
910 // IMPRVD_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
911 // IMPRVD_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
912 // IMPRVD_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
913 // IMPRVD_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
914 // IMPRVD_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
915 // IMPRVD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
916 // IMPRVD_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
917 // IMPRVD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
918 // IMPRVD_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
919 // IMPRVD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
920 // IMPRVD_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
921 // IMPRVD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
922 // IMPRVD_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
923 // IMPRVD_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
924 // IMPRVD_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
925 // IMPRVD_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
926 // IMPRVD_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
927 // IMPRVD_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
928 // IMPRVD_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
929 // IMPRVD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
930 // IMPRVD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
931 // IMPRVD_FAST-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
932 // IMPRVD_FAST-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
933 // IMPRVD_FAST-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
934 // IMPRVD_FAST-NEXT: ret <2 x float> [[TMP0]]
936 // PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @mulf(
937 // PRMTD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x float> noundef nofpclass(nan inf) [[B_COERCE:%.*]]) #[[ATTR0]] {
938 // PRMTD_FAST-NEXT: entry:
939 // PRMTD_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
940 // PRMTD_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
941 // PRMTD_FAST-NEXT: [[B:%.*]] = alloca { float, float }, align 4
942 // PRMTD_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
943 // PRMTD_FAST-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
944 // PRMTD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
945 // PRMTD_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
946 // PRMTD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
947 // PRMTD_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
948 // PRMTD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
949 // PRMTD_FAST-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
950 // PRMTD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
951 // PRMTD_FAST-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
952 // PRMTD_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_REAL]]
953 // PRMTD_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_IMAG]]
954 // PRMTD_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[B_IMAG]]
955 // PRMTD_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[B_REAL]]
956 // PRMTD_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[MUL_AC]], [[MUL_BD]]
957 // PRMTD_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[MUL_AD]], [[MUL_BC]]
958 // PRMTD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
959 // PRMTD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
960 // PRMTD_FAST-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
961 // PRMTD_FAST-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
962 // PRMTD_FAST-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
963 // PRMTD_FAST-NEXT: ret <2 x float> [[TMP0]]
965 // X86WINPRMTD_STRICT-LABEL: define dso_local i64 @mulf(
966 // X86WINPRMTD_STRICT-SAME: i64 noundef [[A_COERCE:%.*]], i64 noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
967 // X86WINPRMTD_STRICT-NEXT: entry:
968 // X86WINPRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
969 // X86WINPRMTD_STRICT-NEXT: [[A:%.*]] = alloca { float, float }, align 4
970 // X86WINPRMTD_STRICT-NEXT: [[B:%.*]] = alloca { float, float }, align 4
971 // X86WINPRMTD_STRICT-NEXT: store i64 [[A_COERCE]], ptr [[A]], align 4
972 // X86WINPRMTD_STRICT-NEXT: store i64 [[B_COERCE]], ptr [[B]], align 4
973 // X86WINPRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
974 // X86WINPRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
975 // X86WINPRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
976 // X86WINPRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
977 // X86WINPRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
978 // X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
979 // X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
980 // X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
981 // X86WINPRMTD_STRICT-NEXT: [[MUL_AC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
982 // X86WINPRMTD_STRICT-NEXT: [[MUL_BD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
983 // X86WINPRMTD_STRICT-NEXT: [[MUL_AD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
984 // X86WINPRMTD_STRICT-NEXT: [[MUL_BC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
985 // X86WINPRMTD_STRICT-NEXT: [[MUL_R:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[MUL_AC]], float [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
986 // X86WINPRMTD_STRICT-NEXT: [[MUL_I:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[MUL_AD]], float [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
987 // X86WINPRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
988 // X86WINPRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
989 // X86WINPRMTD_STRICT-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
990 // X86WINPRMTD_STRICT-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
991 // X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = load i64, ptr [[RETVAL]], align 4
992 // X86WINPRMTD_STRICT-NEXT: ret i64 [[TMP0]]
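// Under -frounding-math -ffp-exception-behavior=strict (the two *_STRICT
// prefixes), the same four-multiply formula is emitted through
// llvm.experimental.constrained.fmul/fsub/fadd with round.dynamic and
// fpexcept.strict, so the operations observe the dynamic rounding mode
// and may not be reordered or folded.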
994 // PRMTD_STRICT-LABEL: define dso_local <2 x float> @mulf(
995 // PRMTD_STRICT-SAME: <2 x float> noundef [[A_COERCE:%.*]], <2 x float> noundef [[B_COERCE:%.*]]) #[[ATTR0]] {
996 // PRMTD_STRICT-NEXT: entry:
997 // PRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
998 // PRMTD_STRICT-NEXT: [[A:%.*]] = alloca { float, float }, align 4
999 // PRMTD_STRICT-NEXT: [[B:%.*]] = alloca { float, float }, align 4
1000 // PRMTD_STRICT-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
1001 // PRMTD_STRICT-NEXT: store <2 x float> [[B_COERCE]], ptr [[B]], align 4
1002 // PRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
1003 // PRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
1004 // PRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
1005 // PRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
1006 // PRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
1007 // PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 4
1008 // PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
1009 // PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 4
1010 // PRMTD_STRICT-NEXT: [[MUL_AC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
1011 // PRMTD_STRICT-NEXT: [[MUL_BD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
1012 // PRMTD_STRICT-NEXT: [[MUL_AD:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_REAL]], float [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
1013 // PRMTD_STRICT-NEXT: [[MUL_BC:%.*]] = call float @llvm.experimental.constrained.fmul.f32(float [[A_IMAG]], float [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
1014 // PRMTD_STRICT-NEXT: [[MUL_R:%.*]] = call float @llvm.experimental.constrained.fsub.f32(float [[MUL_AC]], float [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
1015 // PRMTD_STRICT-NEXT: [[MUL_I:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[MUL_AD]], float [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
1016 // PRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
1017 // PRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
1018 // PRMTD_STRICT-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 4
1019 // PRMTD_STRICT-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 4
1020 // PRMTD_STRICT-NEXT: [[TMP0:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
1021 // PRMTD_STRICT-NEXT: ret <2 x float> [[TMP0]]
1023 _Complex float mulf(_Complex float a, _Complex float b) {
1024   return a * b;
1025 }
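// A minimal reference sketch of the lowering the mulf checks above expect;
// the helper name mulf_sketch is illustrative only, and the block is kept
// under #if 0 so the IR this test verifies is unchanged.
#if 0
static _Complex float mulf_sketch(_Complex float a, _Complex float b) {
  float ac = __real__ a * __real__ b; // MUL_AC
  float bd = __imag__ a * __imag__ b; // MUL_BD
  float ad = __real__ a * __imag__ b; // MUL_AD
  float bc = __imag__ a * __real__ b; // MUL_BC
  // basic/improved/promoted return this directly; full and full_fast first
  // check both parts for NaN and fall back to the __mulsc3 libcall.
  return __builtin_complex(ac - bd, ad + bc);
}
#endif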
1027 // FULL-LABEL: define dso_local { double, double } @divd(
1028 // FULL-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
1029 // FULL-NEXT: entry:
1030 // FULL-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
1031 // FULL-NEXT: [[A:%.*]] = alloca { double, double }, align 8
1032 // FULL-NEXT: [[B:%.*]] = alloca { double, double }, align 8
1033 // FULL-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1034 // FULL-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
1035 // FULL-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1036 // FULL-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
1037 // FULL-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1038 // FULL-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
1039 // FULL-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1040 // FULL-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
1041 // FULL-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1042 // FULL-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
1043 // FULL-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1044 // FULL-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
1045 // FULL-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1046 // FULL-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
1047 // FULL-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1048 // FULL-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
1049 // FULL-NEXT: [[CALL:%.*]] = call { double, double } @__divdc3(double noundef [[A_REAL]], double noundef [[A_IMAG]], double noundef [[B_REAL]], double noundef [[B_IMAG]]) #[[ATTR2]]
1050 // FULL-NEXT: [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
1051 // FULL-NEXT: [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
1052 // FULL-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
1053 // FULL-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
1054 // FULL-NEXT: store double [[TMP4]], ptr [[RETVAL_REALP]], align 8
1055 // FULL-NEXT: store double [[TMP5]], ptr [[RETVAL_IMAGP]], align 8
1056 // FULL-NEXT: [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
1057 // FULL-NEXT: ret { double, double } [[TMP6]]
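// The FULL divd block above never expands the division inline: it
// unconditionally calls the __divdc3 runtime function, which implements
// the C Annex G treatment of infinities and NaNs.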
1059 // BASIC-LABEL: define dso_local { double, double } @divd(
1060 // BASIC-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
1061 // BASIC-NEXT: entry:
1062 // BASIC-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
1063 // BASIC-NEXT: [[A:%.*]] = alloca { double, double }, align 8
1064 // BASIC-NEXT: [[B:%.*]] = alloca { double, double }, align 8
1065 // BASIC-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1066 // BASIC-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
1067 // BASIC-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1068 // BASIC-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
1069 // BASIC-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1070 // BASIC-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
1071 // BASIC-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1072 // BASIC-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
1073 // BASIC-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1074 // BASIC-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
1075 // BASIC-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1076 // BASIC-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
1077 // BASIC-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1078 // BASIC-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
1079 // BASIC-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1080 // BASIC-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
1081 // BASIC-NEXT: [[TMP4:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
1082 // BASIC-NEXT: [[TMP5:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
1083 // BASIC-NEXT: [[TMP6:%.*]] = fadd double [[TMP4]], [[TMP5]]
1084 // BASIC-NEXT: [[TMP7:%.*]] = fmul double [[B_REAL]], [[B_REAL]]
1085 // BASIC-NEXT: [[TMP8:%.*]] = fmul double [[B_IMAG]], [[B_IMAG]]
1086 // BASIC-NEXT: [[TMP9:%.*]] = fadd double [[TMP7]], [[TMP8]]
1087 // BASIC-NEXT: [[TMP10:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
1088 // BASIC-NEXT: [[TMP11:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
1089 // BASIC-NEXT: [[TMP12:%.*]] = fsub double [[TMP10]], [[TMP11]]
1090 // BASIC-NEXT: [[TMP13:%.*]] = fdiv double [[TMP6]], [[TMP9]]
1091 // BASIC-NEXT: [[TMP14:%.*]] = fdiv double [[TMP12]], [[TMP9]]
1092 // BASIC-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
1093 // BASIC-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
1094 // BASIC-NEXT: store double [[TMP13]], ptr [[RETVAL_REALP]], align 8
1095 // BASIC-NEXT: store double [[TMP14]], ptr [[RETVAL_IMAGP]], align 8
1096 // BASIC-NEXT: [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
1097 // BASIC-NEXT: ret { double, double } [[TMP15]]
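// The BASIC divd block above is the textbook quotient evaluated directly:
//   (a+bi)/(c+di) = ((a*c + b*d) + (b*c - a*d)i) / (c*c + d*d)
// with no scaling, so the c*c + d*d denominator can overflow or underflow
// for large or tiny operands.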
1099 // IMPRVD-LABEL: define dso_local { double, double } @divd(
1100 // IMPRVD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2:[0-9]+]] {
1101 // IMPRVD-NEXT: entry:
1102 // IMPRVD-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
1103 // IMPRVD-NEXT: [[A:%.*]] = alloca { double, double }, align 8
1104 // IMPRVD-NEXT: [[B:%.*]] = alloca { double, double }, align 8
1105 // IMPRVD-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1106 // IMPRVD-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
1107 // IMPRVD-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1108 // IMPRVD-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
1109 // IMPRVD-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1110 // IMPRVD-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
1111 // IMPRVD-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1112 // IMPRVD-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
1113 // IMPRVD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1114 // IMPRVD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
1115 // IMPRVD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1116 // IMPRVD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
1117 // IMPRVD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1118 // IMPRVD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
1119 // IMPRVD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1120 // IMPRVD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
1121 // IMPRVD-NEXT: [[TMP4:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
1122 // IMPRVD-NEXT: [[TMP5:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
1123 // IMPRVD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt double [[TMP4]], [[TMP5]]
1124 // IMPRVD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
1125 // IMPRVD: abs_rhsr_greater_or_equal_abs_rhsi:
1126 // IMPRVD-NEXT: [[TMP6:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
1127 // IMPRVD-NEXT: [[TMP7:%.*]] = fmul double [[TMP6]], [[B_IMAG]]
1128 // IMPRVD-NEXT: [[TMP8:%.*]] = fadd double [[B_REAL]], [[TMP7]]
1129 // IMPRVD-NEXT: [[TMP9:%.*]] = fmul double [[A_IMAG]], [[TMP6]]
1130 // IMPRVD-NEXT: [[TMP10:%.*]] = fadd double [[A_REAL]], [[TMP9]]
1131 // IMPRVD-NEXT: [[TMP11:%.*]] = fdiv double [[TMP10]], [[TMP8]]
1132 // IMPRVD-NEXT: [[TMP12:%.*]] = fmul double [[A_REAL]], [[TMP6]]
1133 // IMPRVD-NEXT: [[TMP13:%.*]] = fsub double [[A_IMAG]], [[TMP12]]
1134 // IMPRVD-NEXT: [[TMP14:%.*]] = fdiv double [[TMP13]], [[TMP8]]
1135 // IMPRVD-NEXT: br label [[COMPLEX_DIV:%.*]]
1136 // IMPRVD: abs_rhsr_less_than_abs_rhsi:
1137 // IMPRVD-NEXT: [[TMP15:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
1138 // IMPRVD-NEXT: [[TMP16:%.*]] = fmul double [[TMP15]], [[B_REAL]]
1139 // IMPRVD-NEXT: [[TMP17:%.*]] = fadd double [[B_IMAG]], [[TMP16]]
1140 // IMPRVD-NEXT: [[TMP18:%.*]] = fmul double [[A_REAL]], [[TMP15]]
1141 // IMPRVD-NEXT: [[TMP19:%.*]] = fadd double [[TMP18]], [[A_IMAG]]
1142 // IMPRVD-NEXT: [[TMP20:%.*]] = fdiv double [[TMP19]], [[TMP17]]
1143 // IMPRVD-NEXT: [[TMP21:%.*]] = fmul double [[A_IMAG]], [[TMP15]]
1144 // IMPRVD-NEXT: [[TMP22:%.*]] = fsub double [[TMP21]], [[A_REAL]]
1145 // IMPRVD-NEXT: [[TMP23:%.*]] = fdiv double [[TMP22]], [[TMP17]]
1146 // IMPRVD-NEXT: br label [[COMPLEX_DIV]]
1147 // IMPRVD: complex_div:
1148 // IMPRVD-NEXT: [[TMP24:%.*]] = phi double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
1149 // IMPRVD-NEXT: [[TMP25:%.*]] = phi double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
1150 // IMPRVD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
1151 // IMPRVD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
1152 // IMPRVD-NEXT: store double [[TMP24]], ptr [[RETVAL_REALP]], align 8
1153 // IMPRVD-NEXT: store double [[TMP25]], ptr [[RETVAL_IMAGP]], align 8
1154 // IMPRVD-NEXT: [[TMP26:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
1155 // IMPRVD-NEXT: ret { double, double } [[TMP26]]
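// The IMPRVD divd block above is Smith's algorithm: branch on which of
// |c| and |d| is larger and divide through by it first. When |c| >= |d|:
//   r = d/c; den = c + d*r;
//   real = (a + b*r)/den; imag = (b - a*r)/den;
// and symmetrically with r = c/d otherwise, which keeps the intermediate
// products in range without the cost of a libcall.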
1157 // PRMTD-LABEL: define dso_local { double, double } @divd(
1158 // PRMTD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
1159 // PRMTD-NEXT: entry:
1160 // PRMTD-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
1161 // PRMTD-NEXT: [[A:%.*]] = alloca { double, double }, align 8
1162 // PRMTD-NEXT: [[B:%.*]] = alloca { double, double }, align 8
1163 // PRMTD-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1164 // PRMTD-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
1165 // PRMTD-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1166 // PRMTD-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
1167 // PRMTD-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1168 // PRMTD-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
1169 // PRMTD-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1170 // PRMTD-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
1171 // PRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1172 // PRMTD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
1173 // PRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1174 // PRMTD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
1175 // PRMTD-NEXT: [[EXT:%.*]] = fpext double [[A_REAL]] to x86_fp80
1176 // PRMTD-NEXT: [[EXT1:%.*]] = fpext double [[A_IMAG]] to x86_fp80
1177 // PRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1178 // PRMTD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
1179 // PRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1180 // PRMTD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
1181 // PRMTD-NEXT: [[EXT2:%.*]] = fpext double [[B_REAL]] to x86_fp80
1182 // PRMTD-NEXT: [[EXT3:%.*]] = fpext double [[B_IMAG]] to x86_fp80
1183 // PRMTD-NEXT: [[TMP4:%.*]] = fmul x86_fp80 [[EXT]], [[EXT2]]
1184 // PRMTD-NEXT: [[TMP5:%.*]] = fmul x86_fp80 [[EXT1]], [[EXT3]]
1185 // PRMTD-NEXT: [[TMP6:%.*]] = fadd x86_fp80 [[TMP4]], [[TMP5]]
1186 // PRMTD-NEXT: [[TMP7:%.*]] = fmul x86_fp80 [[EXT2]], [[EXT2]]
1187 // PRMTD-NEXT: [[TMP8:%.*]] = fmul x86_fp80 [[EXT3]], [[EXT3]]
1188 // PRMTD-NEXT: [[TMP9:%.*]] = fadd x86_fp80 [[TMP7]], [[TMP8]]
1189 // PRMTD-NEXT: [[TMP10:%.*]] = fmul x86_fp80 [[EXT1]], [[EXT2]]
1190 // PRMTD-NEXT: [[TMP11:%.*]] = fmul x86_fp80 [[EXT]], [[EXT3]]
1191 // PRMTD-NEXT: [[TMP12:%.*]] = fsub x86_fp80 [[TMP10]], [[TMP11]]
1192 // PRMTD-NEXT: [[TMP13:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP9]]
1193 // PRMTD-NEXT: [[TMP14:%.*]] = fdiv x86_fp80 [[TMP12]], [[TMP9]]
1194 // PRMTD-NEXT: [[UNPROMOTION:%.*]] = fptrunc x86_fp80 [[TMP13]] to double
1195 // PRMTD-NEXT: [[UNPROMOTION4:%.*]] = fptrunc x86_fp80 [[TMP14]] to double
1196 // PRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
1197 // PRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
1198 // PRMTD-NEXT: store double [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 8
1199 // PRMTD-NEXT: store double [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 8
1200 // PRMTD-NEXT: [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
1201 // PRMTD-NEXT: ret { double, double } [[TMP15]]
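// The PRMTD divd block above shows the promoted strategy when a wider
// type exists: both operands are fpext'ed to x86_fp80, the basic quotient
// is computed in the wider type, and the results are fptrunc'ed back
// (UNPROMOTION), relying on the larger exponent range instead of
// branching.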
1203 // X86WINPRMTD-LABEL: define dso_local void @divd(
1204 // X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
1205 // X86WINPRMTD-NEXT: entry:
1206 // X86WINPRMTD-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
1207 // X86WINPRMTD-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
1208 // X86WINPRMTD-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
1209 // X86WINPRMTD-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
1210 // X86WINPRMTD-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
1211 // X86WINPRMTD-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
1212 // X86WINPRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
1213 // X86WINPRMTD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
1214 // X86WINPRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
1215 // X86WINPRMTD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
1216 // X86WINPRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
1217 // X86WINPRMTD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
1218 // X86WINPRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
1219 // X86WINPRMTD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
1220 // X86WINPRMTD-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
1221 // X86WINPRMTD-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
1222 // X86WINPRMTD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt double [[TMP0]], [[TMP1]]
1223 // X86WINPRMTD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
1224 // X86WINPRMTD: abs_rhsr_greater_or_equal_abs_rhsi:
1225 // X86WINPRMTD-NEXT: [[TMP2:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
1226 // X86WINPRMTD-NEXT: [[TMP3:%.*]] = fmul double [[TMP2]], [[B_IMAG]]
1227 // X86WINPRMTD-NEXT: [[TMP4:%.*]] = fadd double [[B_REAL]], [[TMP3]]
1228 // X86WINPRMTD-NEXT: [[TMP5:%.*]] = fmul double [[A_IMAG]], [[TMP2]]
1229 // X86WINPRMTD-NEXT: [[TMP6:%.*]] = fadd double [[A_REAL]], [[TMP5]]
1230 // X86WINPRMTD-NEXT: [[TMP7:%.*]] = fdiv double [[TMP6]], [[TMP4]]
1231 // X86WINPRMTD-NEXT: [[TMP8:%.*]] = fmul double [[A_REAL]], [[TMP2]]
1232 // X86WINPRMTD-NEXT: [[TMP9:%.*]] = fsub double [[A_IMAG]], [[TMP8]]
1233 // X86WINPRMTD-NEXT: [[TMP10:%.*]] = fdiv double [[TMP9]], [[TMP4]]
1234 // X86WINPRMTD-NEXT: br label [[COMPLEX_DIV:%.*]]
1235 // X86WINPRMTD: abs_rhsr_less_than_abs_rhsi:
1236 // X86WINPRMTD-NEXT: [[TMP11:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
1237 // X86WINPRMTD-NEXT: [[TMP12:%.*]] = fmul double [[TMP11]], [[B_REAL]]
1238 // X86WINPRMTD-NEXT: [[TMP13:%.*]] = fadd double [[B_IMAG]], [[TMP12]]
1239 // X86WINPRMTD-NEXT: [[TMP14:%.*]] = fmul double [[A_REAL]], [[TMP11]]
1240 // X86WINPRMTD-NEXT: [[TMP15:%.*]] = fadd double [[TMP14]], [[A_IMAG]]
1241 // X86WINPRMTD-NEXT: [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP13]]
1242 // X86WINPRMTD-NEXT: [[TMP17:%.*]] = fmul double [[A_IMAG]], [[TMP11]]
1243 // X86WINPRMTD-NEXT: [[TMP18:%.*]] = fsub double [[TMP17]], [[A_REAL]]
1244 // X86WINPRMTD-NEXT: [[TMP19:%.*]] = fdiv double [[TMP18]], [[TMP13]]
1245 // X86WINPRMTD-NEXT: br label [[COMPLEX_DIV]]
1246 // X86WINPRMTD: complex_div:
1247 // X86WINPRMTD-NEXT: [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
1248 // X86WINPRMTD-NEXT: [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
1249 // X86WINPRMTD-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
1250 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
1251 // X86WINPRMTD-NEXT: store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
1252 // X86WINPRMTD-NEXT: store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
1253 // X86WINPRMTD-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
1254 // X86WINPRMTD-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
1255 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
1256 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
1257 // X86WINPRMTD-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
1258 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
1259 // X86WINPRMTD-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
1260 // X86WINPRMTD-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
1261 // X86WINPRMTD-NEXT: ret void
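// Two things differ on x86_64-windows above: { double, double } is
// returned indirectly (sret) and passed by pointer per the Windows ABI,
// and since long double is only 64 bits there, promoted range has no
// wider type to promote to and falls back to the same branchy Smith
// lowering as improved.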
1263 // AVRFP32-LABEL: define dso_local { float, float } @divd(
1264 // AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
1265 // AVRFP32-NEXT: entry:
1266 // AVRFP32-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
1267 // AVRFP32-NEXT: [[A:%.*]] = alloca { float, float }, align 1
1268 // AVRFP32-NEXT: [[B:%.*]] = alloca { float, float }, align 1
1269 // AVRFP32-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
1270 // AVRFP32-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
1271 // AVRFP32-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
1272 // AVRFP32-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
1273 // AVRFP32-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
1274 // AVRFP32-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
1275 // AVRFP32-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
1276 // AVRFP32-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
1277 // AVRFP32-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
1278 // AVRFP32-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
1279 // AVRFP32-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
1280 // AVRFP32-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
1281 // AVRFP32-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
1282 // AVRFP32-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
1283 // AVRFP32-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
1284 // AVRFP32-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
1285 // AVRFP32-NEXT: [[TMP4:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_REAL]])
1286 // AVRFP32-NEXT: [[TMP5:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_IMAG]])
1287 // AVRFP32-NEXT: [[ABS_CMP:%.*]] = fcmp ugt float [[TMP4]], [[TMP5]]
1288 // AVRFP32-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
1289 // AVRFP32: abs_rhsr_greater_or_equal_abs_rhsi:
1290 // AVRFP32-NEXT: [[TMP6:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
1291 // AVRFP32-NEXT: [[TMP7:%.*]] = fmul float [[TMP6]], [[B_IMAG]]
1292 // AVRFP32-NEXT: [[TMP8:%.*]] = fadd float [[B_REAL]], [[TMP7]]
1293 // AVRFP32-NEXT: [[TMP9:%.*]] = fmul float [[A_IMAG]], [[TMP6]]
1294 // AVRFP32-NEXT: [[TMP10:%.*]] = fadd float [[A_REAL]], [[TMP9]]
1295 // AVRFP32-NEXT: [[TMP11:%.*]] = fdiv float [[TMP10]], [[TMP8]]
1296 // AVRFP32-NEXT: [[TMP12:%.*]] = fmul float [[A_REAL]], [[TMP6]]
1297 // AVRFP32-NEXT: [[TMP13:%.*]] = fsub float [[A_IMAG]], [[TMP12]]
1298 // AVRFP32-NEXT: [[TMP14:%.*]] = fdiv float [[TMP13]], [[TMP8]]
1299 // AVRFP32-NEXT: br label [[COMPLEX_DIV:%.*]]
1300 // AVRFP32: abs_rhsr_less_than_abs_rhsi:
1301 // AVRFP32-NEXT: [[TMP15:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
1302 // AVRFP32-NEXT: [[TMP16:%.*]] = fmul float [[TMP15]], [[B_REAL]]
1303 // AVRFP32-NEXT: [[TMP17:%.*]] = fadd float [[B_IMAG]], [[TMP16]]
1304 // AVRFP32-NEXT: [[TMP18:%.*]] = fmul float [[A_REAL]], [[TMP15]]
1305 // AVRFP32-NEXT: [[TMP19:%.*]] = fadd float [[TMP18]], [[A_IMAG]]
1306 // AVRFP32-NEXT: [[TMP20:%.*]] = fdiv float [[TMP19]], [[TMP17]]
1307 // AVRFP32-NEXT: [[TMP21:%.*]] = fmul float [[A_IMAG]], [[TMP15]]
1308 // AVRFP32-NEXT: [[TMP22:%.*]] = fsub float [[TMP21]], [[A_REAL]]
1309 // AVRFP32-NEXT: [[TMP23:%.*]] = fdiv float [[TMP22]], [[TMP17]]
1310 // AVRFP32-NEXT: br label [[COMPLEX_DIV]]
1311 // AVRFP32: complex_div:
1312 // AVRFP32-NEXT: [[TMP24:%.*]] = phi float [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
1313 // AVRFP32-NEXT: [[TMP25:%.*]] = phi float [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
1314 // AVRFP32-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
1315 // AVRFP32-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
1316 // AVRFP32-NEXT: store float [[TMP24]], ptr [[RETVAL_REALP]], align 1
1317 // AVRFP32-NEXT: store float [[TMP25]], ptr [[RETVAL_IMAGP]], align 1
1318 // AVRFP32-NEXT: [[TMP26:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
1319 // AVRFP32-NEXT: ret { float, float } [[TMP26]]
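// With -mdouble=32 (AVRFP32), double is the same 32-bit type as float, so
// divd above is just the float division again, with the byte alignment
// (align 1) AVR uses throughout.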
// AVRFP64-LABEL: define dso_local void @divd(
// AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT: entry:
// AVRFP64-NEXT: [[A:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT: [[B:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 1
// AVRFP64-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
// AVRFP64-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT: [[TMP4:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_REAL]])
// AVRFP64-NEXT: [[TMP5:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_IMAG]])
// AVRFP64-NEXT: [[ABS_CMP:%.*]] = fcmp ugt double [[TMP4]], [[TMP5]]
// AVRFP64-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP64: abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP64-NEXT: [[TMP6:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// AVRFP64-NEXT: [[TMP7:%.*]] = fmul double [[TMP6]], [[B_IMAG]]
// AVRFP64-NEXT: [[TMP8:%.*]] = fadd double [[B_REAL]], [[TMP7]]
// AVRFP64-NEXT: [[TMP9:%.*]] = fmul double [[A_IMAG]], [[TMP6]]
// AVRFP64-NEXT: [[TMP10:%.*]] = fadd double [[A_REAL]], [[TMP9]]
// AVRFP64-NEXT: [[TMP11:%.*]] = fdiv double [[TMP10]], [[TMP8]]
// AVRFP64-NEXT: [[TMP12:%.*]] = fmul double [[A_REAL]], [[TMP6]]
// AVRFP64-NEXT: [[TMP13:%.*]] = fsub double [[A_IMAG]], [[TMP12]]
// AVRFP64-NEXT: [[TMP14:%.*]] = fdiv double [[TMP13]], [[TMP8]]
// AVRFP64-NEXT: br label [[COMPLEX_DIV:%.*]]
// AVRFP64: abs_rhsr_less_than_abs_rhsi:
// AVRFP64-NEXT: [[TMP15:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// AVRFP64-NEXT: [[TMP16:%.*]] = fmul double [[TMP15]], [[B_REAL]]
// AVRFP64-NEXT: [[TMP17:%.*]] = fadd double [[B_IMAG]], [[TMP16]]
// AVRFP64-NEXT: [[TMP18:%.*]] = fmul double [[A_REAL]], [[TMP15]]
// AVRFP64-NEXT: [[TMP19:%.*]] = fadd double [[TMP18]], [[A_IMAG]]
// AVRFP64-NEXT: [[TMP20:%.*]] = fdiv double [[TMP19]], [[TMP17]]
// AVRFP64-NEXT: [[TMP21:%.*]] = fmul double [[A_IMAG]], [[TMP15]]
// AVRFP64-NEXT: [[TMP22:%.*]] = fsub double [[TMP21]], [[A_REAL]]
// AVRFP64-NEXT: [[TMP23:%.*]] = fdiv double [[TMP22]], [[TMP17]]
// AVRFP64-NEXT: br label [[COMPLEX_DIV]]
// AVRFP64: complex_div:
// AVRFP64-NEXT: [[TMP24:%.*]] = phi double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT: [[TMP25:%.*]] = phi double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: store double [[TMP24]], ptr [[AGG_RESULT_REALP]], align 1
// AVRFP64-NEXT: store double [[TMP25]], ptr [[AGG_RESULT_IMAGP]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
// AVRFP64-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
// AVRFP64-NEXT: ret void
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// BASIC_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// BASIC_FAST-NEXT: entry:
// BASIC_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// BASIC_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// BASIC_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// BASIC_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// BASIC_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// BASIC_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// BASIC_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// BASIC_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// BASIC_FAST-NEXT: [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP4]], [[TMP5]]
// BASIC_FAST-NEXT: [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[B_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[B_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP9:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP7]], [[TMP8]]
// BASIC_FAST-NEXT: [[TMP10:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP11:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP12:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP10]], [[TMP11]]
// BASIC_FAST-NEXT: [[TMP13:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP6]], [[TMP9]]
// BASIC_FAST-NEXT: [[TMP14:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP12]], [[TMP9]]
// BASIC_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT: store double [[TMP13]], ptr [[RETVAL_REALP]], align 8
// BASIC_FAST-NEXT: store double [[TMP14]], ptr [[RETVAL_IMAGP]], align 8
// BASIC_FAST-NEXT: [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// BASIC_FAST-NEXT: ret { double, double } [[TMP15]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// FULL_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// FULL_FAST-NEXT: entry:
// FULL_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// FULL_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// FULL_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// FULL_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// FULL_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// FULL_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// FULL_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// FULL_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// FULL_FAST-NEXT: [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { double, double } @__divdc3(double noundef nofpclass(nan inf) [[A_REAL]], double noundef nofpclass(nan inf) [[A_IMAG]], double noundef nofpclass(nan inf) [[B_REAL]], double noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT: [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
// FULL_FAST-NEXT: [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
// FULL_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT: store double [[TMP4]], ptr [[RETVAL_REALP]], align 8
// FULL_FAST-NEXT: store double [[TMP5]], ptr [[RETVAL_IMAGP]], align 8
// FULL_FAST-NEXT: [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// FULL_FAST-NEXT: ret { double, double } [[TMP6]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// IMPRVD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR2:[0-9]+]] {
// IMPRVD_FAST-NEXT: entry:
// IMPRVD_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// IMPRVD_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// IMPRVD_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// IMPRVD_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// IMPRVD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// IMPRVD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// IMPRVD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// IMPRVD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// IMPRVD_FAST-NEXT: [[TMP4:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.fabs.f64(double [[B_REAL]])
// IMPRVD_FAST-NEXT: [[TMP5:%.*]] = call reassoc nnan ninf nsz arcp afn double @llvm.fabs.f64(double [[B_IMAG]])
// IMPRVD_FAST-NEXT: [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt double [[TMP4]], [[TMP5]]
// IMPRVD_FAST-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST: abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP6:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[B_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[TMP6]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP8:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[B_REAL]], [[TMP7]]
// IMPRVD_FAST-NEXT: [[TMP9:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[TMP6]]
// IMPRVD_FAST-NEXT: [[TMP10:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[TMP9]]
// IMPRVD_FAST-NEXT: [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP10]], [[TMP8]]
// IMPRVD_FAST-NEXT: [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[TMP6]]
// IMPRVD_FAST-NEXT: [[TMP13:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[TMP12]]
// IMPRVD_FAST-NEXT: [[TMP14:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP13]], [[TMP8]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST: abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP15:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[B_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP16:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[TMP15]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[TMP17:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[B_IMAG]], [[TMP16]]
// IMPRVD_FAST-NEXT: [[TMP18:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[TMP15]]
// IMPRVD_FAST-NEXT: [[TMP19:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP18]], [[A_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP20:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP19]], [[TMP17]]
// IMPRVD_FAST-NEXT: [[TMP21:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[TMP15]]
// IMPRVD_FAST-NEXT: [[TMP22:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP21]], [[A_REAL]]
// IMPRVD_FAST-NEXT: [[TMP23:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP22]], [[TMP17]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV]]
// IMPRVD_FAST: complex_div:
// IMPRVD_FAST-NEXT: [[TMP24:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[TMP25:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store double [[TMP24]], ptr [[RETVAL_REALP]], align 8
// IMPRVD_FAST-NEXT: store double [[TMP25]], ptr [[RETVAL_IMAGP]], align 8
// IMPRVD_FAST-NEXT: [[TMP26:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// IMPRVD_FAST-NEXT: ret { double, double } [[TMP26]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @divd(
// PRMTD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1:[0-9]+]] {
// PRMTD_FAST-NEXT: entry:
// PRMTD_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_FAST-NEXT: [[EXT:%.*]] = fpext double [[A_REAL]] to x86_fp80
// PRMTD_FAST-NEXT: [[EXT1:%.*]] = fpext double [[A_IMAG]] to x86_fp80
// PRMTD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_FAST-NEXT: [[EXT2:%.*]] = fpext double [[B_REAL]] to x86_fp80
// PRMTD_FAST-NEXT: [[EXT3:%.*]] = fpext double [[B_IMAG]] to x86_fp80
// PRMTD_FAST-NEXT: [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT]], [[EXT2]]
// PRMTD_FAST-NEXT: [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT1]], [[EXT3]]
// PRMTD_FAST-NEXT: [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP4]], [[TMP5]]
// PRMTD_FAST-NEXT: [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT2]], [[EXT2]]
// PRMTD_FAST-NEXT: [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT3]], [[EXT3]]
// PRMTD_FAST-NEXT: [[TMP9:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP7]], [[TMP8]]
// PRMTD_FAST-NEXT: [[TMP10:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT1]], [[EXT2]]
// PRMTD_FAST-NEXT: [[TMP11:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[EXT]], [[EXT3]]
// PRMTD_FAST-NEXT: [[TMP12:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP10]], [[TMP11]]
// PRMTD_FAST-NEXT: [[TMP13:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP9]]
// PRMTD_FAST-NEXT: [[TMP14:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP12]], [[TMP9]]
// PRMTD_FAST-NEXT: [[UNPROMOTION:%.*]] = fptrunc x86_fp80 [[TMP13]] to double
// PRMTD_FAST-NEXT: [[UNPROMOTION4:%.*]] = fptrunc x86_fp80 [[TMP14]] to double
// PRMTD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT: store double [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 8
// PRMTD_FAST-NEXT: store double [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_FAST-NEXT: [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_FAST-NEXT: ret { double, double } [[TMP15]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local void @divd(
// X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT: entry:
// X86WINPRMTD_STRICT-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD_STRICT-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP2]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP4:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_REAL]], double [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP5:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP6:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A_REAL]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP7:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP6]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP8:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP9:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A_IMAG]], double [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP9]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD_STRICT: abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP11]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_IMAG]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP14:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[A_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP16:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP15]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP18:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP17]], double [[A_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP18]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br label [[COMPLEX_DIV]]
// X86WINPRMTD_STRICT: complex_div:
// X86WINPRMTD_STRICT-NEXT: [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT: [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD_STRICT-NEXT: ret void
//
// PRMTD_STRICT-LABEL: define dso_local { double, double } @divd(
// PRMTD_STRICT-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2:[0-9]+]] {
// PRMTD_STRICT-NEXT: entry:
// PRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_STRICT-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_STRICT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_STRICT-NEXT: [[EXT:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[EXT1:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_STRICT-NEXT: [[EXT2:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[B_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[EXT3:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f64(double [[B_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP4:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT]], x86_fp80 [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP5:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT1]], x86_fp80 [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP6:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP4]], x86_fp80 [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP7:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT2]], x86_fp80 [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP8:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT3]], x86_fp80 [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP9:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP7]], x86_fp80 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP10:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT1]], x86_fp80 [[EXT2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP11:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[EXT]], x86_fp80 [[EXT3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP12:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[TMP10]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP13:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP6]], x86_fp80 [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP14:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP12]], x86_fp80 [[TMP9]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[UNPROMOTION:%.*]] = call double @llvm.experimental.constrained.fptrunc.f64.f80(x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[UNPROMOTION4:%.*]] = call double @llvm.experimental.constrained.fptrunc.f64.f80(x86_fp80 [[TMP14]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store double [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 8
// PRMTD_STRICT-NEXT: store double [[UNPROMOTION4]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_STRICT-NEXT: [[TMP15:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_STRICT-NEXT: ret { double, double } [[TMP15]]
//
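// Commentary, not a FileCheck directive: the X86WINPRMTD_STRICT and
// PRMTD_STRICT blocks above exercise -frounding-math together with
// -ffp-exception-behavior=strict. Every arithmetic step is emitted as an
// llvm.experimental.constrained.* intrinsic whose !"round.dynamic" and
// !"fpexcept.strict" arguments tell the optimizer it may neither assume the
// default rounding mode nor move the operation past points that observe FP
// exception state.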
_Complex double divd(_Complex double a, _Complex double b) {
  return a / b;
}
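//
// Commentary, not a FileCheck directive: a minimal C sketch of the rescaled
// (Smith-style) division that the branchy blocks above implement; the names
// ar/ai/br/bi/r/den are illustrative only, not part of the generated code.
//
//   double ar = __real__ a, ai = __imag__ a;
//   double br = __real__ b, bi = __imag__ b;
//   double re, im;
//   if (__builtin_fabs(br) > __builtin_fabs(bi)) {
//     double r = bi / br, den = br + bi * r;
//     re = (ar + ai * r) / den;   // [[TMP11]] in the AVRFP64 block
//     im = (ai - ar * r) / den;   // [[TMP14]]
//   } else {
//     double r = br / bi, den = bi + br * r;
//     re = (ar * r + ai) / den;   // [[TMP20]]
//     im = (ai * r - ar) / den;   // [[TMP23]]
//   }
//
// Dividing through by the larger-magnitude component of b keeps den close to
// |b|, avoiding the premature overflow/underflow of the textbook formula that
// the BASIC and BASIC_FAST blocks emit.
//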
// FULL-LABEL: define dso_local { double, double } @muld(
// FULL-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1]] {
// FULL-NEXT: entry:
// FULL-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// FULL-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// FULL-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// FULL-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// FULL-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// FULL-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// FULL-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// FULL-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// FULL-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// FULL-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// FULL-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// FULL-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// FULL-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// FULL-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// FULL-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// FULL-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// FULL-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// FULL-NEXT: [[ISNAN_CMP:%.*]] = fcmp uno double [[MUL_R]], [[MUL_R]]
// FULL-NEXT: br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
// FULL: complex_mul_imag_nan:
// FULL-NEXT: [[ISNAN_CMP1:%.*]] = fcmp uno double [[MUL_I]], [[MUL_I]]
// FULL-NEXT: br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL: complex_mul_libcall:
// FULL-NEXT: [[CALL:%.*]] = call { double, double } @__muldc3(double noundef [[A_REAL]], double noundef [[A_IMAG]], double noundef [[B_REAL]], double noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT: [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
// FULL-NEXT: [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
// FULL-NEXT: br label [[COMPLEX_MUL_CONT]]
// FULL: complex_mul_cont:
// FULL-NEXT: [[REAL_MUL_PHI:%.*]] = phi double [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP4]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT: [[IMAG_MUL_PHI:%.*]] = phi double [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP5]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT: store double [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 8
// FULL-NEXT: store double [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 8
// FULL-NEXT: [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// FULL-NEXT: ret { double, double } [[TMP6]]
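//
// Commentary, not a FileCheck directive: in the FULL block above the inline
// product is used directly unless both tentative components compare unordered
// with themselves (i.e. both are NaN); only then does control reach the
// __muldc3 libcall, which is expected to apply the usual Annex G-style
// recovery for infinite operands.
//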
// BASIC-LABEL: define dso_local { double, double } @muld(
// BASIC-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1]] {
// BASIC-NEXT: entry:
// BASIC-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// BASIC-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// BASIC-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// BASIC-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// BASIC-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// BASIC-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// BASIC-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// BASIC-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// BASIC-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// BASIC-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// BASIC-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// BASIC-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// BASIC-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT: store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// BASIC-NEXT: store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// BASIC-NEXT: [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// BASIC-NEXT: ret { double, double } [[TMP4]]
//
// IMPRVD-LABEL: define dso_local { double, double } @muld(
// IMPRVD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2]] {
// IMPRVD-NEXT: entry:
// IMPRVD-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// IMPRVD-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// IMPRVD-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// IMPRVD-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// IMPRVD-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// IMPRVD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// IMPRVD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// IMPRVD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// IMPRVD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// IMPRVD-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// IMPRVD-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// IMPRVD-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// IMPRVD-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// IMPRVD-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// IMPRVD-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// IMPRVD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT: store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// IMPRVD-NEXT: store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// IMPRVD-NEXT: [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// IMPRVD-NEXT: ret { double, double } [[TMP4]]
//
// PRMTD-LABEL: define dso_local { double, double } @muld(
// PRMTD-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR1]] {
// PRMTD-NEXT: entry:
// PRMTD-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// PRMTD-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// PRMTD-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// PRMTD-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// PRMTD-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// PRMTD-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// PRMTD-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// PRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT: store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// PRMTD-NEXT: store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD-NEXT: [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD-NEXT: ret { double, double } [[TMP4]]
//
// X86WINPRMTD-LABEL: define dso_local void @muld(
// X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT: entry:
// X86WINPRMTD-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// X86WINPRMTD-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// X86WINPRMTD-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// X86WINPRMTD-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// X86WINPRMTD-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// X86WINPRMTD-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// X86WINPRMTD-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT: store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD-NEXT: store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD-NEXT: ret void
//
// AVRFP32-LABEL: define dso_local { float, float } @muld(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT: entry:
// AVRFP32-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
// AVRFP32-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
// AVRFP32-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
// AVRFP32-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
// AVRFP32-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
// AVRFP32-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
// AVRFP32-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT: ret { float, float } [[TMP4]]
//
// AVRFP64-LABEL: define dso_local void @muld(
// AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT: entry:
// AVRFP64-NEXT: [[A:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT: [[B:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 1
// AVRFP64-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
// AVRFP64-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
// AVRFP64-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
// AVRFP64-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
// AVRFP64-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
// AVRFP64-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
// AVRFP64-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
// AVRFP64-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 1
// AVRFP64-NEXT: store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
// AVRFP64-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
// AVRFP64-NEXT: ret void
//
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// BASIC_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1]] {
// BASIC_FAST-NEXT: entry:
// BASIC_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// BASIC_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// BASIC_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// BASIC_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// BASIC_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// BASIC_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// BASIC_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// BASIC_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// BASIC_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// BASIC_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// BASIC_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// BASIC_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT: store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// BASIC_FAST-NEXT: store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// BASIC_FAST-NEXT: [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// BASIC_FAST-NEXT: ret { double, double } [[TMP4]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// FULL_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1]] {
// FULL_FAST-NEXT: entry:
// FULL_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// FULL_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// FULL_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// FULL_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// FULL_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// FULL_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// FULL_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// FULL_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// FULL_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// FULL_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// FULL_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// FULL_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// FULL_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// FULL_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// FULL_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// FULL_FAST-NEXT: [[ISNAN_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno double [[MUL_R]], [[MUL_R]]
// FULL_FAST-NEXT: br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
// FULL_FAST: complex_mul_imag_nan:
// FULL_FAST-NEXT: [[ISNAN_CMP1:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno double [[MUL_I]], [[MUL_I]]
// FULL_FAST-NEXT: br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
// FULL_FAST: complex_mul_libcall:
// FULL_FAST-NEXT: [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { double, double } @__muldc3(double noundef nofpclass(nan inf) [[A_REAL]], double noundef nofpclass(nan inf) [[A_IMAG]], double noundef nofpclass(nan inf) [[B_REAL]], double noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT: [[TMP4:%.*]] = extractvalue { double, double } [[CALL]], 0
// FULL_FAST-NEXT: [[TMP5:%.*]] = extractvalue { double, double } [[CALL]], 1
// FULL_FAST-NEXT: br label [[COMPLEX_MUL_CONT]]
// FULL_FAST: complex_mul_cont:
// FULL_FAST-NEXT: [[REAL_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP4]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT: [[IMAG_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn double [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP5]], [[COMPLEX_MUL_LIBCALL]] ]
// FULL_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT: store double [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 8
// FULL_FAST-NEXT: store double [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 8
// FULL_FAST-NEXT: [[TMP6:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// FULL_FAST-NEXT: ret { double, double } [[TMP6]]
//
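// Even under -ffast-math, the full range above keeps the NaN-recovery path:
// only when both the real and the imaginary product come out NaN does
// control reach the __muldc3 libcall. A minimal sketch of that control flow
// (illustrative only, not part of the checked assertions; ar/ai/br/bi stand
// for the operands' real and imaginary parts):
//
//   double re = ar * br - ai * bi;        // MUL_AC - MUL_BD
//   double im = ar * bi + ai * br;        // MUL_AD + MUL_BC
//   if (re != re && im != im)             // both parts NaN
//     return __muldc3(ar, ai, br, bi);    // recompute carefully at runtime
//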
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// IMPRVD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR2]] {
// IMPRVD_FAST-NEXT: entry:
// IMPRVD_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// IMPRVD_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// IMPRVD_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// IMPRVD_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// IMPRVD_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// IMPRVD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// IMPRVD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// IMPRVD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// IMPRVD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// IMPRVD_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// IMPRVD_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// IMPRVD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// IMPRVD_FAST-NEXT: store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// IMPRVD_FAST-NEXT: [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// IMPRVD_FAST-NEXT: ret { double, double } [[TMP4]]
//
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { double, double } @muld(
// PRMTD_FAST-SAME: double noundef nofpclass(nan inf) [[A_COERCE0:%.*]], double noundef nofpclass(nan inf) [[A_COERCE1:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]]) #[[ATTR1]] {
// PRMTD_FAST-NEXT: entry:
// PRMTD_FAST-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_FAST-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_FAST-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_FAST-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_FAST-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_REAL]]
// PRMTD_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_IMAG]]
// PRMTD_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_REAL]], [[B_IMAG]]
// PRMTD_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[A_IMAG]], [[B_REAL]]
// PRMTD_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[MUL_AC]], [[MUL_BD]]
// PRMTD_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[MUL_AD]], [[MUL_BC]]
// PRMTD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT: store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// PRMTD_FAST-NEXT: store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_FAST-NEXT: [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_FAST-NEXT: ret { double, double } [[TMP4]]
//
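// Note that all of the fast variants above lower muld to the same straight
// four-product formula; among the blocks shown here only FULL_FAST keeps
// the NaN fallback to __muldc3.
//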
// X86WINPRMTD_STRICT-LABEL: define dso_local void @muld(
// X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT: entry:
// X86WINPRMTD_STRICT-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD_STRICT-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD_STRICT-NEXT: ret void
//
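// Under -frounding-math / -ffp-exception-behavior=strict every FP operation
// above is emitted as an llvm.experimental.constrained.* intrinsic carrying
// the rounding mode ("round.dynamic") and the exception behavior
// ("fpexcept.strict") as metadata, which keeps optimizers from reordering
// the arithmetic in ways that could change observable FP state.
//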
// PRMTD_STRICT-LABEL: define dso_local { double, double } @muld(
// PRMTD_STRICT-SAME: double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) #[[ATTR2]] {
// PRMTD_STRICT-NEXT: entry:
// PRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT: [[A:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT: [[B:%.*]] = alloca { double, double }, align 8
// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 8
// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 8
// PRMTD_STRICT-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 8
// PRMTD_STRICT-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 8
// PRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// PRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// PRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// PRMTD_STRICT-NEXT: [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store double [[MUL_R]], ptr [[RETVAL_REALP]], align 8
// PRMTD_STRICT-NEXT: store double [[MUL_I]], ptr [[RETVAL_IMAGP]], align 8
// PRMTD_STRICT-NEXT: [[TMP4:%.*]] = load { double, double }, ptr [[RETVAL]], align 8
// PRMTD_STRICT-NEXT: ret { double, double } [[TMP4]]
//
_Complex double muld(_Complex double a, _Complex double b) {
  return a * b;
}

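// All of the blocks above assert the IR generated for this multiplication.
// Under the basic, improved, and promoted ranges it is the textbook
// four-product formula; the full range guards the same formula with a NaN
// check that falls back to the compiler-rt routine __muldc3 (see the
// FULL_FAST block for the guarded form).
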
// FULL-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// FULL-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// FULL-NEXT: entry:
// FULL-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// FULL-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// FULL-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// FULL-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// FULL-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// FULL-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL-NEXT: [[CALL:%.*]] = call { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef [[A_REAL]], x86_fp80 noundef [[A_IMAG]], x86_fp80 noundef [[B_REAL]], x86_fp80 noundef [[B_IMAG]]) #[[ATTR2]]
// FULL-NEXT: [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL-NEXT: [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT: store x86_fp80 [[TMP0]], ptr [[RETVAL_REALP]], align 16
// FULL-NEXT: store x86_fp80 [[TMP1]], ptr [[RETVAL_IMAGP]], align 16
// FULL-NEXT: [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// FULL-NEXT: ret { x86_fp80, x86_fp80 } [[TMP2]]
//
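// The full range lowers long double complex division entirely to a
// compiler-rt call. The libcall checked above corresponds roughly to this
// declaration (a sketch of the compiler-rt interface, not part of the test):
//
//   long double _Complex __divxc3(long double ar, long double ai,
//                                 long double br, long double bi);
//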
// BASIC-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// BASIC-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// BASIC-NEXT: entry:
// BASIC-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// BASIC-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// BASIC-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// BASIC-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC-NEXT: [[TMP0:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
// BASIC-NEXT: [[TMP1:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
// BASIC-NEXT: [[TMP2:%.*]] = fadd x86_fp80 [[TMP0]], [[TMP1]]
// BASIC-NEXT: [[TMP3:%.*]] = fmul x86_fp80 [[B_REAL]], [[B_REAL]]
// BASIC-NEXT: [[TMP4:%.*]] = fmul x86_fp80 [[B_IMAG]], [[B_IMAG]]
// BASIC-NEXT: [[TMP5:%.*]] = fadd x86_fp80 [[TMP3]], [[TMP4]]
// BASIC-NEXT: [[TMP6:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
// BASIC-NEXT: [[TMP7:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
// BASIC-NEXT: [[TMP8:%.*]] = fsub x86_fp80 [[TMP6]], [[TMP7]]
// BASIC-NEXT: [[TMP9:%.*]] = fdiv x86_fp80 [[TMP2]], [[TMP5]]
// BASIC-NEXT: [[TMP10:%.*]] = fdiv x86_fp80 [[TMP8]], [[TMP5]]
// BASIC-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT: store x86_fp80 [[TMP9]], ptr [[RETVAL_REALP]], align 16
// BASIC-NEXT: store x86_fp80 [[TMP10]], ptr [[RETVAL_IMAGP]], align 16
// BASIC-NEXT: [[TMP11:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// BASIC-NEXT: ret { x86_fp80, x86_fp80 } [[TMP11]]
//
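// A minimal sketch of what the BASIC checks above compute, with
// a = ar + ai*I and b = br + bi*I (illustrative only, not part of the test):
//
//   long double d  = br * br + bi * bi;        // TMP5
//   long double re = (ar * br + ai * bi) / d;  // TMP9
//   long double im = (ai * br - ar * bi) / d;  // TMP10
//
// This is the textbook formula; it can overflow or underflow in d even when
// the exact quotient is representable, which is what the improved range
// below avoids.
//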
// IMPRVD-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// IMPRVD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// IMPRVD-NEXT: entry:
// IMPRVD-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// IMPRVD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// IMPRVD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// IMPRVD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
// IMPRVD-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// IMPRVD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD: abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD-NEXT: [[TMP2:%.*]] = fdiv x86_fp80 [[B_IMAG]], [[B_REAL]]
// IMPRVD-NEXT: [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[B_IMAG]]
// IMPRVD-NEXT: [[TMP4:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP3]]
// IMPRVD-NEXT: [[TMP5:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP2]]
// IMPRVD-NEXT: [[TMP6:%.*]] = fadd x86_fp80 [[A_REAL]], [[TMP5]]
// IMPRVD-NEXT: [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD-NEXT: [[TMP8:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP2]]
// IMPRVD-NEXT: [[TMP9:%.*]] = fsub x86_fp80 [[A_IMAG]], [[TMP8]]
// IMPRVD-NEXT: [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV:%.*]]
// IMPRVD: abs_rhsr_less_than_abs_rhsi:
// IMPRVD-NEXT: [[TMP11:%.*]] = fdiv x86_fp80 [[B_REAL]], [[B_IMAG]]
// IMPRVD-NEXT: [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[B_REAL]]
// IMPRVD-NEXT: [[TMP13:%.*]] = fadd x86_fp80 [[B_IMAG]], [[TMP12]]
// IMPRVD-NEXT: [[TMP14:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP11]]
// IMPRVD-NEXT: [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[A_IMAG]]
// IMPRVD-NEXT: [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD-NEXT: [[TMP17:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP11]]
// IMPRVD-NEXT: [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[A_REAL]]
// IMPRVD-NEXT: [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV]]
// IMPRVD: complex_div:
// IMPRVD-NEXT: [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT: [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT: store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// IMPRVD-NEXT: store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// IMPRVD-NEXT: [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// IMPRVD-NEXT: ret { x86_fp80, x86_fp80 } [[TMP22]]
//
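// The IMPRVD checks implement Smith's algorithm: scale by whichever of |br|
// and |bi| is larger so intermediates stay in range. A sketch of the branch
// taken when |br| is the larger (illustrative only, not part of the test):
//
//   long double r  = bi / br;             // TMP2
//   long double d  = br + r * bi;         // TMP4
//   long double re = (ar + ai * r) / d;   // TMP7
//   long double im = (ai - ar * r) / d;   // TMP10
//
// The other branch is symmetric, with r = br / bi and d = bi + r * br.
//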
// PRMTD-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
// PRMTD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// PRMTD-NEXT: entry:
// PRMTD-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
// PRMTD-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// PRMTD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// PRMTD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD: abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD-NEXT: [[TMP2:%.*]] = fdiv x86_fp80 [[B_IMAG]], [[B_REAL]]
// PRMTD-NEXT: [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[B_IMAG]]
// PRMTD-NEXT: [[TMP4:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP3]]
// PRMTD-NEXT: [[TMP5:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP2]]
// PRMTD-NEXT: [[TMP6:%.*]] = fadd x86_fp80 [[A_REAL]], [[TMP5]]
// PRMTD-NEXT: [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// PRMTD-NEXT: [[TMP8:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP2]]
// PRMTD-NEXT: [[TMP9:%.*]] = fsub x86_fp80 [[A_IMAG]], [[TMP8]]
// PRMTD-NEXT: [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// PRMTD-NEXT: br label [[COMPLEX_DIV:%.*]]
// PRMTD: abs_rhsr_less_than_abs_rhsi:
// PRMTD-NEXT: [[TMP11:%.*]] = fdiv x86_fp80 [[B_REAL]], [[B_IMAG]]
// PRMTD-NEXT: [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[B_REAL]]
// PRMTD-NEXT: [[TMP13:%.*]] = fadd x86_fp80 [[B_IMAG]], [[TMP12]]
// PRMTD-NEXT: [[TMP14:%.*]] = fmul x86_fp80 [[A_REAL]], [[TMP11]]
// PRMTD-NEXT: [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[A_IMAG]]
// PRMTD-NEXT: [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// PRMTD-NEXT: [[TMP17:%.*]] = fmul x86_fp80 [[A_IMAG]], [[TMP11]]
// PRMTD-NEXT: [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[A_REAL]]
// PRMTD-NEXT: [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// PRMTD-NEXT: br label [[COMPLEX_DIV]]
// PRMTD: complex_div:
// PRMTD-NEXT: [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT: [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT: store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// PRMTD-NEXT: store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD-NEXT: [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD-NEXT: ret { x86_fp80, x86_fp80 } [[TMP22]]
//
// X86WINPRMTD-LABEL: define dso_local void @divld(
// X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT: entry:
// X86WINPRMTD-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
// X86WINPRMTD-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
// X86WINPRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
// X86WINPRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]])
// X86WINPRMTD-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]])
// X86WINPRMTD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt double [[TMP0]], [[TMP1]]
// X86WINPRMTD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD: abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD-NEXT: [[TMP2:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// X86WINPRMTD-NEXT: [[TMP3:%.*]] = fmul double [[TMP2]], [[B_IMAG]]
// X86WINPRMTD-NEXT: [[TMP4:%.*]] = fadd double [[B_REAL]], [[TMP3]]
// X86WINPRMTD-NEXT: [[TMP5:%.*]] = fmul double [[A_IMAG]], [[TMP2]]
// X86WINPRMTD-NEXT: [[TMP6:%.*]] = fadd double [[A_REAL]], [[TMP5]]
// X86WINPRMTD-NEXT: [[TMP7:%.*]] = fdiv double [[TMP6]], [[TMP4]]
// X86WINPRMTD-NEXT: [[TMP8:%.*]] = fmul double [[A_REAL]], [[TMP2]]
// X86WINPRMTD-NEXT: [[TMP9:%.*]] = fsub double [[A_IMAG]], [[TMP8]]
// X86WINPRMTD-NEXT: [[TMP10:%.*]] = fdiv double [[TMP9]], [[TMP4]]
// X86WINPRMTD-NEXT: br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD: abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD-NEXT: [[TMP11:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// X86WINPRMTD-NEXT: [[TMP12:%.*]] = fmul double [[TMP11]], [[B_REAL]]
// X86WINPRMTD-NEXT: [[TMP13:%.*]] = fadd double [[B_IMAG]], [[TMP12]]
// X86WINPRMTD-NEXT: [[TMP14:%.*]] = fmul double [[A_REAL]], [[TMP11]]
// X86WINPRMTD-NEXT: [[TMP15:%.*]] = fadd double [[TMP14]], [[A_IMAG]]
// X86WINPRMTD-NEXT: [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP13]]
// X86WINPRMTD-NEXT: [[TMP17:%.*]] = fmul double [[A_IMAG]], [[TMP11]]
// X86WINPRMTD-NEXT: [[TMP18:%.*]] = fsub double [[TMP17]], [[A_REAL]]
// X86WINPRMTD-NEXT: [[TMP19:%.*]] = fdiv double [[TMP18]], [[TMP13]]
// X86WINPRMTD-NEXT: br label [[COMPLEX_DIV]]
// X86WINPRMTD: complex_div:
// X86WINPRMTD-NEXT: [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT: [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT: store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
// X86WINPRMTD-NEXT: store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
// X86WINPRMTD-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
// X86WINPRMTD-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// X86WINPRMTD-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
// X86WINPRMTD-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
// X86WINPRMTD-NEXT: ret void
//
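// On x86_64-windows, long double is only 64 bits wide, so divld is emitted
// on plain doubles here and the { double, double } result is returned
// indirectly through the sret pointer per the Windows ABI.
//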
// AVRFP32-LABEL: define dso_local { float, float } @divld(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT: entry:
// AVRFP32-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP4:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_REAL]])
// AVRFP32-NEXT: [[TMP5:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[B_IMAG]])
// AVRFP32-NEXT: [[ABS_CMP:%.*]] = fcmp ugt float [[TMP4]], [[TMP5]]
// AVRFP32-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP32: abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP32-NEXT: [[TMP6:%.*]] = fdiv float [[B_IMAG]], [[B_REAL]]
// AVRFP32-NEXT: [[TMP7:%.*]] = fmul float [[TMP6]], [[B_IMAG]]
// AVRFP32-NEXT: [[TMP8:%.*]] = fadd float [[B_REAL]], [[TMP7]]
// AVRFP32-NEXT: [[TMP9:%.*]] = fmul float [[A_IMAG]], [[TMP6]]
// AVRFP32-NEXT: [[TMP10:%.*]] = fadd float [[A_REAL]], [[TMP9]]
// AVRFP32-NEXT: [[TMP11:%.*]] = fdiv float [[TMP10]], [[TMP8]]
// AVRFP32-NEXT: [[TMP12:%.*]] = fmul float [[A_REAL]], [[TMP6]]
// AVRFP32-NEXT: [[TMP13:%.*]] = fsub float [[A_IMAG]], [[TMP12]]
// AVRFP32-NEXT: [[TMP14:%.*]] = fdiv float [[TMP13]], [[TMP8]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV:%.*]]
// AVRFP32: abs_rhsr_less_than_abs_rhsi:
// AVRFP32-NEXT: [[TMP15:%.*]] = fdiv float [[B_REAL]], [[B_IMAG]]
// AVRFP32-NEXT: [[TMP16:%.*]] = fmul float [[TMP15]], [[B_REAL]]
// AVRFP32-NEXT: [[TMP17:%.*]] = fadd float [[B_IMAG]], [[TMP16]]
// AVRFP32-NEXT: [[TMP18:%.*]] = fmul float [[A_REAL]], [[TMP15]]
// AVRFP32-NEXT: [[TMP19:%.*]] = fadd float [[TMP18]], [[A_IMAG]]
// AVRFP32-NEXT: [[TMP20:%.*]] = fdiv float [[TMP19]], [[TMP17]]
// AVRFP32-NEXT: [[TMP21:%.*]] = fmul float [[A_IMAG]], [[TMP15]]
// AVRFP32-NEXT: [[TMP22:%.*]] = fsub float [[TMP21]], [[A_REAL]]
// AVRFP32-NEXT: [[TMP23:%.*]] = fdiv float [[TMP22]], [[TMP17]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV]]
// AVRFP32: complex_div:
// AVRFP32-NEXT: [[TMP24:%.*]] = phi float [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT: [[TMP25:%.*]] = phi float [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT: store float [[TMP24]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT: store float [[TMP25]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP26:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT: ret { float, float } [[TMP26]]
//
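// With -mdouble=32 the AVR target also gives long double 32 bits, which is
// why this divld works on float pairs and the 8-byte { float, float }
// result can be returned directly in registers.
//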
// AVRFP64-LABEL: define dso_local void @divld(
// AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT: entry:
// AVRFP64-NEXT: [[A:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT: [[B:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 1
// AVRFP64-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
// AVRFP64-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT: [[TMP4:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_REAL]])
// AVRFP64-NEXT: [[TMP5:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[B_IMAG]])
// AVRFP64-NEXT: [[ABS_CMP:%.*]] = fcmp ugt double [[TMP4]], [[TMP5]]
// AVRFP64-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP64: abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP64-NEXT: [[TMP6:%.*]] = fdiv double [[B_IMAG]], [[B_REAL]]
// AVRFP64-NEXT: [[TMP7:%.*]] = fmul double [[TMP6]], [[B_IMAG]]
// AVRFP64-NEXT: [[TMP8:%.*]] = fadd double [[B_REAL]], [[TMP7]]
// AVRFP64-NEXT: [[TMP9:%.*]] = fmul double [[A_IMAG]], [[TMP6]]
// AVRFP64-NEXT: [[TMP10:%.*]] = fadd double [[A_REAL]], [[TMP9]]
// AVRFP64-NEXT: [[TMP11:%.*]] = fdiv double [[TMP10]], [[TMP8]]
// AVRFP64-NEXT: [[TMP12:%.*]] = fmul double [[A_REAL]], [[TMP6]]
// AVRFP64-NEXT: [[TMP13:%.*]] = fsub double [[A_IMAG]], [[TMP12]]
// AVRFP64-NEXT: [[TMP14:%.*]] = fdiv double [[TMP13]], [[TMP8]]
// AVRFP64-NEXT: br label [[COMPLEX_DIV:%.*]]
// AVRFP64: abs_rhsr_less_than_abs_rhsi:
// AVRFP64-NEXT: [[TMP15:%.*]] = fdiv double [[B_REAL]], [[B_IMAG]]
// AVRFP64-NEXT: [[TMP16:%.*]] = fmul double [[TMP15]], [[B_REAL]]
// AVRFP64-NEXT: [[TMP17:%.*]] = fadd double [[B_IMAG]], [[TMP16]]
// AVRFP64-NEXT: [[TMP18:%.*]] = fmul double [[A_REAL]], [[TMP15]]
// AVRFP64-NEXT: [[TMP19:%.*]] = fadd double [[TMP18]], [[A_IMAG]]
// AVRFP64-NEXT: [[TMP20:%.*]] = fdiv double [[TMP19]], [[TMP17]]
// AVRFP64-NEXT: [[TMP21:%.*]] = fmul double [[A_IMAG]], [[TMP15]]
// AVRFP64-NEXT: [[TMP22:%.*]] = fsub double [[TMP21]], [[A_REAL]]
// AVRFP64-NEXT: [[TMP23:%.*]] = fdiv double [[TMP22]], [[TMP17]]
// AVRFP64-NEXT: br label [[COMPLEX_DIV]]
// AVRFP64: complex_div:
// AVRFP64-NEXT: [[TMP24:%.*]] = phi double [ [[TMP11]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP20]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT: [[TMP25:%.*]] = phi double [ [[TMP14]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP23]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: store double [[TMP24]], ptr [[AGG_RESULT_REALP]], align 1
// AVRFP64-NEXT: store double [[TMP25]], ptr [[AGG_RESULT_IMAGP]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
// AVRFP64-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
// AVRFP64-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
// AVRFP64-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
// AVRFP64-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
// AVRFP64-NEXT: ret void
//
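// With -mdouble=64 the 16-byte { double, double } no longer fits in AVR's
// return registers, so it is returned through sret; the align 1 accesses
// and the addrspace(1) calls reflect AVR's byte alignment and its separate
// program address space for functions.
//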
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
// BASIC_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// BASIC_FAST-NEXT: entry:
// BASIC_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// BASIC_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// BASIC_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// BASIC_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC_FAST-NEXT: [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]], [[TMP1]]
// BASIC_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP3]], [[TMP4]]
// BASIC_FAST-NEXT: [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
// BASIC_FAST-NEXT: [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
// BASIC_FAST-NEXT: [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP7]]
// BASIC_FAST-NEXT: [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[TMP5]]
// BASIC_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP8]], [[TMP5]]
// BASIC_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT: store x86_fp80 [[TMP9]], ptr [[RETVAL_REALP]], align 16
// BASIC_FAST-NEXT: store x86_fp80 [[TMP10]], ptr [[RETVAL_IMAGP]], align 16
// BASIC_FAST-NEXT: [[TMP11:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// BASIC_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP11]]
//
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
// FULL_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
// FULL_FAST-NEXT: entry:
// FULL_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// FULL_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// FULL_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// FULL_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL_FAST-NEXT: [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef nofpclass(nan inf) [[A_REAL]], x86_fp80 noundef nofpclass(nan inf) [[A_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT: [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL_FAST-NEXT: [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT: store x86_fp80 [[TMP0]], ptr [[RETVAL_REALP]], align 16
// FULL_FAST-NEXT: store x86_fp80 [[TMP1]], ptr [[RETVAL_IMAGP]], align 16
// FULL_FAST-NEXT: [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// FULL_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP2]]
//
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
// IMPRVD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// IMPRVD_FAST-NEXT: entry:
// IMPRVD_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// IMPRVD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// IMPRVD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// IMPRVD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD_FAST-NEXT: [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
// IMPRVD_FAST-NEXT: [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
// IMPRVD_FAST-NEXT: [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD_FAST-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST: abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP3]]
// IMPRVD_FAST-NEXT: [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP2]]
// IMPRVD_FAST-NEXT: [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP5]]
// IMPRVD_FAST-NEXT: [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD_FAST-NEXT: [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP2]]
// IMPRVD_FAST-NEXT: [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP8]]
// IMPRVD_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST: abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP12]]
// IMPRVD_FAST-NEXT: [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP11]]
// IMPRVD_FAST-NEXT: [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[A_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD_FAST-NEXT: [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP11]]
// IMPRVD_FAST-NEXT: [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[A_REAL]]
// IMPRVD_FAST-NEXT: [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV]]
// IMPRVD_FAST: complex_div:
// IMPRVD_FAST-NEXT: [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD_FAST-NEXT: store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
// IMPRVD_FAST-NEXT: store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
// IMPRVD_FAST-NEXT: [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// IMPRVD_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP22]]
//
2596 // PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @divld(
2597 // PRMTD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
2598 // PRMTD_FAST-NEXT: entry:
2599 // PRMTD_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
2600 // PRMTD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
2601 // PRMTD_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
2602 // PRMTD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
2603 // PRMTD_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
2604 // PRMTD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
2605 // PRMTD_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
2606 // PRMTD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
2607 // PRMTD_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
2608 // PRMTD_FAST-NEXT: [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]])
2609 // PRMTD_FAST-NEXT: [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]])
2610 // PRMTD_FAST-NEXT: [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
2611 // PRMTD_FAST-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
2612 // PRMTD_FAST: abs_rhsr_greater_or_equal_abs_rhsi:
2613 // PRMTD_FAST-NEXT: [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[B_REAL]]
2614 // PRMTD_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[B_IMAG]]
2615 // PRMTD_FAST-NEXT: [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP3]]
2616 // PRMTD_FAST-NEXT: [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP2]]
2617 // PRMTD_FAST-NEXT: [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP5]]
2618 // PRMTD_FAST-NEXT: [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
2619 // PRMTD_FAST-NEXT: [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP2]]
2620 // PRMTD_FAST-NEXT: [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP8]]
2621 // PRMTD_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
2622 // PRMTD_FAST-NEXT: br label [[COMPLEX_DIV:%.*]]
2623 // PRMTD_FAST: abs_rhsr_less_than_abs_rhsi:
2624 // PRMTD_FAST-NEXT: [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[B_IMAG]]
2625 // PRMTD_FAST-NEXT: [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[B_REAL]]
2626 // PRMTD_FAST-NEXT: [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP12]]
2627 // PRMTD_FAST-NEXT: [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[TMP11]]
2628 // PRMTD_FAST-NEXT: [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[A_IMAG]]
2629 // PRMTD_FAST-NEXT: [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
2630 // PRMTD_FAST-NEXT: [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[TMP11]]
2631 // PRMTD_FAST-NEXT: [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[A_REAL]]
2632 // PRMTD_FAST-NEXT: [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
2633 // PRMTD_FAST-NEXT: br label [[COMPLEX_DIV]]
2634 // PRMTD_FAST: complex_div:
2635 // PRMTD_FAST-NEXT: [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
2636 // PRMTD_FAST-NEXT: [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
2637 // PRMTD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
2638 // PRMTD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
2639 // PRMTD_FAST-NEXT: store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
2640 // PRMTD_FAST-NEXT: store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
2641 // PRMTD_FAST-NEXT: [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
2642 // PRMTD_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP22]]
2644 // X86WINPRMTD_STRICT-LABEL: define dso_local void @divld(
2645 // X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
2646 // X86WINPRMTD_STRICT-NEXT: entry:
2647 // X86WINPRMTD_STRICT-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
2648 // X86WINPRMTD_STRICT-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
2649 // X86WINPRMTD_STRICT-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
2650 // X86WINPRMTD_STRICT-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
2651 // X86WINPRMTD_STRICT-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
2652 // X86WINPRMTD_STRICT-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
2653 // X86WINPRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
2654 // X86WINPRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
2655 // X86WINPRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
2656 // X86WINPRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
2657 // X86WINPRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
2658 // X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
2659 // X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
2660 // X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
2661 // X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[B_REAL]]) #[[ATTR3]]
2662 // X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[B_IMAG]]) #[[ATTR3]]
2663 // X86WINPRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
2664 // X86WINPRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
2665 // X86WINPRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
2666 // X86WINPRMTD_STRICT-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2667 // X86WINPRMTD_STRICT-NEXT: [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP2]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2668 // X86WINPRMTD_STRICT-NEXT: [[TMP4:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_REAL]], double [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2669 // X86WINPRMTD_STRICT-NEXT: [[TMP5:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2670 // X86WINPRMTD_STRICT-NEXT: [[TMP6:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[A_REAL]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2671 // X86WINPRMTD_STRICT-NEXT: [[TMP7:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP6]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2672 // X86WINPRMTD_STRICT-NEXT: [[TMP8:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2673 // X86WINPRMTD_STRICT-NEXT: [[TMP9:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[A_IMAG]], double [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2674 // X86WINPRMTD_STRICT-NEXT: [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP9]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2675 // X86WINPRMTD_STRICT-NEXT: br label [[COMPLEX_DIV:%.*]]
2676 // X86WINPRMTD_STRICT: abs_rhsr_less_than_abs_rhsi:
2677 // X86WINPRMTD_STRICT-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[B_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2678 // X86WINPRMTD_STRICT-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP11]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2679 // X86WINPRMTD_STRICT-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_IMAG]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2680 // X86WINPRMTD_STRICT-NEXT: [[TMP14:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2681 // X86WINPRMTD_STRICT-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[A_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2682 // X86WINPRMTD_STRICT-NEXT: [[TMP16:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP15]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2683 // X86WINPRMTD_STRICT-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2684 // X86WINPRMTD_STRICT-NEXT: [[TMP18:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP17]], double [[A_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2685 // X86WINPRMTD_STRICT-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP18]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
2686 // X86WINPRMTD_STRICT-NEXT: br label [[COMPLEX_DIV]]
2687 // X86WINPRMTD_STRICT: complex_div:
2688 // X86WINPRMTD_STRICT-NEXT: [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
2689 // X86WINPRMTD_STRICT-NEXT: [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
2690 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2691 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2692 // X86WINPRMTD_STRICT-NEXT: store double [[TMP20]], ptr [[AGG_RESULT_REALP]], align 8
2693 // X86WINPRMTD_STRICT-NEXT: store double [[TMP21]], ptr [[AGG_RESULT_IMAGP]], align 8
2694 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2695 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
2696 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2697 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
2698 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2699 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2700 // X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
2701 // X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
2702 // X86WINPRMTD_STRICT-NEXT: ret void
2704 // PRMTD_STRICT-LABEL: define dso_local { x86_fp80, x86_fp80 } @divld(
2705 // PRMTD_STRICT-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
2706 // PRMTD_STRICT-NEXT: entry:
2707 // PRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
2708 // PRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
2709 // PRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
2710 // PRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
2711 // PRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
2712 // PRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
2713 // PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
2714 // PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
2715 // PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
2716 // PRMTD_STRICT-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_REAL]]) #[[ATTR4]]
2717 // PRMTD_STRICT-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[B_IMAG]]) #[[ATTR4]]
2718 // PRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR4]]
2719 // PRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
2720 // PRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
2721 // PRMTD_STRICT-NEXT: [[TMP2:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2722 // PRMTD_STRICT-NEXT: [[TMP3:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP2]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2723 // PRMTD_STRICT-NEXT: [[TMP4:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2724 // PRMTD_STRICT-NEXT: [[TMP5:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2725 // PRMTD_STRICT-NEXT: [[TMP6:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[A_REAL]], x86_fp80 [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2726 // PRMTD_STRICT-NEXT: [[TMP7:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP6]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2727 // PRMTD_STRICT-NEXT: [[TMP8:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2728 // PRMTD_STRICT-NEXT: [[TMP9:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2729 // PRMTD_STRICT-NEXT: [[TMP10:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP9]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2730 // PRMTD_STRICT-NEXT: br label [[COMPLEX_DIV:%.*]]
2731 // PRMTD_STRICT: abs_rhsr_less_than_abs_rhsi:
2732 // PRMTD_STRICT-NEXT: [[TMP11:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[B_REAL]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2733 // PRMTD_STRICT-NEXT: [[TMP12:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP11]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2734 // PRMTD_STRICT-NEXT: [[TMP13:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2735 // PRMTD_STRICT-NEXT: [[TMP14:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2736 // PRMTD_STRICT-NEXT: [[TMP15:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP14]], x86_fp80 [[A_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2737 // PRMTD_STRICT-NEXT: [[TMP16:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP15]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2738 // PRMTD_STRICT-NEXT: [[TMP17:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2739 // PRMTD_STRICT-NEXT: [[TMP18:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[TMP17]], x86_fp80 [[A_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2740 // PRMTD_STRICT-NEXT: [[TMP19:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP18]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
2741 // PRMTD_STRICT-NEXT: br label [[COMPLEX_DIV]]
2742 // PRMTD_STRICT: complex_div:
2743 // PRMTD_STRICT-NEXT: [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
2744 // PRMTD_STRICT-NEXT: [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
2745 // PRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
2746 // PRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
2747 // PRMTD_STRICT-NEXT: store x86_fp80 [[TMP20]], ptr [[RETVAL_REALP]], align 16
2748 // PRMTD_STRICT-NEXT: store x86_fp80 [[TMP21]], ptr [[RETVAL_IMAGP]], align 16
2749 // PRMTD_STRICT-NEXT: [[TMP22:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
2750 // PRMTD_STRICT-NEXT: ret { x86_fp80, x86_fp80 } [[TMP22]]
_Complex long double divld(_Complex long double a, _Complex long double b) {
  return a / b;
}
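
// NB: a reader-oriented sketch of what the divld checks above pin down.
// FULL and FULL_FAST lower the division to the __divxc3 libcall, while the
// improved/promoted ranges emit an inline Smith-style division that
// branches on whether |creal(b)| or |cimag(b)| dominates. For a = x + y*i
// and b = c + d*i, the abs_rhsr_greater_or_equal_abs_rhsi branch computes
// (names here are illustrative, not taken from the IR):
//   r = d / c;  t = c + d * r;
//   real = (x + y * r) / t;  imag = (y - x * r) / t;
// The other branch mirrors this with r = c / d and t = d + c * r, which is
// exactly the abs_rhsr_less_than_abs_rhsi block in the checks.
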
2756 // FULL-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
2757 // FULL-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
2758 // FULL-NEXT: entry:
2759 // FULL-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
2760 // FULL-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
2761 // FULL-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
2762 // FULL-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
2763 // FULL-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
2764 // FULL-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
2765 // FULL-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
2766 // FULL-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
2767 // FULL-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
2768 // FULL-NEXT: [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
2769 // FULL-NEXT: [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
2770 // FULL-NEXT: [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
2771 // FULL-NEXT: [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
2772 // FULL-NEXT: [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
2773 // FULL-NEXT: [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
2774 // FULL-NEXT: [[ISNAN_CMP:%.*]] = fcmp uno x86_fp80 [[MUL_R]], [[MUL_R]]
2775 // FULL-NEXT: br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
2776 // FULL: complex_mul_imag_nan:
2777 // FULL-NEXT: [[ISNAN_CMP1:%.*]] = fcmp uno x86_fp80 [[MUL_I]], [[MUL_I]]
2778 // FULL-NEXT: br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
2779 // FULL: complex_mul_libcall:
2780 // FULL-NEXT: [[CALL:%.*]] = call { x86_fp80, x86_fp80 } @__mulxc3(x86_fp80 noundef [[A_REAL]], x86_fp80 noundef [[A_IMAG]], x86_fp80 noundef [[B_REAL]], x86_fp80 noundef [[B_IMAG]]) #[[ATTR2]]
2781 // FULL-NEXT: [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
2782 // FULL-NEXT: [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
2783 // FULL-NEXT: br label [[COMPLEX_MUL_CONT]]
2784 // FULL: complex_mul_cont:
2785 // FULL-NEXT: [[REAL_MUL_PHI:%.*]] = phi x86_fp80 [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], [[COMPLEX_MUL_LIBCALL]] ]
2786 // FULL-NEXT: [[IMAG_MUL_PHI:%.*]] = phi x86_fp80 [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], [[COMPLEX_MUL_LIBCALL]] ]
2787 // FULL-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
2788 // FULL-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
2789 // FULL-NEXT: store x86_fp80 [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 16
2790 // FULL-NEXT: store x86_fp80 [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 16
2791 // FULL-NEXT: [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
2792 // FULL-NEXT: ret { x86_fp80, x86_fp80 } [[TMP2]]
2794 // BASIC-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
2795 // BASIC-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
2796 // BASIC-NEXT: entry:
2797 // BASIC-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
2798 // BASIC-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
2799 // BASIC-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
2800 // BASIC-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
2801 // BASIC-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
2802 // BASIC-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
2803 // BASIC-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
2804 // BASIC-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
2805 // BASIC-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
2806 // BASIC-NEXT: [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
2807 // BASIC-NEXT: [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
2808 // BASIC-NEXT: [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
2809 // BASIC-NEXT: [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
2810 // BASIC-NEXT: [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
2811 // BASIC-NEXT: [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
2812 // BASIC-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
2813 // BASIC-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
2814 // BASIC-NEXT: store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
2815 // BASIC-NEXT: store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
2816 // BASIC-NEXT: [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
2817 // BASIC-NEXT: ret { x86_fp80, x86_fp80 } [[TMP0]]
2819 // IMPRVD-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
2820 // IMPRVD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
2821 // IMPRVD-NEXT: entry:
2822 // IMPRVD-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
2823 // IMPRVD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
2824 // IMPRVD-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
2825 // IMPRVD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
2826 // IMPRVD-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
2827 // IMPRVD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
2828 // IMPRVD-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
2829 // IMPRVD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
2830 // IMPRVD-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
2831 // IMPRVD-NEXT: [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
2832 // IMPRVD-NEXT: [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
2833 // IMPRVD-NEXT: [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
2834 // IMPRVD-NEXT: [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
2835 // IMPRVD-NEXT: [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
2836 // IMPRVD-NEXT: [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
2837 // IMPRVD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
2838 // IMPRVD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
2839 // IMPRVD-NEXT: store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
2840 // IMPRVD-NEXT: store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
2841 // IMPRVD-NEXT: [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
2842 // IMPRVD-NEXT: ret { x86_fp80, x86_fp80 } [[TMP0]]
2844 // PRMTD-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
2845 // PRMTD-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
2846 // PRMTD-NEXT: entry:
2847 // PRMTD-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
2848 // PRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
2849 // PRMTD-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
2850 // PRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
2851 // PRMTD-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
2852 // PRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
2853 // PRMTD-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
2854 // PRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
2855 // PRMTD-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
2856 // PRMTD-NEXT: [[MUL_AC:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_REAL]]
2857 // PRMTD-NEXT: [[MUL_BD:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_IMAG]]
2858 // PRMTD-NEXT: [[MUL_AD:%.*]] = fmul x86_fp80 [[A_REAL]], [[B_IMAG]]
2859 // PRMTD-NEXT: [[MUL_BC:%.*]] = fmul x86_fp80 [[A_IMAG]], [[B_REAL]]
2860 // PRMTD-NEXT: [[MUL_R:%.*]] = fsub x86_fp80 [[MUL_AC]], [[MUL_BD]]
2861 // PRMTD-NEXT: [[MUL_I:%.*]] = fadd x86_fp80 [[MUL_AD]], [[MUL_BC]]
2862 // PRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
2863 // PRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
2864 // PRMTD-NEXT: store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
2865 // PRMTD-NEXT: store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
2866 // PRMTD-NEXT: [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
2867 // PRMTD-NEXT: ret { x86_fp80, x86_fp80 } [[TMP0]]
2869 // X86WINPRMTD-LABEL: define dso_local void @mulld(
2870 // X86WINPRMTD-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
2871 // X86WINPRMTD-NEXT: entry:
2872 // X86WINPRMTD-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
2873 // X86WINPRMTD-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
2874 // X86WINPRMTD-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
2875 // X86WINPRMTD-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
2876 // X86WINPRMTD-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
2877 // X86WINPRMTD-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
2878 // X86WINPRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
2879 // X86WINPRMTD-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
2880 // X86WINPRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
2881 // X86WINPRMTD-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
2882 // X86WINPRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
2883 // X86WINPRMTD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
2884 // X86WINPRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
2885 // X86WINPRMTD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
2886 // X86WINPRMTD-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
2887 // X86WINPRMTD-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
2888 // X86WINPRMTD-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
2889 // X86WINPRMTD-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
2890 // X86WINPRMTD-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
2891 // X86WINPRMTD-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
2892 // X86WINPRMTD-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2893 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2894 // X86WINPRMTD-NEXT: store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
2895 // X86WINPRMTD-NEXT: store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
2896 // X86WINPRMTD-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2897 // X86WINPRMTD-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
2898 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2899 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
2900 // X86WINPRMTD-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2901 // X86WINPRMTD-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2902 // X86WINPRMTD-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
2903 // X86WINPRMTD-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
2904 // X86WINPRMTD-NEXT: ret void
2906 // AVRFP32-LABEL: define dso_local { float, float } @mulld(
2907 // AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
2908 // AVRFP32-NEXT: entry:
2909 // AVRFP32-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
2910 // AVRFP32-NEXT: [[A:%.*]] = alloca { float, float }, align 1
2911 // AVRFP32-NEXT: [[B:%.*]] = alloca { float, float }, align 1
2912 // AVRFP32-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
2913 // AVRFP32-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
2914 // AVRFP32-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
2915 // AVRFP32-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
2916 // AVRFP32-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
2917 // AVRFP32-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
2918 // AVRFP32-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
2919 // AVRFP32-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
2920 // AVRFP32-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
2921 // AVRFP32-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
2922 // AVRFP32-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
2923 // AVRFP32-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
2924 // AVRFP32-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
2925 // AVRFP32-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
2926 // AVRFP32-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
2927 // AVRFP32-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
2928 // AVRFP32-NEXT: [[MUL_AC:%.*]] = fmul float [[A_REAL]], [[B_REAL]]
2929 // AVRFP32-NEXT: [[MUL_BD:%.*]] = fmul float [[A_IMAG]], [[B_IMAG]]
2930 // AVRFP32-NEXT: [[MUL_AD:%.*]] = fmul float [[A_REAL]], [[B_IMAG]]
2931 // AVRFP32-NEXT: [[MUL_BC:%.*]] = fmul float [[A_IMAG]], [[B_REAL]]
2932 // AVRFP32-NEXT: [[MUL_R:%.*]] = fsub float [[MUL_AC]], [[MUL_BD]]
2933 // AVRFP32-NEXT: [[MUL_I:%.*]] = fadd float [[MUL_AD]], [[MUL_BC]]
2934 // AVRFP32-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
2935 // AVRFP32-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
2936 // AVRFP32-NEXT: store float [[MUL_R]], ptr [[RETVAL_REALP]], align 1
2937 // AVRFP32-NEXT: store float [[MUL_I]], ptr [[RETVAL_IMAGP]], align 1
2938 // AVRFP32-NEXT: [[TMP4:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
2939 // AVRFP32-NEXT: ret { float, float } [[TMP4]]
2941 // AVRFP64-LABEL: define dso_local void @mulld(
2942 // AVRFP64-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 1 [[AGG_RESULT:%.*]], double noundef [[A_COERCE0:%.*]], double noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
2943 // AVRFP64-NEXT: entry:
2944 // AVRFP64-NEXT: [[A:%.*]] = alloca { double, double }, align 1
2945 // AVRFP64-NEXT: [[B:%.*]] = alloca { double, double }, align 1
2946 // AVRFP64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
2947 // AVRFP64-NEXT: store double [[A_COERCE0]], ptr [[TMP0]], align 1
2948 // AVRFP64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
2949 // AVRFP64-NEXT: store double [[A_COERCE1]], ptr [[TMP1]], align 1
2950 // AVRFP64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
2951 // AVRFP64-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 1
2952 // AVRFP64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
2953 // AVRFP64-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 1
2954 // AVRFP64-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
2955 // AVRFP64-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 1
2956 // AVRFP64-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
2957 // AVRFP64-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 1
2958 // AVRFP64-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
2959 // AVRFP64-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
2960 // AVRFP64-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
2961 // AVRFP64-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
2962 // AVRFP64-NEXT: [[MUL_AC:%.*]] = fmul double [[A_REAL]], [[B_REAL]]
2963 // AVRFP64-NEXT: [[MUL_BD:%.*]] = fmul double [[A_IMAG]], [[B_IMAG]]
2964 // AVRFP64-NEXT: [[MUL_AD:%.*]] = fmul double [[A_REAL]], [[B_IMAG]]
2965 // AVRFP64-NEXT: [[MUL_BC:%.*]] = fmul double [[A_IMAG]], [[B_REAL]]
2966 // AVRFP64-NEXT: [[MUL_R:%.*]] = fsub double [[MUL_AC]], [[MUL_BD]]
2967 // AVRFP64-NEXT: [[MUL_I:%.*]] = fadd double [[MUL_AD]], [[MUL_BC]]
2968 // AVRFP64-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2969 // AVRFP64-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2970 // AVRFP64-NEXT: store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 1
2971 // AVRFP64-NEXT: store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 1
2972 // AVRFP64-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2973 // AVRFP64-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 1
2974 // AVRFP64-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2975 // AVRFP64-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 1
2976 // AVRFP64-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
2977 // AVRFP64-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
2978 // AVRFP64-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 1
2979 // AVRFP64-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 1
2980 // AVRFP64-NEXT: ret void
2982 // BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
2983 // BASIC_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
2984 // BASIC_FAST-NEXT: entry:
2985 // BASIC_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
2986 // BASIC_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
2987 // BASIC_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
2988 // BASIC_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
2989 // BASIC_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
2990 // BASIC_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
2991 // BASIC_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
2992 // BASIC_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
2993 // BASIC_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
2994 // BASIC_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
2995 // BASIC_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
2996 // BASIC_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
2997 // BASIC_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
2998 // BASIC_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
2999 // BASIC_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
3000 // BASIC_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
3001 // BASIC_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
3002 // BASIC_FAST-NEXT: store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
3003 // BASIC_FAST-NEXT: store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
3004 // BASIC_FAST-NEXT: [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
3005 // BASIC_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP0]]
3007 // FULL_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
3008 // FULL_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
3009 // FULL_FAST-NEXT: entry:
3010 // FULL_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
3011 // FULL_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
3012 // FULL_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
3013 // FULL_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
3014 // FULL_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
3015 // FULL_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
3016 // FULL_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
3017 // FULL_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
3018 // FULL_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
3019 // FULL_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
3020 // FULL_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
3021 // FULL_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
3022 // FULL_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
3023 // FULL_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
3024 // FULL_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
3025 // FULL_FAST-NEXT: [[ISNAN_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno x86_fp80 [[MUL_R]], [[MUL_R]]
3026 // FULL_FAST-NEXT: br i1 [[ISNAN_CMP]], label [[COMPLEX_MUL_IMAG_NAN:%.*]], label [[COMPLEX_MUL_CONT:%.*]], !prof [[PROF2]]
3027 // FULL_FAST: complex_mul_imag_nan:
3028 // FULL_FAST-NEXT: [[ISNAN_CMP1:%.*]] = fcmp reassoc nnan ninf nsz arcp afn uno x86_fp80 [[MUL_I]], [[MUL_I]]
3029 // FULL_FAST-NEXT: br i1 [[ISNAN_CMP1]], label [[COMPLEX_MUL_LIBCALL:%.*]], label [[COMPLEX_MUL_CONT]], !prof [[PROF2]]
3030 // FULL_FAST: complex_mul_libcall:
3031 // FULL_FAST-NEXT: [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__mulxc3(x86_fp80 noundef nofpclass(nan inf) [[A_REAL]], x86_fp80 noundef nofpclass(nan inf) [[A_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]]) #[[ATTR2]]
3032 // FULL_FAST-NEXT: [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
3033 // FULL_FAST-NEXT: [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
3034 // FULL_FAST-NEXT: br label [[COMPLEX_MUL_CONT]]
3035 // FULL_FAST: complex_mul_cont:
3036 // FULL_FAST-NEXT: [[REAL_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[MUL_R]], [[ENTRY:%.*]] ], [ [[MUL_R]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP0]], [[COMPLEX_MUL_LIBCALL]] ]
3037 // FULL_FAST-NEXT: [[IMAG_MUL_PHI:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[MUL_I]], [[ENTRY]] ], [ [[MUL_I]], [[COMPLEX_MUL_IMAG_NAN]] ], [ [[TMP1]], [[COMPLEX_MUL_LIBCALL]] ]
3038 // FULL_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
3039 // FULL_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
3040 // FULL_FAST-NEXT: store x86_fp80 [[REAL_MUL_PHI]], ptr [[RETVAL_REALP]], align 16
3041 // FULL_FAST-NEXT: store x86_fp80 [[IMAG_MUL_PHI]], ptr [[RETVAL_IMAGP]], align 16
3042 // FULL_FAST-NEXT: [[TMP2:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
3043 // FULL_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP2]]
3045 // IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
3046 // IMPRVD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
3047 // IMPRVD_FAST-NEXT: entry:
3048 // IMPRVD_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
3049 // IMPRVD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
3050 // IMPRVD_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
3051 // IMPRVD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
3052 // IMPRVD_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
3053 // IMPRVD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
3054 // IMPRVD_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
3055 // IMPRVD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
3056 // IMPRVD_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
3057 // IMPRVD_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
3058 // IMPRVD_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
3059 // IMPRVD_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
3060 // IMPRVD_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
3061 // IMPRVD_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
3062 // IMPRVD_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
3063 // IMPRVD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
3064 // IMPRVD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
3065 // IMPRVD_FAST-NEXT: store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
3066 // IMPRVD_FAST-NEXT: store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
3067 // IMPRVD_FAST-NEXT: [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
3068 // IMPRVD_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP0]]
3070 // PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) { x86_fp80, x86_fp80 } @mulld(
3071 // PRMTD_FAST-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR1]] {
3072 // PRMTD_FAST-NEXT: entry:
3073 // PRMTD_FAST-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
3074 // PRMTD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
3075 // PRMTD_FAST-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
3076 // PRMTD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
3077 // PRMTD_FAST-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
3078 // PRMTD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
3079 // PRMTD_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
3080 // PRMTD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
3081 // PRMTD_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
3082 // PRMTD_FAST-NEXT: [[MUL_AC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_REAL]]
3083 // PRMTD_FAST-NEXT: [[MUL_BD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_IMAG]]
3084 // PRMTD_FAST-NEXT: [[MUL_AD:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_REAL]], [[B_IMAG]]
3085 // PRMTD_FAST-NEXT: [[MUL_BC:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[A_IMAG]], [[B_REAL]]
3086 // PRMTD_FAST-NEXT: [[MUL_R:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AC]], [[MUL_BD]]
3087 // PRMTD_FAST-NEXT: [[MUL_I:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[MUL_AD]], [[MUL_BC]]
3088 // PRMTD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
3089 // PRMTD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
3090 // PRMTD_FAST-NEXT: store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
3091 // PRMTD_FAST-NEXT: store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
3092 // PRMTD_FAST-NEXT: [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
3093 // PRMTD_FAST-NEXT: ret { x86_fp80, x86_fp80 } [[TMP0]]
3095 // X86WINPRMTD_STRICT-LABEL: define dso_local void @mulld(
3096 // X86WINPRMTD_STRICT-SAME: ptr dead_on_unwind noalias writable sret({ double, double }) align 8 [[AGG_RESULT:%.*]], ptr noundef [[A:%.*]], ptr noundef [[B:%.*]]) #[[ATTR0]] {
3097 // X86WINPRMTD_STRICT-NEXT: entry:
3098 // X86WINPRMTD_STRICT-NEXT: [[RESULT_PTR:%.*]] = alloca ptr, align 8
3099 // X86WINPRMTD_STRICT-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
3100 // X86WINPRMTD_STRICT-NEXT: [[A_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
3101 // X86WINPRMTD_STRICT-NEXT: store ptr [[AGG_RESULT]], ptr [[RESULT_PTR]], align 8
3102 // X86WINPRMTD_STRICT-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
3103 // X86WINPRMTD_STRICT-NEXT: store ptr [[A]], ptr [[A_INDIRECT_ADDR]], align 8
3104 // X86WINPRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 0
3105 // X86WINPRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load double, ptr [[A_REALP]], align 8
3106 // X86WINPRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[A]], i32 0, i32 1
3107 // X86WINPRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load double, ptr [[A_IMAGP]], align 8
3108 // X86WINPRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
3109 // X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
3110 // X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
3111 // X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
3112 // X86WINPRMTD_STRICT-NEXT: [[MUL_AC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
3113 // X86WINPRMTD_STRICT-NEXT: [[MUL_BD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
3114 // X86WINPRMTD_STRICT-NEXT: [[MUL_AD:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_REAL]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
3115 // X86WINPRMTD_STRICT-NEXT: [[MUL_BC:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[A_IMAG]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
3116 // X86WINPRMTD_STRICT-NEXT: [[MUL_R:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[MUL_AC]], double [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
3117 // X86WINPRMTD_STRICT-NEXT: [[MUL_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[MUL_AD]], double [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
3118 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
3119 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
3120 // X86WINPRMTD_STRICT-NEXT: store double [[MUL_R]], ptr [[AGG_RESULT_REALP]], align 8
3121 // X86WINPRMTD_STRICT-NEXT: store double [[MUL_I]], ptr [[AGG_RESULT_IMAGP]], align 8
3122 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP1:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
3123 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REAL:%.*]] = load double, ptr [[AGG_RESULT_REALP1]], align 8
3124 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
3125 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAG:%.*]] = load double, ptr [[AGG_RESULT_IMAGP2]], align 8
3126 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_REALP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 0
3127 // X86WINPRMTD_STRICT-NEXT: [[AGG_RESULT_IMAGP4:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[AGG_RESULT]], i32 0, i32 1
3128 // X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_REAL]], ptr [[AGG_RESULT_REALP3]], align 8
3129 // X86WINPRMTD_STRICT-NEXT: store double [[AGG_RESULT_IMAG]], ptr [[AGG_RESULT_IMAGP4]], align 8
3130 // X86WINPRMTD_STRICT-NEXT: ret void
// PRMTD_STRICT-LABEL: define dso_local { x86_fp80, x86_fp80 } @mulld(
// PRMTD_STRICT-SAME: ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[A:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]]) #[[ATTR2]] {
// PRMTD_STRICT-NEXT: entry:
// PRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { x86_fp80, x86_fp80 }, align 16
// PRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load x86_fp80, ptr [[A_REALP]], align 16
// PRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load x86_fp80, ptr [[A_IMAGP]], align 16
// PRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_STRICT-NEXT: [[MUL_AC:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_BD:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_AD:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_REAL]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_BC:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[A_IMAG]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_R:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[MUL_AC]], x86_fp80 [[MUL_BD]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[MUL_I:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[MUL_AD]], x86_fp80 [[MUL_BC]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store x86_fp80 [[MUL_R]], ptr [[RETVAL_REALP]], align 16
// PRMTD_STRICT-NEXT: store x86_fp80 [[MUL_I]], ptr [[RETVAL_IMAGP]], align 16
// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = load { x86_fp80, x86_fp80 }, ptr [[RETVAL]], align 16
// PRMTD_STRICT-NEXT: ret { x86_fp80, x86_fp80 } [[TMP0]]
//
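// mulld exercises multiplication of _Complex long double operands; the
// strict variants above all lower it to the same four-partial-product
// expansion, (a.re*b.re - a.im*b.im) + (a.re*b.im + a.im*b.re)i.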
_Complex long double mulld(_Complex long double a,
                           _Complex long double b) {
  return a * b;
}
// FULL-LABEL: define dso_local <2 x float> @f1(
// FULL-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// FULL-NEXT: entry:
// FULL-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// FULL-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// FULL-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// FULL-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// FULL-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// FULL-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// FULL-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// FULL-NEXT: [[CALL:%.*]] = call { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef [[B_REAL]], x86_fp80 noundef [[B_IMAG]], x86_fp80 noundef [[CONV]], x86_fp80 noundef [[CONV1]]) #[[ATTR2]]
// FULL-NEXT: [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL-NEXT: [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP0]] to float
// FULL-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP1]] to float
// FULL-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL-NEXT: [[CALL4:%.*]] = call <2 x float> @__divsc3(float noundef [[CONV2]], float noundef [[CONV3]], float noundef [[A_REAL]], float noundef [[A_IMAG]]) #[[ATTR2]]
// FULL-NEXT: store <2 x float> [[CALL4]], ptr [[COERCE]], align 4
// FULL-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL-NEXT: store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL-NEXT: store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL-NEXT: ret <2 x float> [[TMP2]]
//
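// In the full range neither division in f1 is expanded inline: b / c is
// performed in x86_fp80 through the compiler-rt helper __divxc3, the result
// is truncated to float, and the division by a then goes through __divsc3,
// with fpext/fptrunc bridging the two precisions.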
// BASIC-LABEL: define dso_local <2 x float> @f1(
// BASIC-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// BASIC-NEXT: entry:
// BASIC-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// BASIC-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// BASIC-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// BASIC-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// BASIC-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// BASIC-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// BASIC-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// BASIC-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// BASIC-NEXT: [[TMP0:%.*]] = fmul x86_fp80 [[B_REAL]], [[CONV]]
// BASIC-NEXT: [[TMP1:%.*]] = fmul x86_fp80 [[B_IMAG]], [[CONV1]]
// BASIC-NEXT: [[TMP2:%.*]] = fadd x86_fp80 [[TMP0]], [[TMP1]]
// BASIC-NEXT: [[TMP3:%.*]] = fmul x86_fp80 [[CONV]], [[CONV]]
// BASIC-NEXT: [[TMP4:%.*]] = fmul x86_fp80 [[CONV1]], [[CONV1]]
// BASIC-NEXT: [[TMP5:%.*]] = fadd x86_fp80 [[TMP3]], [[TMP4]]
// BASIC-NEXT: [[TMP6:%.*]] = fmul x86_fp80 [[B_IMAG]], [[CONV]]
// BASIC-NEXT: [[TMP7:%.*]] = fmul x86_fp80 [[B_REAL]], [[CONV1]]
// BASIC-NEXT: [[TMP8:%.*]] = fsub x86_fp80 [[TMP6]], [[TMP7]]
// BASIC-NEXT: [[TMP9:%.*]] = fdiv x86_fp80 [[TMP2]], [[TMP5]]
// BASIC-NEXT: [[TMP10:%.*]] = fdiv x86_fp80 [[TMP8]], [[TMP5]]
// BASIC-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP9]] to float
// BASIC-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP10]] to float
// BASIC-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC-NEXT: [[TMP11:%.*]] = fmul float [[CONV2]], [[A_REAL]]
// BASIC-NEXT: [[TMP12:%.*]] = fmul float [[CONV3]], [[A_IMAG]]
// BASIC-NEXT: [[TMP13:%.*]] = fadd float [[TMP11]], [[TMP12]]
// BASIC-NEXT: [[TMP14:%.*]] = fmul float [[A_REAL]], [[A_REAL]]
// BASIC-NEXT: [[TMP15:%.*]] = fmul float [[A_IMAG]], [[A_IMAG]]
// BASIC-NEXT: [[TMP16:%.*]] = fadd float [[TMP14]], [[TMP15]]
// BASIC-NEXT: [[TMP17:%.*]] = fmul float [[CONV3]], [[A_REAL]]
// BASIC-NEXT: [[TMP18:%.*]] = fmul float [[CONV2]], [[A_IMAG]]
// BASIC-NEXT: [[TMP19:%.*]] = fsub float [[TMP17]], [[TMP18]]
// BASIC-NEXT: [[TMP20:%.*]] = fdiv float [[TMP13]], [[TMP16]]
// BASIC-NEXT: [[TMP21:%.*]] = fdiv float [[TMP19]], [[TMP16]]
// BASIC-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC-NEXT: store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// BASIC-NEXT: store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// BASIC-NEXT: [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC-NEXT: ret <2 x float> [[TMP22]]
//
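// The basic range applies the textbook quotient directly:
//   (x + y*i) / (u + v*i) = ((x*u + y*v) + (y*u - x*v)*i) / (u*u + v*v)
// with no scaling, so the u*u + v*v denominator can overflow or underflow
// even when the mathematical result is representable.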
// IMPRVD-LABEL: define dso_local <2 x float> @f1(
// IMPRVD-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// IMPRVD-NEXT: entry:
// IMPRVD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// IMPRVD-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// IMPRVD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// IMPRVD-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// IMPRVD-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// IMPRVD-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// IMPRVD-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// IMPRVD-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// IMPRVD-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// IMPRVD-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// IMPRVD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD: abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD-NEXT: [[TMP2:%.*]] = fdiv x86_fp80 [[CONV1]], [[CONV]]
// IMPRVD-NEXT: [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[CONV1]]
// IMPRVD-NEXT: [[TMP4:%.*]] = fadd x86_fp80 [[CONV]], [[TMP3]]
// IMPRVD-NEXT: [[TMP5:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP2]]
// IMPRVD-NEXT: [[TMP6:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP5]]
// IMPRVD-NEXT: [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD-NEXT: [[TMP8:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP2]]
// IMPRVD-NEXT: [[TMP9:%.*]] = fsub x86_fp80 [[B_IMAG]], [[TMP8]]
// IMPRVD-NEXT: [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV:%.*]]
// IMPRVD: abs_rhsr_less_than_abs_rhsi:
// IMPRVD-NEXT: [[TMP11:%.*]] = fdiv x86_fp80 [[CONV]], [[CONV1]]
// IMPRVD-NEXT: [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[CONV]]
// IMPRVD-NEXT: [[TMP13:%.*]] = fadd x86_fp80 [[CONV1]], [[TMP12]]
// IMPRVD-NEXT: [[TMP14:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP11]]
// IMPRVD-NEXT: [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[B_IMAG]]
// IMPRVD-NEXT: [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD-NEXT: [[TMP17:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP11]]
// IMPRVD-NEXT: [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[B_REAL]]
// IMPRVD-NEXT: [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV]]
// IMPRVD: complex_div:
// IMPRVD-NEXT: [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT: [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// IMPRVD-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// IMPRVD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD-NEXT: [[TMP22:%.*]] = call float @llvm.fabs.f32(float [[A_REAL]])
// IMPRVD-NEXT: [[TMP23:%.*]] = call float @llvm.fabs.f32(float [[A_IMAG]])
// IMPRVD-NEXT: [[ABS_CMP4:%.*]] = fcmp ugt float [[TMP22]], [[TMP23]]
// IMPRVD-NEXT: br i1 [[ABS_CMP4]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI6:%.*]]
// IMPRVD: abs_rhsr_greater_or_equal_abs_rhsi5:
// IMPRVD-NEXT: [[TMP24:%.*]] = fdiv float [[A_IMAG]], [[A_REAL]]
// IMPRVD-NEXT: [[TMP25:%.*]] = fmul float [[TMP24]], [[A_IMAG]]
// IMPRVD-NEXT: [[TMP26:%.*]] = fadd float [[A_REAL]], [[TMP25]]
// IMPRVD-NEXT: [[TMP27:%.*]] = fmul float [[CONV3]], [[TMP24]]
// IMPRVD-NEXT: [[TMP28:%.*]] = fadd float [[CONV2]], [[TMP27]]
// IMPRVD-NEXT: [[TMP29:%.*]] = fdiv float [[TMP28]], [[TMP26]]
// IMPRVD-NEXT: [[TMP30:%.*]] = fmul float [[CONV2]], [[TMP24]]
// IMPRVD-NEXT: [[TMP31:%.*]] = fsub float [[CONV3]], [[TMP30]]
// IMPRVD-NEXT: [[TMP32:%.*]] = fdiv float [[TMP31]], [[TMP26]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV7:%.*]]
// IMPRVD: abs_rhsr_less_than_abs_rhsi6:
// IMPRVD-NEXT: [[TMP33:%.*]] = fdiv float [[A_REAL]], [[A_IMAG]]
// IMPRVD-NEXT: [[TMP34:%.*]] = fmul float [[TMP33]], [[A_REAL]]
// IMPRVD-NEXT: [[TMP35:%.*]] = fadd float [[A_IMAG]], [[TMP34]]
// IMPRVD-NEXT: [[TMP36:%.*]] = fmul float [[CONV2]], [[TMP33]]
// IMPRVD-NEXT: [[TMP37:%.*]] = fadd float [[TMP36]], [[CONV3]]
// IMPRVD-NEXT: [[TMP38:%.*]] = fdiv float [[TMP37]], [[TMP35]]
// IMPRVD-NEXT: [[TMP39:%.*]] = fmul float [[CONV3]], [[TMP33]]
// IMPRVD-NEXT: [[TMP40:%.*]] = fsub float [[TMP39]], [[CONV2]]
// IMPRVD-NEXT: [[TMP41:%.*]] = fdiv float [[TMP40]], [[TMP35]]
// IMPRVD-NEXT: br label [[COMPLEX_DIV7]]
// IMPRVD: complex_div7:
// IMPRVD-NEXT: [[TMP42:%.*]] = phi float [ [[TMP29]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP38]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
// IMPRVD-NEXT: [[TMP43:%.*]] = phi float [ [[TMP32]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP41]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
// IMPRVD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// IMPRVD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// IMPRVD-NEXT: store float [[TMP42]], ptr [[RETVAL_REALP]], align 4
// IMPRVD-NEXT: store float [[TMP43]], ptr [[RETVAL_IMAGP]], align 4
// IMPRVD-NEXT: [[TMP44:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// IMPRVD-NEXT: ret <2 x float> [[TMP44]]
//
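// The improved range is Smith's algorithm: compare |rhs.re| with |rhs.im|,
// form the ratio r of the smaller over the larger component, and divide by
// the scaled denominator (rhs.re + r*rhs.im or rhs.im + r*rhs.re), which
// avoids the premature overflow of the basic expansion. Both divisions in
// f1 take this two-branch form, merged at the complex_div phis.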
// PRMTD-LABEL: define dso_local <2 x float> @f1(
// PRMTD-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD-NEXT: entry:
// PRMTD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// PRMTD-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// PRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// PRMTD-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// PRMTD-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// PRMTD-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// PRMTD-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// PRMTD-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// PRMTD-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// PRMTD-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// PRMTD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt x86_fp80 [[TMP0]], [[TMP1]]
// PRMTD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD: abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD-NEXT: [[TMP2:%.*]] = fdiv x86_fp80 [[CONV1]], [[CONV]]
// PRMTD-NEXT: [[TMP3:%.*]] = fmul x86_fp80 [[TMP2]], [[CONV1]]
// PRMTD-NEXT: [[TMP4:%.*]] = fadd x86_fp80 [[CONV]], [[TMP3]]
// PRMTD-NEXT: [[TMP5:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP2]]
// PRMTD-NEXT: [[TMP6:%.*]] = fadd x86_fp80 [[B_REAL]], [[TMP5]]
// PRMTD-NEXT: [[TMP7:%.*]] = fdiv x86_fp80 [[TMP6]], [[TMP4]]
// PRMTD-NEXT: [[TMP8:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP2]]
// PRMTD-NEXT: [[TMP9:%.*]] = fsub x86_fp80 [[B_IMAG]], [[TMP8]]
// PRMTD-NEXT: [[TMP10:%.*]] = fdiv x86_fp80 [[TMP9]], [[TMP4]]
// PRMTD-NEXT: br label [[COMPLEX_DIV:%.*]]
// PRMTD: abs_rhsr_less_than_abs_rhsi:
// PRMTD-NEXT: [[TMP11:%.*]] = fdiv x86_fp80 [[CONV]], [[CONV1]]
// PRMTD-NEXT: [[TMP12:%.*]] = fmul x86_fp80 [[TMP11]], [[CONV]]
// PRMTD-NEXT: [[TMP13:%.*]] = fadd x86_fp80 [[CONV1]], [[TMP12]]
// PRMTD-NEXT: [[TMP14:%.*]] = fmul x86_fp80 [[B_REAL]], [[TMP11]]
// PRMTD-NEXT: [[TMP15:%.*]] = fadd x86_fp80 [[TMP14]], [[B_IMAG]]
// PRMTD-NEXT: [[TMP16:%.*]] = fdiv x86_fp80 [[TMP15]], [[TMP13]]
// PRMTD-NEXT: [[TMP17:%.*]] = fmul x86_fp80 [[B_IMAG]], [[TMP11]]
// PRMTD-NEXT: [[TMP18:%.*]] = fsub x86_fp80 [[TMP17]], [[B_REAL]]
// PRMTD-NEXT: [[TMP19:%.*]] = fdiv x86_fp80 [[TMP18]], [[TMP13]]
// PRMTD-NEXT: br label [[COMPLEX_DIV]]
// PRMTD: complex_div:
// PRMTD-NEXT: [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT: [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// PRMTD-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// PRMTD-NEXT: [[EXT:%.*]] = fpext float [[CONV2]] to double
// PRMTD-NEXT: [[EXT4:%.*]] = fpext float [[CONV3]] to double
// PRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD-NEXT: [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// PRMTD-NEXT: [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD-NEXT: [[TMP22:%.*]] = fmul double [[EXT]], [[EXT5]]
// PRMTD-NEXT: [[TMP23:%.*]] = fmul double [[EXT4]], [[EXT6]]
// PRMTD-NEXT: [[TMP24:%.*]] = fadd double [[TMP22]], [[TMP23]]
// PRMTD-NEXT: [[TMP25:%.*]] = fmul double [[EXT5]], [[EXT5]]
// PRMTD-NEXT: [[TMP26:%.*]] = fmul double [[EXT6]], [[EXT6]]
// PRMTD-NEXT: [[TMP27:%.*]] = fadd double [[TMP25]], [[TMP26]]
// PRMTD-NEXT: [[TMP28:%.*]] = fmul double [[EXT4]], [[EXT5]]
// PRMTD-NEXT: [[TMP29:%.*]] = fmul double [[EXT]], [[EXT6]]
// PRMTD-NEXT: [[TMP30:%.*]] = fsub double [[TMP28]], [[TMP29]]
// PRMTD-NEXT: [[TMP31:%.*]] = fdiv double [[TMP24]], [[TMP27]]
// PRMTD-NEXT: [[TMP32:%.*]] = fdiv double [[TMP30]], [[TMP27]]
// PRMTD-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP31]] to float
// PRMTD-NEXT: [[UNPROMOTION7:%.*]] = fptrunc double [[TMP32]] to float
// PRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD-NEXT: store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD-NEXT: [[TMP33:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD-NEXT: ret <2 x float> [[TMP33]]
//
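// The promoted range treats f1's two divisions differently: the x86_fp80
// division of b by the widened c keeps the Smith branches (x86_fp80 is not
// promoted further), while the float division by a is promoted to double
// (the EXT fpexts), done with the basic formula, and narrowed back by the
// UNPROMOTION fptruncs.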
// X86WINPRMTD-LABEL: define dso_local i64 @f1(
// X86WINPRMTD-SAME: i64 noundef [[A_COERCE:%.*]], ptr noundef [[B:%.*]], i64 noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// X86WINPRMTD-NEXT: entry:
// X86WINPRMTD-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD-NEXT: store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD-NEXT: store i64 [[C_COERCE]], ptr [[C]], align 4
// X86WINPRMTD-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// X86WINPRMTD-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// X86WINPRMTD-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to double
// X86WINPRMTD-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to double
// X86WINPRMTD-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[CONV]])
// X86WINPRMTD-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[CONV1]])
// X86WINPRMTD-NEXT: [[ABS_CMP:%.*]] = fcmp ugt double [[TMP0]], [[TMP1]]
// X86WINPRMTD-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD: abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD-NEXT: [[TMP2:%.*]] = fdiv double [[CONV1]], [[CONV]]
// X86WINPRMTD-NEXT: [[TMP3:%.*]] = fmul double [[TMP2]], [[CONV1]]
// X86WINPRMTD-NEXT: [[TMP4:%.*]] = fadd double [[CONV]], [[TMP3]]
// X86WINPRMTD-NEXT: [[TMP5:%.*]] = fmul double [[B_IMAG]], [[TMP2]]
// X86WINPRMTD-NEXT: [[TMP6:%.*]] = fadd double [[B_REAL]], [[TMP5]]
// X86WINPRMTD-NEXT: [[TMP7:%.*]] = fdiv double [[TMP6]], [[TMP4]]
// X86WINPRMTD-NEXT: [[TMP8:%.*]] = fmul double [[B_REAL]], [[TMP2]]
// X86WINPRMTD-NEXT: [[TMP9:%.*]] = fsub double [[B_IMAG]], [[TMP8]]
// X86WINPRMTD-NEXT: [[TMP10:%.*]] = fdiv double [[TMP9]], [[TMP4]]
// X86WINPRMTD-NEXT: br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD: abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD-NEXT: [[TMP11:%.*]] = fdiv double [[CONV]], [[CONV1]]
// X86WINPRMTD-NEXT: [[TMP12:%.*]] = fmul double [[TMP11]], [[CONV]]
// X86WINPRMTD-NEXT: [[TMP13:%.*]] = fadd double [[CONV1]], [[TMP12]]
// X86WINPRMTD-NEXT: [[TMP14:%.*]] = fmul double [[B_REAL]], [[TMP11]]
// X86WINPRMTD-NEXT: [[TMP15:%.*]] = fadd double [[TMP14]], [[B_IMAG]]
// X86WINPRMTD-NEXT: [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP13]]
// X86WINPRMTD-NEXT: [[TMP17:%.*]] = fmul double [[B_IMAG]], [[TMP11]]
// X86WINPRMTD-NEXT: [[TMP18:%.*]] = fsub double [[TMP17]], [[B_REAL]]
// X86WINPRMTD-NEXT: [[TMP19:%.*]] = fdiv double [[TMP18]], [[TMP13]]
// X86WINPRMTD-NEXT: br label [[COMPLEX_DIV]]
// X86WINPRMTD: complex_div:
// X86WINPRMTD-NEXT: [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT: [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD-NEXT: [[CONV2:%.*]] = fptrunc double [[TMP20]] to float
// X86WINPRMTD-NEXT: [[CONV3:%.*]] = fptrunc double [[TMP21]] to float
// X86WINPRMTD-NEXT: [[EXT:%.*]] = fpext float [[CONV2]] to double
// X86WINPRMTD-NEXT: [[EXT4:%.*]] = fpext float [[CONV3]] to double
// X86WINPRMTD-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD-NEXT: [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// X86WINPRMTD-NEXT: [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// X86WINPRMTD-NEXT: [[TMP22:%.*]] = fmul double [[EXT]], [[EXT5]]
// X86WINPRMTD-NEXT: [[TMP23:%.*]] = fmul double [[EXT4]], [[EXT6]]
// X86WINPRMTD-NEXT: [[TMP24:%.*]] = fadd double [[TMP22]], [[TMP23]]
// X86WINPRMTD-NEXT: [[TMP25:%.*]] = fmul double [[EXT5]], [[EXT5]]
// X86WINPRMTD-NEXT: [[TMP26:%.*]] = fmul double [[EXT6]], [[EXT6]]
// X86WINPRMTD-NEXT: [[TMP27:%.*]] = fadd double [[TMP25]], [[TMP26]]
// X86WINPRMTD-NEXT: [[TMP28:%.*]] = fmul double [[EXT4]], [[EXT5]]
// X86WINPRMTD-NEXT: [[TMP29:%.*]] = fmul double [[EXT]], [[EXT6]]
// X86WINPRMTD-NEXT: [[TMP30:%.*]] = fsub double [[TMP28]], [[TMP29]]
// X86WINPRMTD-NEXT: [[TMP31:%.*]] = fdiv double [[TMP24]], [[TMP27]]
// X86WINPRMTD-NEXT: [[TMP32:%.*]] = fdiv double [[TMP30]], [[TMP27]]
// X86WINPRMTD-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP31]] to float
// X86WINPRMTD-NEXT: [[UNPROMOTION7:%.*]] = fptrunc double [[TMP32]] to float
// X86WINPRMTD-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD-NEXT: store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD-NEXT: [[TMP33:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD-NEXT: ret i64 [[TMP33]]
//
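// On x86_64-windows-pc, long double is 64-bit double, so the promoted
// lowering of b / c runs in f64 with the Smith branches; the _Complex long
// double argument b (double on this target) is passed indirectly by pointer,
// and the { float, float } argument and return values are coerced to i64
// per the Windows calling convention.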
// AVRFP32-LABEL: define dso_local { float, float } @f1(
// AVRFP32-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], float noundef [[B_COERCE0:%.*]], float noundef [[B_COERCE1:%.*]], float noundef [[C_COERCE0:%.*]], float noundef [[C_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP32-NEXT: entry:
// AVRFP32-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[A:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[B:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[C:%.*]] = alloca { float, float }, align 1
// AVRFP32-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP32-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP32-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: store float [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP32-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: store float [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP32-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP32-NEXT: store float [[C_COERCE0]], ptr [[TMP4]], align 1
// AVRFP32-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP32-NEXT: store float [[C_COERCE1]], ptr [[TMP5]], align 1
// AVRFP32-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 0
// AVRFP32-NEXT: [[B_REAL:%.*]] = load float, ptr [[B_REALP]], align 1
// AVRFP32-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[B]], i32 0, i32 1
// AVRFP32-NEXT: [[B_IMAG:%.*]] = load float, ptr [[B_IMAGP]], align 1
// AVRFP32-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP32-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 1
// AVRFP32-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP32-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP6:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[C_REAL]])
// AVRFP32-NEXT: [[TMP7:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[C_IMAG]])
// AVRFP32-NEXT: [[ABS_CMP:%.*]] = fcmp ugt float [[TMP6]], [[TMP7]]
// AVRFP32-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP32: abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP32-NEXT: [[TMP8:%.*]] = fdiv float [[C_IMAG]], [[C_REAL]]
// AVRFP32-NEXT: [[TMP9:%.*]] = fmul float [[TMP8]], [[C_IMAG]]
// AVRFP32-NEXT: [[TMP10:%.*]] = fadd float [[C_REAL]], [[TMP9]]
// AVRFP32-NEXT: [[TMP11:%.*]] = fmul float [[B_IMAG]], [[TMP8]]
// AVRFP32-NEXT: [[TMP12:%.*]] = fadd float [[B_REAL]], [[TMP11]]
// AVRFP32-NEXT: [[TMP13:%.*]] = fdiv float [[TMP12]], [[TMP10]]
// AVRFP32-NEXT: [[TMP14:%.*]] = fmul float [[B_REAL]], [[TMP8]]
// AVRFP32-NEXT: [[TMP15:%.*]] = fsub float [[B_IMAG]], [[TMP14]]
// AVRFP32-NEXT: [[TMP16:%.*]] = fdiv float [[TMP15]], [[TMP10]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV:%.*]]
// AVRFP32: abs_rhsr_less_than_abs_rhsi:
// AVRFP32-NEXT: [[TMP17:%.*]] = fdiv float [[C_REAL]], [[C_IMAG]]
// AVRFP32-NEXT: [[TMP18:%.*]] = fmul float [[TMP17]], [[C_REAL]]
// AVRFP32-NEXT: [[TMP19:%.*]] = fadd float [[C_IMAG]], [[TMP18]]
// AVRFP32-NEXT: [[TMP20:%.*]] = fmul float [[B_REAL]], [[TMP17]]
// AVRFP32-NEXT: [[TMP21:%.*]] = fadd float [[TMP20]], [[B_IMAG]]
// AVRFP32-NEXT: [[TMP22:%.*]] = fdiv float [[TMP21]], [[TMP19]]
// AVRFP32-NEXT: [[TMP23:%.*]] = fmul float [[B_IMAG]], [[TMP17]]
// AVRFP32-NEXT: [[TMP24:%.*]] = fsub float [[TMP23]], [[B_REAL]]
// AVRFP32-NEXT: [[TMP25:%.*]] = fdiv float [[TMP24]], [[TMP19]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV]]
// AVRFP32: complex_div:
// AVRFP32-NEXT: [[TMP26:%.*]] = phi float [ [[TMP13]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP22]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT: [[TMP27:%.*]] = phi float [ [[TMP16]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP25]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP32-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP32-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP32-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP32-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP28:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[A_REAL]])
// AVRFP32-NEXT: [[TMP29:%.*]] = call addrspace(1) float @llvm.fabs.f32(float [[A_IMAG]])
// AVRFP32-NEXT: [[ABS_CMP1:%.*]] = fcmp ugt float [[TMP28]], [[TMP29]]
// AVRFP32-NEXT: br i1 [[ABS_CMP1]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI2:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI3:%.*]]
// AVRFP32: abs_rhsr_greater_or_equal_abs_rhsi2:
// AVRFP32-NEXT: [[TMP30:%.*]] = fdiv float [[A_IMAG]], [[A_REAL]]
// AVRFP32-NEXT: [[TMP31:%.*]] = fmul float [[TMP30]], [[A_IMAG]]
// AVRFP32-NEXT: [[TMP32:%.*]] = fadd float [[A_REAL]], [[TMP31]]
// AVRFP32-NEXT: [[TMP33:%.*]] = fmul float [[TMP27]], [[TMP30]]
// AVRFP32-NEXT: [[TMP34:%.*]] = fadd float [[TMP26]], [[TMP33]]
// AVRFP32-NEXT: [[TMP35:%.*]] = fdiv float [[TMP34]], [[TMP32]]
// AVRFP32-NEXT: [[TMP36:%.*]] = fmul float [[TMP26]], [[TMP30]]
// AVRFP32-NEXT: [[TMP37:%.*]] = fsub float [[TMP27]], [[TMP36]]
// AVRFP32-NEXT: [[TMP38:%.*]] = fdiv float [[TMP37]], [[TMP32]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV4:%.*]]
// AVRFP32: abs_rhsr_less_than_abs_rhsi3:
// AVRFP32-NEXT: [[TMP39:%.*]] = fdiv float [[A_REAL]], [[A_IMAG]]
// AVRFP32-NEXT: [[TMP40:%.*]] = fmul float [[TMP39]], [[A_REAL]]
// AVRFP32-NEXT: [[TMP41:%.*]] = fadd float [[A_IMAG]], [[TMP40]]
// AVRFP32-NEXT: [[TMP42:%.*]] = fmul float [[TMP26]], [[TMP39]]
// AVRFP32-NEXT: [[TMP43:%.*]] = fadd float [[TMP42]], [[TMP27]]
// AVRFP32-NEXT: [[TMP44:%.*]] = fdiv float [[TMP43]], [[TMP41]]
// AVRFP32-NEXT: [[TMP45:%.*]] = fmul float [[TMP27]], [[TMP39]]
// AVRFP32-NEXT: [[TMP46:%.*]] = fsub float [[TMP45]], [[TMP26]]
// AVRFP32-NEXT: [[TMP47:%.*]] = fdiv float [[TMP46]], [[TMP41]]
// AVRFP32-NEXT: br label [[COMPLEX_DIV4]]
// AVRFP32: complex_div4:
// AVRFP32-NEXT: [[TMP48:%.*]] = phi float [ [[TMP35]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI2]] ], [ [[TMP44]], [[ABS_RHSR_LESS_THAN_ABS_RHSI3]] ]
// AVRFP32-NEXT: [[TMP49:%.*]] = phi float [ [[TMP38]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI2]] ], [ [[TMP47]], [[ABS_RHSR_LESS_THAN_ABS_RHSI3]] ]
// AVRFP32-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP32-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP32-NEXT: store float [[TMP48]], ptr [[RETVAL_REALP]], align 1
// AVRFP32-NEXT: store float [[TMP49]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP32-NEXT: [[TMP50:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP32-NEXT: ret { float, float } [[TMP50]]
//
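// With -mdouble=32 on AVR, double has the same width as float, so promotion
// buys nothing: both divisions stay in float with the Smith branches and no
// fpext/fptrunc pairs, and all accesses are align 1 per the AVR data layout.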
// AVRFP64-LABEL: define dso_local { float, float } @f1(
// AVRFP64-SAME: float noundef [[A_COERCE0:%.*]], float noundef [[A_COERCE1:%.*]], double noundef [[B_COERCE0:%.*]], double noundef [[B_COERCE1:%.*]], float noundef [[C_COERCE0:%.*]], float noundef [[C_COERCE1:%.*]]) addrspace(1) #[[ATTR0]] {
// AVRFP64-NEXT: entry:
// AVRFP64-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT: [[A:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT: [[B:%.*]] = alloca { double, double }, align 1
// AVRFP64-NEXT: [[C:%.*]] = alloca { float, float }, align 1
// AVRFP64-NEXT: [[TMP0:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: store float [[A_COERCE0]], ptr [[TMP0]], align 1
// AVRFP64-NEXT: [[TMP1:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: store float [[A_COERCE1]], ptr [[TMP1]], align 1
// AVRFP64-NEXT: [[TMP2:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: store double [[B_COERCE0]], ptr [[TMP2]], align 1
// AVRFP64-NEXT: [[TMP3:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: store double [[B_COERCE1]], ptr [[TMP3]], align 1
// AVRFP64-NEXT: [[TMP4:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP64-NEXT: store float [[C_COERCE0]], ptr [[TMP4]], align 1
// AVRFP64-NEXT: [[TMP5:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP64-NEXT: store float [[C_COERCE1]], ptr [[TMP5]], align 1
// AVRFP64-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// AVRFP64-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 1
// AVRFP64-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// AVRFP64-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 1
// AVRFP64-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// AVRFP64-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 1
// AVRFP64-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// AVRFP64-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 1
// AVRFP64-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to double
// AVRFP64-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to double
// AVRFP64-NEXT: [[TMP6:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[CONV]])
// AVRFP64-NEXT: [[TMP7:%.*]] = call addrspace(1) double @llvm.fabs.f64(double [[CONV1]])
// AVRFP64-NEXT: [[ABS_CMP:%.*]] = fcmp ugt double [[TMP6]], [[TMP7]]
// AVRFP64-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// AVRFP64: abs_rhsr_greater_or_equal_abs_rhsi:
// AVRFP64-NEXT: [[TMP8:%.*]] = fdiv double [[CONV1]], [[CONV]]
// AVRFP64-NEXT: [[TMP9:%.*]] = fmul double [[TMP8]], [[CONV1]]
// AVRFP64-NEXT: [[TMP10:%.*]] = fadd double [[CONV]], [[TMP9]]
// AVRFP64-NEXT: [[TMP11:%.*]] = fmul double [[B_IMAG]], [[TMP8]]
// AVRFP64-NEXT: [[TMP12:%.*]] = fadd double [[B_REAL]], [[TMP11]]
// AVRFP64-NEXT: [[TMP13:%.*]] = fdiv double [[TMP12]], [[TMP10]]
// AVRFP64-NEXT: [[TMP14:%.*]] = fmul double [[B_REAL]], [[TMP8]]
// AVRFP64-NEXT: [[TMP15:%.*]] = fsub double [[B_IMAG]], [[TMP14]]
// AVRFP64-NEXT: [[TMP16:%.*]] = fdiv double [[TMP15]], [[TMP10]]
// AVRFP64-NEXT: br label [[COMPLEX_DIV:%.*]]
// AVRFP64: abs_rhsr_less_than_abs_rhsi:
// AVRFP64-NEXT: [[TMP17:%.*]] = fdiv double [[CONV]], [[CONV1]]
// AVRFP64-NEXT: [[TMP18:%.*]] = fmul double [[TMP17]], [[CONV]]
// AVRFP64-NEXT: [[TMP19:%.*]] = fadd double [[CONV1]], [[TMP18]]
// AVRFP64-NEXT: [[TMP20:%.*]] = fmul double [[B_REAL]], [[TMP17]]
// AVRFP64-NEXT: [[TMP21:%.*]] = fadd double [[TMP20]], [[B_IMAG]]
// AVRFP64-NEXT: [[TMP22:%.*]] = fdiv double [[TMP21]], [[TMP19]]
// AVRFP64-NEXT: [[TMP23:%.*]] = fmul double [[B_IMAG]], [[TMP17]]
// AVRFP64-NEXT: [[TMP24:%.*]] = fsub double [[TMP23]], [[B_REAL]]
// AVRFP64-NEXT: [[TMP25:%.*]] = fdiv double [[TMP24]], [[TMP19]]
// AVRFP64-NEXT: br label [[COMPLEX_DIV]]
// AVRFP64: complex_div:
// AVRFP64-NEXT: [[TMP26:%.*]] = phi double [ [[TMP13]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP22]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT: [[TMP27:%.*]] = phi double [ [[TMP16]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP25]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// AVRFP64-NEXT: [[CONV2:%.*]] = fptrunc double [[TMP26]] to float
// AVRFP64-NEXT: [[CONV3:%.*]] = fptrunc double [[TMP27]] to float
// AVRFP64-NEXT: [[EXT:%.*]] = fpext float [[CONV2]] to double
// AVRFP64-NEXT: [[EXT4:%.*]] = fpext float [[CONV3]] to double
// AVRFP64-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// AVRFP64-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 1
// AVRFP64-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// AVRFP64-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 1
// AVRFP64-NEXT: [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// AVRFP64-NEXT: [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// AVRFP64-NEXT: [[TMP28:%.*]] = fmul double [[EXT]], [[EXT5]]
// AVRFP64-NEXT: [[TMP29:%.*]] = fmul double [[EXT4]], [[EXT6]]
// AVRFP64-NEXT: [[TMP30:%.*]] = fadd double [[TMP28]], [[TMP29]]
// AVRFP64-NEXT: [[TMP31:%.*]] = fmul double [[EXT5]], [[EXT5]]
// AVRFP64-NEXT: [[TMP32:%.*]] = fmul double [[EXT6]], [[EXT6]]
// AVRFP64-NEXT: [[TMP33:%.*]] = fadd double [[TMP31]], [[TMP32]]
// AVRFP64-NEXT: [[TMP34:%.*]] = fmul double [[EXT4]], [[EXT5]]
// AVRFP64-NEXT: [[TMP35:%.*]] = fmul double [[EXT]], [[EXT6]]
// AVRFP64-NEXT: [[TMP36:%.*]] = fsub double [[TMP34]], [[TMP35]]
// AVRFP64-NEXT: [[TMP37:%.*]] = fdiv double [[TMP30]], [[TMP33]]
// AVRFP64-NEXT: [[TMP38:%.*]] = fdiv double [[TMP36]], [[TMP33]]
// AVRFP64-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP37]] to float
// AVRFP64-NEXT: [[UNPROMOTION7:%.*]] = fptrunc double [[TMP38]] to float
// AVRFP64-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// AVRFP64-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// AVRFP64-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 1
// AVRFP64-NEXT: store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 1
// AVRFP64-NEXT: [[TMP39:%.*]] = load { float, float }, ptr [[RETVAL]], align 1
// AVRFP64-NEXT: ret { float, float } [[TMP39]]
//
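// With -mdouble=64 the AVR lowering mirrors the x86 promoted case: b / c is
// a double-precision Smith division, and the float division by a is widened
// to double and narrowed back via the EXT/UNPROMOTION conversions.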
// BASIC_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// BASIC_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// BASIC_FAST-NEXT: entry:
// BASIC_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// BASIC_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// BASIC_FAST-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// BASIC_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// BASIC_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// BASIC_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// BASIC_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// BASIC_FAST-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// BASIC_FAST-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// BASIC_FAST-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// BASIC_FAST-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// BASIC_FAST-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// BASIC_FAST-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// BASIC_FAST-NEXT: [[TMP0:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[CONV]]
// BASIC_FAST-NEXT: [[TMP1:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[CONV1]]
// BASIC_FAST-NEXT: [[TMP2:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP0]], [[TMP1]]
// BASIC_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[CONV]]
// BASIC_FAST-NEXT: [[TMP4:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[CONV1]]
// BASIC_FAST-NEXT: [[TMP5:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP3]], [[TMP4]]
// BASIC_FAST-NEXT: [[TMP6:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[CONV]]
// BASIC_FAST-NEXT: [[TMP7:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[CONV1]]
// BASIC_FAST-NEXT: [[TMP8:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP7]]
// BASIC_FAST-NEXT: [[TMP9:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[TMP5]]
// BASIC_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP8]], [[TMP5]]
// BASIC_FAST-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP9]] to float
// BASIC_FAST-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP10]] to float
// BASIC_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// BASIC_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// BASIC_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// BASIC_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// BASIC_FAST-NEXT: [[TMP11:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[A_REAL]]
// BASIC_FAST-NEXT: [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[A_IMAG]]
// BASIC_FAST-NEXT: [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP11]], [[TMP12]]
// BASIC_FAST-NEXT: [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[A_REAL]]
// BASIC_FAST-NEXT: [[TMP15:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[A_IMAG]]
// BASIC_FAST-NEXT: [[TMP16:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP14]], [[TMP15]]
// BASIC_FAST-NEXT: [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[A_REAL]]
// BASIC_FAST-NEXT: [[TMP18:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[A_IMAG]]
// BASIC_FAST-NEXT: [[TMP19:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP17]], [[TMP18]]
// BASIC_FAST-NEXT: [[TMP20:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP13]], [[TMP16]]
// BASIC_FAST-NEXT: [[TMP21:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP19]], [[TMP16]]
// BASIC_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// BASIC_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// BASIC_FAST-NEXT: store float [[TMP20]], ptr [[RETVAL_REALP]], align 4
// BASIC_FAST-NEXT: store float [[TMP21]], ptr [[RETVAL_IMAGP]], align 4
// BASIC_FAST-NEXT: [[TMP22:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// BASIC_FAST-NEXT: ret <2 x float> [[TMP22]]
//
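// Under -ffast-math the basic expansion is structurally unchanged; the
// arguments and return gain nofpclass(nan inf) and every arithmetic
// instruction carries the fast-math flags (reassoc nnan ninf nsz arcp afn).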
// FULL_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// FULL_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// FULL_FAST-NEXT: entry:
// FULL_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: [[COERCE:%.*]] = alloca { float, float }, align 4
// FULL_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// FULL_FAST-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// FULL_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// FULL_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// FULL_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// FULL_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// FULL_FAST-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// FULL_FAST-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// FULL_FAST-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// FULL_FAST-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// FULL_FAST-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// FULL_FAST-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// FULL_FAST-NEXT: [[CALL:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) { x86_fp80, x86_fp80 } @__divxc3(x86_fp80 noundef nofpclass(nan inf) [[B_REAL]], x86_fp80 noundef nofpclass(nan inf) [[B_IMAG]], x86_fp80 noundef nofpclass(nan inf) [[CONV]], x86_fp80 noundef nofpclass(nan inf) [[CONV1]]) #[[ATTR2]]
// FULL_FAST-NEXT: [[TMP0:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 0
// FULL_FAST-NEXT: [[TMP1:%.*]] = extractvalue { x86_fp80, x86_fp80 } [[CALL]], 1
// FULL_FAST-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP0]] to float
// FULL_FAST-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP1]] to float
// FULL_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// FULL_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// FULL_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// FULL_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// FULL_FAST-NEXT: [[CALL4:%.*]] = call reassoc nnan ninf nsz arcp afn nofpclass(nan inf) <2 x float> @__divsc3(float noundef nofpclass(nan inf) [[CONV2]], float noundef nofpclass(nan inf) [[CONV3]], float noundef nofpclass(nan inf) [[A_REAL]], float noundef nofpclass(nan inf) [[A_IMAG]]) #[[ATTR2]]
// FULL_FAST-NEXT: store <2 x float> [[CALL4]], ptr [[COERCE]], align 4
// FULL_FAST-NEXT: [[COERCE_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 0
// FULL_FAST-NEXT: [[COERCE_REAL:%.*]] = load float, ptr [[COERCE_REALP]], align 4
// FULL_FAST-NEXT: [[COERCE_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[COERCE]], i32 0, i32 1
// FULL_FAST-NEXT: [[COERCE_IMAG:%.*]] = load float, ptr [[COERCE_IMAGP]], align 4
// FULL_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// FULL_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// FULL_FAST-NEXT: store float [[COERCE_REAL]], ptr [[RETVAL_REALP]], align 4
// FULL_FAST-NEXT: store float [[COERCE_IMAG]], ptr [[RETVAL_IMAGP]], align 4
// FULL_FAST-NEXT: [[TMP2:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// FULL_FAST-NEXT: ret <2 x float> [[TMP2]]
//
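// Fast math does not replace the full-range libcalls: __divxc3 and __divsc3
// are still used, but the calls carry the fast-math flags and
// nofpclass(nan inf) on their floating-point operands and results.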
// IMPRVD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// IMPRVD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// IMPRVD_FAST-NEXT: entry:
// IMPRVD_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// IMPRVD_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// IMPRVD_FAST-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// IMPRVD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// IMPRVD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// IMPRVD_FAST-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// IMPRVD_FAST-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// IMPRVD_FAST-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// IMPRVD_FAST-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// IMPRVD_FAST-NEXT: [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// IMPRVD_FAST-NEXT: [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// IMPRVD_FAST-NEXT: [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
// IMPRVD_FAST-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// IMPRVD_FAST: abs_rhsr_greater_or_equal_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[CONV]]
// IMPRVD_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[CONV1]]
// IMPRVD_FAST-NEXT: [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[TMP3]]
// IMPRVD_FAST-NEXT: [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP2]]
// IMPRVD_FAST-NEXT: [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP5]]
// IMPRVD_FAST-NEXT: [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
// IMPRVD_FAST-NEXT: [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP2]]
// IMPRVD_FAST-NEXT: [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP8]]
// IMPRVD_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV:%.*]]
// IMPRVD_FAST: abs_rhsr_less_than_abs_rhsi:
// IMPRVD_FAST-NEXT: [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[CONV1]]
// IMPRVD_FAST-NEXT: [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[CONV]]
// IMPRVD_FAST-NEXT: [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[TMP12]]
// IMPRVD_FAST-NEXT: [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP11]]
// IMPRVD_FAST-NEXT: [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[B_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
// IMPRVD_FAST-NEXT: [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP11]]
// IMPRVD_FAST-NEXT: [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[B_REAL]]
// IMPRVD_FAST-NEXT: [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV]]
// IMPRVD_FAST: complex_div:
// IMPRVD_FAST-NEXT: [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// IMPRVD_FAST-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// IMPRVD_FAST-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// IMPRVD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// IMPRVD_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// IMPRVD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// IMPRVD_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// IMPRVD_FAST-NEXT: [[TMP22:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[A_REAL]])
// IMPRVD_FAST-NEXT: [[TMP23:%.*]] = call reassoc nnan ninf nsz arcp afn float @llvm.fabs.f32(float [[A_IMAG]])
// IMPRVD_FAST-NEXT: [[ABS_CMP4:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt float [[TMP22]], [[TMP23]]
// IMPRVD_FAST-NEXT: br i1 [[ABS_CMP4]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI6:%.*]]
// IMPRVD_FAST: abs_rhsr_greater_or_equal_abs_rhsi5:
// IMPRVD_FAST-NEXT: [[TMP24:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[A_REAL]]
// IMPRVD_FAST-NEXT: [[TMP25:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP24]], [[A_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP26:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[TMP25]]
// IMPRVD_FAST-NEXT: [[TMP27:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[TMP24]]
// IMPRVD_FAST-NEXT: [[TMP28:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[CONV2]], [[TMP27]]
// IMPRVD_FAST-NEXT: [[TMP29:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP28]], [[TMP26]]
// IMPRVD_FAST-NEXT: [[TMP30:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[TMP24]]
// IMPRVD_FAST-NEXT: [[TMP31:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[CONV3]], [[TMP30]]
// IMPRVD_FAST-NEXT: [[TMP32:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP31]], [[TMP26]]
// IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV7:%.*]]
// IMPRVD_FAST: abs_rhsr_less_than_abs_rhsi6:
// IMPRVD_FAST-NEXT: [[TMP33:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[A_REAL]], [[A_IMAG]]
// IMPRVD_FAST-NEXT: [[TMP34:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[TMP33]], [[A_REAL]]
// IMPRVD_FAST-NEXT: [[TMP35:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[A_IMAG]], [[TMP34]]
// IMPRVD_FAST-NEXT: [[TMP36:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV2]], [[TMP33]]
3851 // IMPRVD_FAST-NEXT: [[TMP37:%.*]] = fadd reassoc nnan ninf nsz arcp afn float [[TMP36]], [[CONV3]]
3852 // IMPRVD_FAST-NEXT: [[TMP38:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP37]], [[TMP35]]
3853 // IMPRVD_FAST-NEXT: [[TMP39:%.*]] = fmul reassoc nnan ninf nsz arcp afn float [[CONV3]], [[TMP33]]
3854 // IMPRVD_FAST-NEXT: [[TMP40:%.*]] = fsub reassoc nnan ninf nsz arcp afn float [[TMP39]], [[CONV2]]
3855 // IMPRVD_FAST-NEXT: [[TMP41:%.*]] = fdiv reassoc nnan ninf nsz arcp afn float [[TMP40]], [[TMP35]]
3856 // IMPRVD_FAST-NEXT: br label [[COMPLEX_DIV7]]
3857 // IMPRVD_FAST: complex_div7:
3858 // IMPRVD_FAST-NEXT: [[TMP42:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP29]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP38]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
3859 // IMPRVD_FAST-NEXT: [[TMP43:%.*]] = phi reassoc nnan ninf nsz arcp afn float [ [[TMP32]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI5]] ], [ [[TMP41]], [[ABS_RHSR_LESS_THAN_ABS_RHSI6]] ]
3860 // IMPRVD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
3861 // IMPRVD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
3862 // IMPRVD_FAST-NEXT: store float [[TMP42]], ptr [[RETVAL_REALP]], align 4
3863 // IMPRVD_FAST-NEXT: store float [[TMP43]], ptr [[RETVAL_IMAGP]], align 4
3864 // IMPRVD_FAST-NEXT: [[TMP44:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
3865 // IMPRVD_FAST-NEXT: ret <2 x float> [[TMP44]]
// PRMTD_FAST-LABEL: define dso_local nofpclass(nan inf) <2 x float> @f1(
// PRMTD_FAST-SAME: <2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD_FAST-NEXT: entry:
// PRMTD_FAST-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// PRMTD_FAST-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_FAST-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// PRMTD_FAST-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_FAST-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_FAST-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// PRMTD_FAST-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// PRMTD_FAST-NEXT: [[CONV:%.*]] = fpext float [[C_REAL]] to x86_fp80
// PRMTD_FAST-NEXT: [[CONV1:%.*]] = fpext float [[C_IMAG]] to x86_fp80
// PRMTD_FAST-NEXT: [[TMP0:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]])
// PRMTD_FAST-NEXT: [[TMP1:%.*]] = call reassoc nnan ninf nsz arcp afn x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]])
// PRMTD_FAST-NEXT: [[ABS_CMP:%.*]] = fcmp reassoc nnan ninf nsz arcp afn ugt x86_fp80 [[TMP0]], [[TMP1]]
// PRMTD_FAST-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_FAST: abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD_FAST-NEXT: [[TMP2:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[CONV]]
// PRMTD_FAST-NEXT: [[TMP3:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP2]], [[CONV1]]
// PRMTD_FAST-NEXT: [[TMP4:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[TMP3]]
// PRMTD_FAST-NEXT: [[TMP5:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP2]]
// PRMTD_FAST-NEXT: [[TMP6:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP5]]
// PRMTD_FAST-NEXT: [[TMP7:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP6]], [[TMP4]]
// PRMTD_FAST-NEXT: [[TMP8:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP2]]
// PRMTD_FAST-NEXT: [[TMP9:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP8]]
// PRMTD_FAST-NEXT: [[TMP10:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP9]], [[TMP4]]
// PRMTD_FAST-NEXT: br label [[COMPLEX_DIV:%.*]]
// PRMTD_FAST: abs_rhsr_less_than_abs_rhsi:
// PRMTD_FAST-NEXT: [[TMP11:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV]], [[CONV1]]
// PRMTD_FAST-NEXT: [[TMP12:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP11]], [[CONV]]
// PRMTD_FAST-NEXT: [[TMP13:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[CONV1]], [[TMP12]]
// PRMTD_FAST-NEXT: [[TMP14:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_REAL]], [[TMP11]]
// PRMTD_FAST-NEXT: [[TMP15:%.*]] = fadd reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP14]], [[B_IMAG]]
// PRMTD_FAST-NEXT: [[TMP16:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP15]], [[TMP13]]
// PRMTD_FAST-NEXT: [[TMP17:%.*]] = fmul reassoc nnan ninf nsz arcp afn x86_fp80 [[B_IMAG]], [[TMP11]]
// PRMTD_FAST-NEXT: [[TMP18:%.*]] = fsub reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP17]], [[B_REAL]]
// PRMTD_FAST-NEXT: [[TMP19:%.*]] = fdiv reassoc nnan ninf nsz arcp afn x86_fp80 [[TMP18]], [[TMP13]]
// PRMTD_FAST-NEXT: br label [[COMPLEX_DIV]]
// PRMTD_FAST: complex_div:
// PRMTD_FAST-NEXT: [[TMP20:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_FAST-NEXT: [[TMP21:%.*]] = phi reassoc nnan ninf nsz arcp afn x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_FAST-NEXT: [[CONV2:%.*]] = fptrunc x86_fp80 [[TMP20]] to float
// PRMTD_FAST-NEXT: [[CONV3:%.*]] = fptrunc x86_fp80 [[TMP21]] to float
// PRMTD_FAST-NEXT: [[EXT:%.*]] = fpext float [[CONV2]] to double
// PRMTD_FAST-NEXT: [[EXT4:%.*]] = fpext float [[CONV3]] to double
// PRMTD_FAST-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_FAST-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_FAST-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_FAST-NEXT: [[EXT5:%.*]] = fpext float [[A_REAL]] to double
// PRMTD_FAST-NEXT: [[EXT6:%.*]] = fpext float [[A_IMAG]] to double
// PRMTD_FAST-NEXT: [[TMP22:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT5]]
// PRMTD_FAST-NEXT: [[TMP23:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT4]], [[EXT6]]
// PRMTD_FAST-NEXT: [[TMP24:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP22]], [[TMP23]]
// PRMTD_FAST-NEXT: [[TMP25:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT5]], [[EXT5]]
// PRMTD_FAST-NEXT: [[TMP26:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT6]], [[EXT6]]
// PRMTD_FAST-NEXT: [[TMP27:%.*]] = fadd reassoc nnan ninf nsz arcp afn double [[TMP25]], [[TMP26]]
// PRMTD_FAST-NEXT: [[TMP28:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT4]], [[EXT5]]
// PRMTD_FAST-NEXT: [[TMP29:%.*]] = fmul reassoc nnan ninf nsz arcp afn double [[EXT]], [[EXT6]]
// PRMTD_FAST-NEXT: [[TMP30:%.*]] = fsub reassoc nnan ninf nsz arcp afn double [[TMP28]], [[TMP29]]
// PRMTD_FAST-NEXT: [[TMP31:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP24]], [[TMP27]]
// PRMTD_FAST-NEXT: [[TMP32:%.*]] = fdiv reassoc nnan ninf nsz arcp afn double [[TMP30]], [[TMP27]]
// PRMTD_FAST-NEXT: [[UNPROMOTION:%.*]] = fptrunc double [[TMP31]] to float
// PRMTD_FAST-NEXT: [[UNPROMOTION7:%.*]] = fptrunc double [[TMP32]] to float
// PRMTD_FAST-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_FAST-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_FAST-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_FAST-NEXT: store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_FAST-NEXT: [[TMP33:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_FAST-NEXT: ret <2 x float> [[TMP33]]
//
// X86WINPRMTD_STRICT-LABEL: define dso_local i64 @f1(
// X86WINPRMTD_STRICT-SAME: i64 noundef [[A_COERCE:%.*]], ptr noundef [[B:%.*]], i64 noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// X86WINPRMTD_STRICT-NEXT: entry:
// X86WINPRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// X86WINPRMTD_STRICT-NEXT: [[B_INDIRECT_ADDR:%.*]] = alloca ptr, align 8
// X86WINPRMTD_STRICT-NEXT: store i64 [[A_COERCE]], ptr [[A]], align 4
// X86WINPRMTD_STRICT-NEXT: store i64 [[C_COERCE]], ptr [[C]], align 4
// X86WINPRMTD_STRICT-NEXT: store ptr [[B]], ptr [[B_INDIRECT_ADDR]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load double, ptr [[B_REALP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { double, double }, ptr [[B]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load double, ptr [[B_IMAGP]], align 8
// X86WINPRMTD_STRICT-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[CONV:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[C_REAL]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[CONV1:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[C_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP0:%.*]] = call double @llvm.fabs.f64(double [[CONV]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP1:%.*]] = call double @llvm.fabs.f64(double [[CONV1]]) #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double [[TMP0]], double [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// X86WINPRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT: [[TMP2:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[CONV1]], double [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP3:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP2]], double [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP4:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[CONV]], double [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP5:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_IMAG]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP6:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[B_REAL]], double [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP7:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP6]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP8:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_REAL]], double [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP9:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[B_IMAG]], double [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP10:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP9]], double [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br label [[COMPLEX_DIV:%.*]]
// X86WINPRMTD_STRICT: abs_rhsr_less_than_abs_rhsi:
// X86WINPRMTD_STRICT-NEXT: [[TMP11:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[CONV]], double [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP12:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[TMP11]], double [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP13:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[CONV1]], double [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP14:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_REAL]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP15:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP14]], double [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP16:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP15]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP17:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[B_IMAG]], double [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP18:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP17]], double [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP19:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP18]], double [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: br label [[COMPLEX_DIV]]
// X86WINPRMTD_STRICT: complex_div:
// X86WINPRMTD_STRICT-NEXT: [[TMP20:%.*]] = phi double [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT: [[TMP21:%.*]] = phi double [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// X86WINPRMTD_STRICT-NEXT: [[CONV2:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[CONV3:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV2]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[EXT4:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV3]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[EXT5:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[EXT6:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP22:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP24:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP22]], double [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT5]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP26:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT6]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP25]], double [[TMP26]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP30:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP28]], double [[TMP29]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP31:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP24]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[TMP32:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP30]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[UNPROMOTION7:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP32]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR3]]
// X86WINPRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// X86WINPRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// X86WINPRMTD_STRICT-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// X86WINPRMTD_STRICT-NEXT: store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// X86WINPRMTD_STRICT-NEXT: [[TMP33:%.*]] = load i64, ptr [[RETVAL]], align 4
// X86WINPRMTD_STRICT-NEXT: ret i64 [[TMP33]]
//
// PRMTD_STRICT-LABEL: define dso_local <2 x float> @f1(
// PRMTD_STRICT-SAME: <2 x float> noundef [[A_COERCE:%.*]], ptr noundef byval({ x86_fp80, x86_fp80 }) align 16 [[B:%.*]], <2 x float> noundef [[C_COERCE:%.*]]) #[[ATTR0]] {
// PRMTD_STRICT-NEXT: entry:
// PRMTD_STRICT-NEXT: [[RETVAL:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT: [[A:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT: [[C:%.*]] = alloca { float, float }, align 4
// PRMTD_STRICT-NEXT: store <2 x float> [[A_COERCE]], ptr [[A]], align 4
// PRMTD_STRICT-NEXT: store <2 x float> [[C_COERCE]], ptr [[C]], align 4
// PRMTD_STRICT-NEXT: [[B_REALP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[B_REAL:%.*]] = load x86_fp80, ptr [[B_REALP]], align 16
// PRMTD_STRICT-NEXT: [[B_IMAGP:%.*]] = getelementptr inbounds nuw { x86_fp80, x86_fp80 }, ptr [[B]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[B_IMAG:%.*]] = load x86_fp80, ptr [[B_IMAGP]], align 16
// PRMTD_STRICT-NEXT: [[C_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[C_REAL:%.*]] = load float, ptr [[C_REALP]], align 4
// PRMTD_STRICT-NEXT: [[C_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[C]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[C_IMAG:%.*]] = load float, ptr [[C_IMAGP]], align 4
// PRMTD_STRICT-NEXT: [[CONV:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float [[C_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[CONV1:%.*]] = call x86_fp80 @llvm.experimental.constrained.fpext.f80.f32(float [[C_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP0:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV]]) #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP1:%.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 [[CONV1]]) #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[ABS_CMP:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f80(x86_fp80 [[TMP0]], x86_fp80 [[TMP1]], metadata !"ugt", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: br i1 [[ABS_CMP]], label [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI:%.*]], label [[ABS_RHSR_LESS_THAN_ABS_RHSI:%.*]]
// PRMTD_STRICT: abs_rhsr_greater_or_equal_abs_rhsi:
// PRMTD_STRICT-NEXT: [[TMP2:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[CONV1]], x86_fp80 [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP3:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP2]], x86_fp80 [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP4:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[CONV]], x86_fp80 [[TMP3]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP5:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP6:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP7:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP6]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP8:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP2]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP9:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP8]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP10:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP9]], x86_fp80 [[TMP4]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: br label [[COMPLEX_DIV:%.*]]
// PRMTD_STRICT: abs_rhsr_less_than_abs_rhsi:
// PRMTD_STRICT-NEXT: [[TMP11:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[CONV]], x86_fp80 [[CONV1]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP12:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[TMP11]], x86_fp80 [[CONV]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP13:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[CONV1]], x86_fp80 [[TMP12]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP14:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_REAL]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP15:%.*]] = call x86_fp80 @llvm.experimental.constrained.fadd.f80(x86_fp80 [[TMP14]], x86_fp80 [[B_IMAG]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP16:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP15]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP17:%.*]] = call x86_fp80 @llvm.experimental.constrained.fmul.f80(x86_fp80 [[B_IMAG]], x86_fp80 [[TMP11]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP18:%.*]] = call x86_fp80 @llvm.experimental.constrained.fsub.f80(x86_fp80 [[TMP17]], x86_fp80 [[B_REAL]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP19:%.*]] = call x86_fp80 @llvm.experimental.constrained.fdiv.f80(x86_fp80 [[TMP18]], x86_fp80 [[TMP13]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: br label [[COMPLEX_DIV]]
// PRMTD_STRICT: complex_div:
// PRMTD_STRICT-NEXT: [[TMP20:%.*]] = phi x86_fp80 [ [[TMP7]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP16]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_STRICT-NEXT: [[TMP21:%.*]] = phi x86_fp80 [ [[TMP10]], [[ABS_RHSR_GREATER_OR_EQUAL_ABS_RHSI]] ], [ [[TMP19]], [[ABS_RHSR_LESS_THAN_ABS_RHSI]] ]
// PRMTD_STRICT-NEXT: [[CONV2:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80 [[TMP20]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[CONV3:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f80(x86_fp80 [[TMP21]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[EXT:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV2]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[EXT4:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[CONV3]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[A_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[A_REAL:%.*]] = load float, ptr [[A_REALP]], align 4
// PRMTD_STRICT-NEXT: [[A_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[A]], i32 0, i32 1
// PRMTD_STRICT-NEXT: [[A_IMAG:%.*]] = load float, ptr [[A_IMAGP]], align 4
// PRMTD_STRICT-NEXT: [[EXT5:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_REAL]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[EXT6:%.*]] = call double @llvm.experimental.constrained.fpext.f64.f32(float [[A_IMAG]], metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP22:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP23:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP24:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP22]], double [[TMP23]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP25:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT5]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP26:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT6]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP27:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[TMP25]], double [[TMP26]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP28:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT4]], double [[EXT5]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP29:%.*]] = call double @llvm.experimental.constrained.fmul.f64(double [[EXT]], double [[EXT6]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP30:%.*]] = call double @llvm.experimental.constrained.fsub.f64(double [[TMP28]], double [[TMP29]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP31:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP24]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[TMP32:%.*]] = call double @llvm.experimental.constrained.fdiv.f64(double [[TMP30]], double [[TMP27]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[UNPROMOTION:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP31]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[UNPROMOTION7:%.*]] = call float @llvm.experimental.constrained.fptrunc.f32.f64(double [[TMP32]], metadata !"round.dynamic", metadata !"fpexcept.strict") #[[ATTR4]]
// PRMTD_STRICT-NEXT: [[RETVAL_REALP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 0
// PRMTD_STRICT-NEXT: [[RETVAL_IMAGP:%.*]] = getelementptr inbounds nuw { float, float }, ptr [[RETVAL]], i32 0, i32 1
// PRMTD_STRICT-NEXT: store float [[UNPROMOTION]], ptr [[RETVAL_REALP]], align 4
// PRMTD_STRICT-NEXT: store float [[UNPROMOTION7]], ptr [[RETVAL_IMAGP]], align 4
// PRMTD_STRICT-NEXT: [[TMP33:%.*]] = load <2 x float>, ptr [[RETVAL]], align 4
// PRMTD_STRICT-NEXT: ret <2 x float> [[TMP33]]
//
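// Note (informal summary, not FileCheck input): in f1 below, b / c is a
// _Complex long double division, so even the improved/promoted ranges have
// no wider type to promote to and emit the range-reduced (Smith) lowering
// seen in the abs_rhsr_* blocks above (in double rather than x86_fp80 on
// x86_64-windows-pc, where long double is 64-bit). The quotient is then
// truncated to _Complex float, and the division by a is range-reduced again
// under IMPRVD, but promoted to double and lowered to the plain
// (x*c + y*d, y*c - x*d) / (c^2 + d^2) form for (x+iy)/(c+id) under PRMTD.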
_Complex float f1(_Complex float a, _Complex long double b, _Complex float c) {
  return (_Complex float)(b / c) / a;
}
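
// For illustration only -- a minimal C sketch (the helper name and comments
// are ours, not part of the checked output) of the range-reduced division
// that the abs_rhsr_* blocks implement for (x + iy) / (c + id). Scaling by
// the ratio r keeps |r| <= 1, so the scaled denominator t avoids the
// overflow that c*c + d*d can hit in the textbook formula. The helper is
// internal and unused, so it is never emitted and no checks above change.
__attribute__((unused)) static void smith_div_sketch(float x, float y,
                                                     float c, float d,
                                                     float *re, float *im) {
  if (__builtin_fabsf(c) > __builtin_fabsf(d)) {
    float r = d / c;     // |r| <= 1 on this branch
    float t = c + d * r; // equals (c*c + d*d) / c, without squaring c
    *re = (x + y * r) / t;
    *im = (y - x * r) / t;
  } else {
    float r = c / d;     // |r| <= 1 on this branch
    float t = d + c * r; // equals (c*c + d*d) / d, without squaring d
    *re = (x * r + y) / t;
    *im = (y * r - x) / t;
  }
}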

//.
// FULL: [[PROF2]] = !{!"branch_weights", i32 1, i32 1048575}
//.
// FULL_FAST: [[PROF2]] = !{!"branch_weights", i32 1, i32 1048575}