; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve -o - | FileCheck %s

target triple = "aarch64"

%"class.std::complex" = type { { double, double } }

; Zero initialized reduction. The IR is generated with predicated tail folding (-prefer-predicate-over-epilogue=predicate-dont-vectorize)
;
; complex<double> x = 0.0 + 0.0i;
; for (int i = 0; i < 100; ++i)
;   x += a[i] * b[i];
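;
; On SVE the complex multiply-accumulate is expected to lower to pairs of
; fcmla instructions (rotations #0 and #90), with the active lane mask
; selecting which accumulator lanes are updated, as the CHECK lines verify.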
define %"class.std::complex" @complex_mul_v2f64(ptr %a, ptr %b) {
; CHECK-LABEL: complex_mul_v2f64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w9, #100 // =0x64
; CHECK-NEXT: mov z1.d, #0 // =0x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: whilelo p1.d, xzr, x9
; CHECK-NEXT: cntd x10
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: rdvl x11, #2
; CHECK-NEXT: mov x12, x10
; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB0_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: zip2 p2.d, p1.d, p1.d
; CHECK-NEXT: add x13, x0, x8
; CHECK-NEXT: add x14, x1, x8
; CHECK-NEXT: zip1 p3.d, p1.d, p1.d
; CHECK-NEXT: mov z6.d, z1.d
; CHECK-NEXT: mov z7.d, z0.d
; CHECK-NEXT: whilelo p1.d, x12, x9
; CHECK-NEXT: add x8, x8, x11
; CHECK-NEXT: add x12, x12, x10
; CHECK-NEXT: ld1d { z2.d }, p2/z, [x13, #1, mul vl]
; CHECK-NEXT: ld1d { z3.d }, p3/z, [x13]
; CHECK-NEXT: ld1d { z4.d }, p2/z, [x14, #1, mul vl]
; CHECK-NEXT: ld1d { z5.d }, p3/z, [x14]
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #0
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: mov z0.d, p2/m, z7.d
; CHECK-NEXT: mov z1.d, p3/m, z6.d
; CHECK-NEXT: b.mi .LBB0_1
; CHECK-NEXT: // %bb.2: // %exit.block
; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: // kill: def $d1 killed $d1 killed $z1
; CHECK-NEXT: ret
entry:
  %active.lane.mask.entry = tail call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 100)
  %0 = tail call i64 @llvm.vscale.i64()
  %1 = shl nuw nsw i64 %0, 1
  %2 = shl nuw nsw i64 %0, 5
  br label %vector.body

vector.body: ; preds = %vector.body, %entry
  %lsr.iv35 = phi i64 [ %lsr.iv.next36, %vector.body ], [ %1, %entry ]
  %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 0, %entry ]
  %active.lane.mask = phi <vscale x 2 x i1> [ %active.lane.mask.entry, %entry ], [ %active.lane.mask.next, %vector.body ]
  %vec.phi = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %15, %vector.body ]
  %vec.phi27 = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %16, %vector.body ]
  %scevgep = getelementptr i8, ptr %a, i64 %lsr.iv
  %scevgep34 = getelementptr i8, ptr %b, i64 %lsr.iv
  %interleaved.mask = tail call <vscale x 4 x i1> @llvm.experimental.vector.interleave2.nxv4i1(<vscale x 2 x i1> %active.lane.mask, <vscale x 2 x i1> %active.lane.mask)
  %wide.masked.vec = tail call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %scevgep, i32 8, <vscale x 4 x i1> %interleaved.mask, <vscale x 4 x double> poison)
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.masked.vec)
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %4 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %interleaved.mask28 = tail call <vscale x 4 x i1> @llvm.experimental.vector.interleave2.nxv4i1(<vscale x 2 x i1> %active.lane.mask, <vscale x 2 x i1> %active.lane.mask)
  %wide.masked.vec29 = tail call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %scevgep34, i32 8, <vscale x 4 x i1> %interleaved.mask28, <vscale x 4 x double> poison)
  %strided.vec30 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.masked.vec29)
  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec30, 0
  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec30, 1
  %7 = fmul fast <vscale x 2 x double> %6, %3
  %8 = fmul fast <vscale x 2 x double> %5, %4
  %9 = fmul fast <vscale x 2 x double> %5, %3
  %10 = fadd fast <vscale x 2 x double> %9, %vec.phi27
  %11 = fmul fast <vscale x 2 x double> %6, %4
  %12 = fsub fast <vscale x 2 x double> %10, %11
  %13 = fadd fast <vscale x 2 x double> %8, %vec.phi
  %14 = fadd fast <vscale x 2 x double> %13, %7
  %15 = select fast <vscale x 2 x i1> %active.lane.mask, <vscale x 2 x double> %14, <vscale x 2 x double> %vec.phi
  %16 = select fast <vscale x 2 x i1> %active.lane.mask, <vscale x 2 x double> %12, <vscale x 2 x double> %vec.phi27
  %active.lane.mask.next = tail call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 %lsr.iv35, i64 100)
  %17 = extractelement <vscale x 2 x i1> %active.lane.mask.next, i64 0
  %lsr.iv.next = add i64 %lsr.iv, %2
  %lsr.iv.next36 = add i64 %lsr.iv35, %1
  br i1 %17, label %vector.body, label %exit.block

exit.block: ; preds = %vector.body
  %18 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %16)
  %19 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %15)
  %.fca.0.0.insert = insertvalue %"class.std::complex" poison, double %18, 0, 0
  %.fca.0.1.insert = insertvalue %"class.std::complex" %.fca.0.0.insert, double %19, 0, 1
  ret %"class.std::complex" %.fca.0.1.insert
}

; Zero initialized reduction with conditional block. The IR is generated with scalar tail folding (-prefer-predicate-over-epilogue=scalar-epilogue)
;
; complex<double> x = 0.0 + 0.0i;
; for (int i = 0; i < 100; ++i)
;   if (cond[i])
;     x += a[i] * b[i];
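;
; With a scalar epilogue the vector trip count is rounded down to a multiple of
; the vector length (vscale x 2), so the loop is expected to exit on a plain
; counter compare (cmp/b.ne) and only the cond[i] comparison predicates the
; loads and the accumulator updates.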
define %"class.std::complex" @complex_mul_predicated_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_v2f64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov z1.d, #0 // =0x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: cntd x10
; CHECK-NEXT: neg x11, x10
; CHECK-NEXT: mov w12, #100 // =0x64
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: mov x9, xzr
; CHECK-NEXT: and x11, x11, x12
; CHECK-NEXT: rdvl x12, #2
; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB1_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p0/z, [x2, x9, lsl #2]
; CHECK-NEXT: add x13, x0, x8
; CHECK-NEXT: add x14, x1, x8
; CHECK-NEXT: mov z6.d, z1.d
; CHECK-NEXT: mov z7.d, z0.d
; CHECK-NEXT: add x9, x9, x10
; CHECK-NEXT: add x8, x8, x12
; CHECK-NEXT: cmpne p1.d, p0/z, z2.d, #0
; CHECK-NEXT: cmp x11, x9
; CHECK-NEXT: zip2 p2.d, p1.d, p1.d
; CHECK-NEXT: zip1 p1.d, p1.d, p1.d
; CHECK-NEXT: ld1d { z2.d }, p2/z, [x13, #1, mul vl]
; CHECK-NEXT: ld1d { z3.d }, p1/z, [x13]
; CHECK-NEXT: ld1d { z4.d }, p2/z, [x14, #1, mul vl]
; CHECK-NEXT: ld1d { z5.d }, p1/z, [x14]
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #0
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: mov z0.d, p2/m, z7.d
; CHECK-NEXT: mov z1.d, p1/m, z6.d
; CHECK-NEXT: b.ne .LBB1_1
; CHECK-NEXT: // %bb.2: // %exit.block
; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: // kill: def $d1 killed $d1 killed $z1
; CHECK-NEXT: ret
entry:
  %0 = tail call i64 @llvm.vscale.i64()
  %1 = shl nuw nsw i64 %0, 1
  %n.mod.vf = urem i64 100, %1
  %n.vec = sub i64 100, %n.mod.vf
  %2 = shl nuw nsw i64 %0, 5
  br label %vector.body

vector.body: ; preds = %vector.body, %entry
  %lsr.iv48 = phi i64 [ %lsr.iv.next, %vector.body ], [ 0, %entry ]
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %vec.phi = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %predphi34, %vector.body ]
  %vec.phi30 = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %predphi, %vector.body ]
  %3 = shl i64 %index, 2
  %scevgep47 = getelementptr i8, ptr %cond, i64 %3
  %wide.load = load <vscale x 2 x i32>, ptr %scevgep47, align 4
  %4 = icmp ne <vscale x 2 x i32> %wide.load, zeroinitializer
  %scevgep49 = getelementptr i8, ptr %a, i64 %lsr.iv48
  %scevgep50 = getelementptr i8, ptr %b, i64 %lsr.iv48
  %interleaved.mask = tail call <vscale x 4 x i1> @llvm.experimental.vector.interleave2.nxv4i1(<vscale x 2 x i1> %4, <vscale x 2 x i1> %4)
  %wide.masked.vec = tail call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %scevgep49, i32 8, <vscale x 4 x i1> %interleaved.mask, <vscale x 4 x double> poison)
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.masked.vec)
  %5 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %wide.masked.vec32 = tail call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %scevgep50, i32 8, <vscale x 4 x i1> %interleaved.mask, <vscale x 4 x double> poison)
  %strided.vec33 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.masked.vec32)
  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec33, 0
  %8 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec33, 1
  %9 = fmul fast <vscale x 2 x double> %8, %5
  %10 = fmul fast <vscale x 2 x double> %7, %6
  %11 = fmul fast <vscale x 2 x double> %7, %5
  %12 = fadd fast <vscale x 2 x double> %11, %vec.phi30
  %13 = fmul fast <vscale x 2 x double> %8, %6
  %14 = fsub fast <vscale x 2 x double> %12, %13
  %15 = fadd fast <vscale x 2 x double> %10, %vec.phi
  %16 = fadd fast <vscale x 2 x double> %15, %9
  %predphi = select <vscale x 2 x i1> %4, <vscale x 2 x double> %14, <vscale x 2 x double> %vec.phi30
  %predphi34 = select <vscale x 2 x i1> %4, <vscale x 2 x double> %16, <vscale x 2 x double> %vec.phi
  %index.next = add nuw i64 %index, %1
  %lsr.iv.next = add i64 %lsr.iv48, %2
  %17 = icmp eq i64 %n.vec, %index.next
  br i1 %17, label %exit.block, label %vector.body

exit.block: ; preds = %vector.body
  %18 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %predphi)
  %19 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %predphi34)
  %.fca.0.0.insert = insertvalue %"class.std::complex" poison, double %18, 0, 0
  %.fca.0.1.insert = insertvalue %"class.std::complex" %.fca.0.0.insert, double %19, 0, 1
  ret %"class.std::complex" %.fca.0.1.insert
}

; Zero initialized reduction with conditional block. The IR is generated with predicated tail folding (-prefer-predicate-over-epilogue=predicate-dont-vectorize)
;
; complex<double> x = 0.0 + 0.0i;
; for (int i = 0; i < 100; ++i)
;   if (cond[i])
;     x += a[i] * b[i];
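;
; Here both the whilelo-generated active lane mask and the cond[i] comparison
; feed the predicate, so the interleaved loads are expected to use the combined
; mask while the loop still exits via whilelo/b.mi.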
define %"class.std::complex" @complex_mul_predicated_x2_v2f64(ptr %a, ptr %b, ptr %cond) {
; CHECK-LABEL: complex_mul_predicated_x2_v2f64:
; CHECK: // %bb.0: // %entry
; CHECK-NEXT: mov w10, #100 // =0x64
; CHECK-NEXT: mov z1.d, #0 // =0x0
; CHECK-NEXT: ptrue p0.d
; CHECK-NEXT: whilelo p1.d, xzr, x10
; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: mov x9, xzr
; CHECK-NEXT: cntd x11
; CHECK-NEXT: rdvl x12, #2
; CHECK-NEXT: zip2 z0.d, z1.d, z1.d
; CHECK-NEXT: zip1 z1.d, z1.d, z1.d
; CHECK-NEXT: .LBB2_1: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ld1w { z2.d }, p1/z, [x2, x9, lsl #2]
; CHECK-NEXT: add x13, x0, x8
; CHECK-NEXT: add x14, x1, x8
; CHECK-NEXT: mov z6.d, z1.d
; CHECK-NEXT: mov z7.d, z0.d
; CHECK-NEXT: add x9, x9, x11
; CHECK-NEXT: add x8, x8, x12
; CHECK-NEXT: cmpne p1.d, p1/z, z2.d, #0
; CHECK-NEXT: zip2 p2.d, p1.d, p1.d
; CHECK-NEXT: zip1 p3.d, p1.d, p1.d
; CHECK-NEXT: whilelo p1.d, x9, x10
; CHECK-NEXT: ld1d { z2.d }, p2/z, [x13, #1, mul vl]
; CHECK-NEXT: ld1d { z3.d }, p3/z, [x13]
; CHECK-NEXT: ld1d { z4.d }, p2/z, [x14, #1, mul vl]
; CHECK-NEXT: ld1d { z5.d }, p3/z, [x14]
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #0
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #0
; CHECK-NEXT: fcmla z6.d, p0/m, z5.d, z3.d, #90
; CHECK-NEXT: fcmla z7.d, p0/m, z4.d, z2.d, #90
; CHECK-NEXT: mov z0.d, p2/m, z7.d
; CHECK-NEXT: mov z1.d, p3/m, z6.d
; CHECK-NEXT: b.mi .LBB2_1
; CHECK-NEXT: // %bb.2: // %exit.block
; CHECK-NEXT: uzp1 z2.d, z1.d, z0.d
; CHECK-NEXT: uzp2 z1.d, z1.d, z0.d
; CHECK-NEXT: faddv d0, p0, z2.d
; CHECK-NEXT: faddv d1, p0, z1.d
; CHECK-NEXT: // kill: def $d0 killed $d0 killed $z0
; CHECK-NEXT: // kill: def $d1 killed $d1 killed $z1
; CHECK-NEXT: ret
entry:
  %active.lane.mask.entry = tail call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 100)
  %0 = tail call i64 @llvm.vscale.i64()
  %1 = shl nuw nsw i64 %0, 1
  %2 = shl nuw nsw i64 %0, 5
  br label %vector.body

vector.body: ; preds = %vector.body, %entry
  %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 0, %entry ]
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %active.lane.mask = phi <vscale x 2 x i1> [ %active.lane.mask.entry, %entry ], [ %active.lane.mask.next, %vector.body ]
  %vec.phi = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %19, %vector.body ]
  %vec.phi30 = phi <vscale x 2 x double> [ zeroinitializer, %entry ], [ %21, %vector.body ]
  %3 = shl i64 %index, 2
  %scevgep = getelementptr i8, ptr %cond, i64 %3
  %wide.masked.load = tail call <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr %scevgep, i32 4, <vscale x 2 x i1> %active.lane.mask, <vscale x 2 x i32> poison)
  %4 = icmp ne <vscale x 2 x i32> %wide.masked.load, zeroinitializer
  %scevgep38 = getelementptr i8, ptr %a, i64 %lsr.iv
  %scevgep39 = getelementptr i8, ptr %b, i64 %lsr.iv
  %5 = select <vscale x 2 x i1> %active.lane.mask, <vscale x 2 x i1> %4, <vscale x 2 x i1> zeroinitializer
  %interleaved.mask = tail call <vscale x 4 x i1> @llvm.experimental.vector.interleave2.nxv4i1(<vscale x 2 x i1> %5, <vscale x 2 x i1> %5)
  %wide.masked.vec = tail call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %scevgep38, i32 8, <vscale x 4 x i1> %interleaved.mask, <vscale x 4 x double> poison)
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.masked.vec)
  %6 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %7 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %interleaved.mask31 = tail call <vscale x 4 x i1> @llvm.experimental.vector.interleave2.nxv4i1(<vscale x 2 x i1> %5, <vscale x 2 x i1> %5)
  %wide.masked.vec32 = tail call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %scevgep39, i32 8, <vscale x 4 x i1> %interleaved.mask31, <vscale x 4 x double> poison)
  %strided.vec33 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %wide.masked.vec32)
  %8 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec33, 0
  %9 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec33, 1
  %10 = fmul fast <vscale x 2 x double> %9, %6
  %11 = fmul fast <vscale x 2 x double> %8, %7
  %12 = fmul fast <vscale x 2 x double> %8, %6
  %13 = fadd fast <vscale x 2 x double> %12, %vec.phi30
  %14 = fmul fast <vscale x 2 x double> %9, %7
  %15 = fsub fast <vscale x 2 x double> %13, %14
  %16 = fadd fast <vscale x 2 x double> %11, %vec.phi
  %17 = fadd fast <vscale x 2 x double> %16, %10
  %18 = select <vscale x 2 x i1> %active.lane.mask, <vscale x 2 x i1> %4, <vscale x 2 x i1> zeroinitializer
  %19 = select fast <vscale x 2 x i1> %18, <vscale x 2 x double> %17, <vscale x 2 x double> %vec.phi
  %20 = select <vscale x 2 x i1> %active.lane.mask, <vscale x 2 x i1> %4, <vscale x 2 x i1> zeroinitializer
  %21 = select fast <vscale x 2 x i1> %20, <vscale x 2 x double> %15, <vscale x 2 x double> %vec.phi30
  %index.next = add i64 %index, %1
  %22 = add i64 %1, %index
  %active.lane.mask.next = tail call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 %22, i64 100)
  %23 = extractelement <vscale x 2 x i1> %active.lane.mask.next, i64 0
  %lsr.iv.next = add i64 %lsr.iv, %2
  br i1 %23, label %vector.body, label %exit.block

exit.block: ; preds = %vector.body
  %24 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %21)
  %25 = tail call fast double @llvm.vector.reduce.fadd.nxv2f64(double -0.000000e+00, <vscale x 2 x double> %19)
  %.fca.0.0.insert = insertvalue %"class.std::complex" poison, double %24, 0, 0
  %.fca.0.1.insert = insertvalue %"class.std::complex" %.fca.0.0.insert, double %25, 0, 1
  ret %"class.std::complex" %.fca.0.1.insert
}

declare i64 @llvm.vscale.i64()
declare <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64, i64)
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32.p0(ptr nocapture, i32 immarg, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr nocapture, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x double>)
declare <vscale x 4 x i1> @llvm.experimental.vector.interleave2.nxv4i1(<vscale x 2 x i1>, <vscale x 2 x i1>)
declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double>)
declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)