1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst %s -o - | FileCheck %s
; Gather of <4 x i32> with an all-true mask, where the offsets are %offs plus a
; splat of 4. The splat add stays a vector add (vmov.i32 #4 / vadd.i32) and the
; gather lowers to a single scaled MVE gather load: vldrw.u32 [r0, q1, uxtw #2].
4 define arm_aapcs_vfpcc <4 x i32> @gather_inc_mini_4i32(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, <4 x i32> %offs) {
5 ; CHECK-LABEL: gather_inc_mini_4i32:
7 ; CHECK-NEXT: vmov.i32 q1, #0x4
8 ; CHECK-NEXT: vadd.i32 q1, q0, q1
9 ; CHECK-NEXT: vldrw.u32 q0, [r0, q1, uxtw #2]
11 %1 = add <4 x i32> %offs, <i32 4, i32 4, i32 4, i32 4>
12 %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
13 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
14 ret <4 x i32> %wide.masked.gather
; Same gather as above but with an alternating <1,0,1,0> mask. The mask constant
; #3855 (0x0f0f) is materialised and moved into the VPR with vmsr p0, and the
; gather becomes a predicated vldrwt.u32 under that predicate.
17 define arm_aapcs_vfpcc <4 x i32> @gather_inc_minipred_4i32(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, <4 x i32> %offs) {
18 ; CHECK-LABEL: gather_inc_minipred_4i32:
20 ; CHECK-NEXT: vmov.i32 q1, #0x4
21 ; CHECK-NEXT: movw r1, #3855
22 ; CHECK-NEXT: vadd.i32 q1, q0, q1
23 ; CHECK-NEXT: vmsr p0, r1
25 ; CHECK-NEXT: vldrwt.u32 q0, [r0, q1, uxtw #2]
27 %1 = add <4 x i32> %offs, <i32 4, i32 4, i32 4, i32 4>
28 %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
29 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> undef)
30 ret <4 x i32> %wide.masked.gather
; <8 x i16> gather with <8 x i32> offsets. This is not matched to a single MVE
; gather: the two offset halves are scaled (vshl #1), base-added, and every
; lane is loaded with a scalar ldrh then inserted back with vmov.16.
33 define arm_aapcs_vfpcc <8 x i16> @gather_inc_mini_8i16(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, <8 x i32> %offs) {
34 ; CHECK-LABEL: gather_inc_mini_8i16:
36 ; CHECK-NEXT: .save {r4, r5, r7, lr}
37 ; CHECK-NEXT: push {r4, r5, r7, lr}
38 ; CHECK-NEXT: vshl.i32 q0, q0, #1
39 ; CHECK-NEXT: vmov.i32 q2, #0x10
40 ; CHECK-NEXT: vadd.i32 q0, q0, r0
41 ; CHECK-NEXT: vshl.i32 q1, q1, #1
42 ; CHECK-NEXT: vadd.i32 q0, q0, q2
43 ; CHECK-NEXT: vadd.i32 q1, q1, r0
44 ; CHECK-NEXT: vmov r4, r5, d0
45 ; CHECK-NEXT: vadd.i32 q1, q1, q2
46 ; CHECK-NEXT: vmov r0, r2, d1
47 ; CHECK-NEXT: vmov r1, lr, d2
48 ; CHECK-NEXT: vmov r3, r12, d3
49 ; CHECK-NEXT: ldrh r4, [r4]
50 ; CHECK-NEXT: ldrh r5, [r5]
51 ; CHECK-NEXT: vmov.16 q0[0], r4
52 ; CHECK-NEXT: ldrh r0, [r0]
53 ; CHECK-NEXT: vmov.16 q0[1], r5
54 ; CHECK-NEXT: ldrh r2, [r2]
55 ; CHECK-NEXT: vmov.16 q0[2], r0
56 ; CHECK-NEXT: ldrh r1, [r1]
57 ; CHECK-NEXT: vmov.16 q0[3], r2
58 ; CHECK-NEXT: ldrh.w lr, [lr]
59 ; CHECK-NEXT: vmov.16 q0[4], r1
60 ; CHECK-NEXT: ldrh r3, [r3]
61 ; CHECK-NEXT: vmov.16 q0[5], lr
62 ; CHECK-NEXT: ldrh.w r12, [r12]
63 ; CHECK-NEXT: vmov.16 q0[6], r3
64 ; CHECK-NEXT: vmov.16 q0[7], r12
65 ; CHECK-NEXT: pop {r4, r5, r7, pc}
66 %1 = add <8 x i32> %offs, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
67 %2 = getelementptr inbounds i16, i16* %data, <8 x i32> %1
68 %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %2, i32 4, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
69 ret <8 x i16> %wide.masked.gather
; As gather_inc_mini_8i16 but with only the even lanes active
; (<1,0,1,0,1,0,1,0> mask): only four scalar ldrh loads are emitted and they
; are inserted into lanes 0, 2, 4 and 6; the odd lanes come from undef.
72 define arm_aapcs_vfpcc <8 x i16> @gather_inc_minipred_8i16(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, <8 x i32> %offs) {
73 ; CHECK-LABEL: gather_inc_minipred_8i16:
75 ; CHECK-NEXT: vshl.i32 q0, q0, #1
76 ; CHECK-NEXT: vmov.i32 q2, #0x10
77 ; CHECK-NEXT: vadd.i32 q0, q0, r0
78 ; CHECK-NEXT: vshl.i32 q1, q1, #1
79 ; CHECK-NEXT: vadd.i32 q0, q0, q2
80 ; CHECK-NEXT: vadd.i32 q1, q1, r0
81 ; CHECK-NEXT: vmov r1, s0
82 ; CHECK-NEXT: vadd.i32 q1, q1, q2
83 ; CHECK-NEXT: vmov r3, s2
84 ; CHECK-NEXT: vmov r0, s4
85 ; CHECK-NEXT: vmov r2, s6
86 ; CHECK-NEXT: ldrh r1, [r1]
87 ; CHECK-NEXT: ldrh r3, [r3]
88 ; CHECK-NEXT: vmov.16 q0[0], r1
89 ; CHECK-NEXT: ldrh r0, [r0]
90 ; CHECK-NEXT: vmov.16 q0[2], r3
91 ; CHECK-NEXT: ldrh r2, [r2]
92 ; CHECK-NEXT: vmov.16 q0[4], r0
93 ; CHECK-NEXT: vmov.16 q0[6], r2
95 %1 = add <8 x i32> %offs, <i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8, i32 8>
96 %2 = getelementptr inbounds i16, i16* %data, <8 x i32> %1
97 %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %2, i32 4, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <8 x i16> undef)
98 ret <8 x i16> %wide.masked.gather
; <16 x i8> gather with <16 x i32> offsets (four q-registers of offsets).
; No MVE gather form exists for this, so each of the 16 addresses is computed
; with vector adds and every byte is loaded with a scalar ldrb and inserted
; with vmov.8. Note the callee-saved d8/d9 spill for the extra live q-register.
101 define arm_aapcs_vfpcc <16 x i8> @gather_inc_mini_16i8(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, <16 x i32> %offs) {
102 ; CHECK-LABEL: gather_inc_mini_16i8:
104 ; CHECK-NEXT: .save {r4, r5, r6, lr}
105 ; CHECK-NEXT: push {r4, r5, r6, lr}
106 ; CHECK-NEXT: .vsave {d8, d9}
107 ; CHECK-NEXT: vpush {d8, d9}
108 ; CHECK-NEXT: vmov.i32 q4, #0x10
109 ; CHECK-NEXT: vadd.i32 q3, q3, r0
110 ; CHECK-NEXT: vadd.i32 q2, q2, r0
111 ; CHECK-NEXT: vadd.i32 q3, q3, q4
112 ; CHECK-NEXT: vadd.i32 q2, q2, q4
113 ; CHECK-NEXT: vmov r1, r2, d7
114 ; CHECK-NEXT: vmov r3, r4, d6
115 ; CHECK-NEXT: vadd.i32 q0, q0, r0
116 ; CHECK-NEXT: vmov r5, r6, d5
117 ; CHECK-NEXT: vadd.i32 q3, q0, q4
118 ; CHECK-NEXT: vadd.i32 q1, q1, r0
119 ; CHECK-NEXT: vadd.i32 q1, q1, q4
120 ; CHECK-NEXT: ldrb.w lr, [r1]
121 ; CHECK-NEXT: ldrb.w r12, [r2]
122 ; CHECK-NEXT: ldrb r1, [r4]
123 ; CHECK-NEXT: ldrb r4, [r5]
124 ; CHECK-NEXT: ldrb r2, [r6]
125 ; CHECK-NEXT: vmov r5, r6, d6
126 ; CHECK-NEXT: ldrb r3, [r3]
127 ; CHECK-NEXT: ldrb r5, [r5]
128 ; CHECK-NEXT: vmov.8 q0[0], r5
129 ; CHECK-NEXT: ldrb r5, [r6]
130 ; CHECK-NEXT: vmov.8 q0[1], r5
131 ; CHECK-NEXT: vmov r5, r6, d7
132 ; CHECK-NEXT: ldrb r0, [r5]
133 ; CHECK-NEXT: ldrb r6, [r6]
134 ; CHECK-NEXT: vmov.8 q0[2], r0
135 ; CHECK-NEXT: vmov r0, r5, d2
136 ; CHECK-NEXT: vmov.8 q0[3], r6
137 ; CHECK-NEXT: ldrb r0, [r0]
138 ; CHECK-NEXT: ldrb r5, [r5]
139 ; CHECK-NEXT: vmov.8 q0[4], r0
140 ; CHECK-NEXT: vmov.8 q0[5], r5
141 ; CHECK-NEXT: vmov r0, r5, d3
142 ; CHECK-NEXT: ldrb r0, [r0]
143 ; CHECK-NEXT: ldrb r5, [r5]
144 ; CHECK-NEXT: vmov.8 q0[6], r0
145 ; CHECK-NEXT: vmov.8 q0[7], r5
146 ; CHECK-NEXT: vmov r0, r5, d4
147 ; CHECK-NEXT: ldrb r0, [r0]
148 ; CHECK-NEXT: ldrb r5, [r5]
149 ; CHECK-NEXT: vmov.8 q0[8], r0
150 ; CHECK-NEXT: vmov.8 q0[9], r5
151 ; CHECK-NEXT: vmov.8 q0[10], r4
152 ; CHECK-NEXT: vmov.8 q0[11], r2
153 ; CHECK-NEXT: vmov.8 q0[12], r3
154 ; CHECK-NEXT: vmov.8 q0[13], r1
155 ; CHECK-NEXT: vmov.8 q0[14], lr
156 ; CHECK-NEXT: vmov.8 q0[15], r12
157 ; CHECK-NEXT: vpop {d8, d9}
158 ; CHECK-NEXT: pop {r4, r5, r6, pc}
159 %1 = add <16 x i32> %offs, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
160 %2 = getelementptr inbounds i8, i8* %data, <16 x i32> %1
161 %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %2, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
162 ret <16 x i8> %wide.masked.gather
; As gather_inc_mini_16i8 but with only even lanes active: eight scalar ldrb
; loads are emitted and inserted into the even byte lanes 0..14 with vmov.8.
165 define arm_aapcs_vfpcc <16 x i8> @gather_inc_minipred_16i8(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, <16 x i32> %offs) {
166 ; CHECK-LABEL: gather_inc_minipred_16i8:
168 ; CHECK-NEXT: .save {r4, r5, r7, lr}
169 ; CHECK-NEXT: push {r4, r5, r7, lr}
170 ; CHECK-NEXT: .vsave {d8, d9}
171 ; CHECK-NEXT: vpush {d8, d9}
172 ; CHECK-NEXT: vmov.i32 q4, #0x10
173 ; CHECK-NEXT: vadd.i32 q2, q2, r0
174 ; CHECK-NEXT: vadd.i32 q2, q2, q4
175 ; CHECK-NEXT: vadd.i32 q1, q1, r0
176 ; CHECK-NEXT: vmov r2, s8
177 ; CHECK-NEXT: vadd.i32 q1, q1, q4
178 ; CHECK-NEXT: vmov r1, s4
179 ; CHECK-NEXT: vadd.i32 q0, q0, r0
180 ; CHECK-NEXT: vadd.i32 q0, q0, q4
181 ; CHECK-NEXT: vmov r3, s10
182 ; CHECK-NEXT: vmov r5, s2
183 ; CHECK-NEXT: ldrb.w lr, [r2]
184 ; CHECK-NEXT: vmov r2, s0
185 ; CHECK-NEXT: ldrb.w r12, [r1]
186 ; CHECK-NEXT: vmov r1, s6
187 ; CHECK-NEXT: vadd.i32 q1, q3, r0
188 ; CHECK-NEXT: ldrb r3, [r3]
189 ; CHECK-NEXT: vadd.i32 q1, q1, q4
190 ; CHECK-NEXT: ldrb r5, [r5]
191 ; CHECK-NEXT: vmov r0, s4
192 ; CHECK-NEXT: vmov r4, s6
193 ; CHECK-NEXT: ldrb r2, [r2]
194 ; CHECK-NEXT: ldrb r1, [r1]
195 ; CHECK-NEXT: vmov.8 q0[0], r2
196 ; CHECK-NEXT: vmov.8 q0[2], r5
197 ; CHECK-NEXT: vmov.8 q0[4], r12
198 ; CHECK-NEXT: ldrb r0, [r0]
199 ; CHECK-NEXT: vmov.8 q0[6], r1
200 ; CHECK-NEXT: ldrb r4, [r4]
201 ; CHECK-NEXT: vmov.8 q0[8], lr
202 ; CHECK-NEXT: vmov.8 q0[10], r3
203 ; CHECK-NEXT: vmov.8 q0[12], r0
204 ; CHECK-NEXT: vmov.8 q0[14], r4
205 ; CHECK-NEXT: vpop {d8, d9}
206 ; CHECK-NEXT: pop {r4, r5, r7, pc}
207 %1 = add <16 x i32> %offs, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
208 %2 = getelementptr inbounds i8, i8* %data, <16 x i32> %1
209 %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %2, i32 2, <16 x i1> <i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false, i1 true, i1 false>, <16 x i8> undef)
210 ret <16 x i8> %wide.masked.gather
; Loop gather whose offsets advance by a constant every iteration
; (mul by 3, add 6, vec.ind step 8 => stride of 24 i32 elements = 96 bytes).
; Lowered to a pre-incrementing gather, vldrw.u32 q1, [q0, #96]!, with the
; initial address vector loaded from the constant pool (.LCPI6_0) and biased
; so that the first pre-increment lands on the first element.
213 define arm_aapcs_vfpcc void @gather_pre_inc(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec) {
214 ; CHECK-LABEL: gather_pre_inc:
215 ; CHECK: @ %bb.0: @ %vector.ph
216 ; CHECK-NEXT: adr r3, .LCPI6_0
217 ; CHECK-NEXT: vldrw.u32 q0, [r3]
218 ; CHECK-NEXT: vadd.i32 q0, q0, r0
219 ; CHECK-NEXT: .LBB6_1: @ %vector.body
220 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
221 ; CHECK-NEXT: vldrw.u32 q1, [q0, #96]!
222 ; CHECK-NEXT: subs r2, #4
223 ; CHECK-NEXT: vstrb.8 q1, [r1], #16
224 ; CHECK-NEXT: bne .LBB6_1
225 ; CHECK-NEXT: @ %bb.2: @ %end
227 ; CHECK-NEXT: .p2align 4
228 ; CHECK-NEXT: @ %bb.3:
229 ; CHECK-NEXT: .LCPI6_0:
230 ; CHECK-NEXT: .long 4294967224 @ 0xffffffb8
231 ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0
232 ; CHECK-NEXT: .long 4294967272 @ 0xffffffe8
233 ; CHECK-NEXT: .long 0 @ 0x0
234 vector.ph: ; preds = %for.body.preheader
235 br label %vector.body
237 vector.body: ; preds = %vector.body, %vector.ph
238 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
239 %vec.ind = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph ], [ %vec.ind.next, %vector.body ]
240 %0 = mul <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
241 %1 = add <4 x i32> %0, <i32 6, i32 6, i32 6, i32 6>
242 %2 = getelementptr inbounds i32, i32* %data, <4 x i32> %1
243 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %2, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
244 %3 = getelementptr inbounds i32, i32* %dst, i32 %index
245 %4 = bitcast i32* %3 to <4 x i32>*
246 store <4 x i32> %wide.masked.gather, <4 x i32>* %4, align 4
247 %index.next = add i32 %index, 4
248 %vec.ind.next = add <4 x i32> %vec.ind, <i32 8, i32 8, i32 8, i32 8>
249 %5 = icmp eq i32 %index.next, %n.vec
250 br i1 %5, label %end, label %vector.body
; Same constant-stride loop gather as gather_pre_inc but without the +6 bias
; (post-increment style in the IR). It still lowers to the same pre-indexed
; form, vldrw.u32 q1, [q0, #96]!, only with a different initial offset vector
; in the constant pool (.LCPI7_0).
257 define arm_aapcs_vfpcc void @gather_post_inc(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n.vec43) {
258 ; CHECK-LABEL: gather_post_inc:
259 ; CHECK: @ %bb.0: @ %vector.ph41
260 ; CHECK-NEXT: adr r3, .LCPI7_0
261 ; CHECK-NEXT: vldrw.u32 q0, [r3]
262 ; CHECK-NEXT: vadd.i32 q0, q0, r0
263 ; CHECK-NEXT: .LBB7_1: @ %vector.body39
264 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1
265 ; CHECK-NEXT: vldrw.u32 q1, [q0, #96]!
266 ; CHECK-NEXT: subs r2, #4
267 ; CHECK-NEXT: vstrb.8 q1, [r1], #16
268 ; CHECK-NEXT: bne .LBB7_1
269 ; CHECK-NEXT: @ %bb.2: @ %end
271 ; CHECK-NEXT: .p2align 4
272 ; CHECK-NEXT: @ %bb.3:
273 ; CHECK-NEXT: .LCPI7_0:
274 ; CHECK-NEXT: .long 4294967200 @ 0xffffffa0
275 ; CHECK-NEXT: .long 4294967224 @ 0xffffffb8
276 ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0
277 ; CHECK-NEXT: .long 4294967272 @ 0xffffffe8
278 vector.ph41: ; preds = %for.body6.preheader
279 br label %vector.body39
281 vector.body39: ; preds = %vector.body39, %vector.ph41
282 %index44 = phi i32 [ 0, %vector.ph41 ], [ %index.next45, %vector.body39 ]
283 %vec.ind50 = phi <4 x i32> [ <i32 0, i32 2, i32 4, i32 6>, %vector.ph41 ], [ %vec.ind.next51, %vector.body39 ]
284 %0 = mul nuw nsw <4 x i32> %vec.ind50, <i32 3, i32 3, i32 3, i32 3>
285 %1 = getelementptr inbounds i32, i32* %data, <4 x i32> %0
286 %wide.masked.gather55 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
287 %2 = getelementptr inbounds i32, i32* %dst, i32 %index44
288 %3 = bitcast i32* %2 to <4 x i32>*
289 store <4 x i32> %wide.masked.gather55, <4 x i32>* %3, align 4
290 %index.next45 = add i32 %index44, 4
291 %vec.ind.next51 = add <4 x i32> %vec.ind50, <i32 8, i32 8, i32 8, i32 8>
292 %4 = icmp eq i32 %index.next45, %n.vec43
293 br i1 %4, label %end, label %vector.body39
; Contiguous (stride-1) gather in a nested loop: the vec.ind step of 4 i32s
; becomes a 16-byte pre-increment, vldrw.u32 q2, [q1, #16]!, inside a
; low-overhead loop (dls/le). The initial offsets come from .LCPI8_0
; (-16,-12,-8,-4) so the first pre-increment yields the base address.
298 define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
299 ; CHECK-LABEL: gather_inc_v4i32_simple:
300 ; CHECK: @ %bb.0: @ %entry
301 ; CHECK-NEXT: .save {r4, lr}
302 ; CHECK-NEXT: push {r4, lr}
303 ; CHECK-NEXT: cmp r2, #1
305 ; CHECK-NEXT: poplt {r4, pc}
306 ; CHECK-NEXT: .LBB8_1: @ %vector.ph.preheader
307 ; CHECK-NEXT: bic r12, r2, #3
308 ; CHECK-NEXT: movs r3, #1
309 ; CHECK-NEXT: sub.w lr, r12, #4
310 ; CHECK-NEXT: add.w r4, r3, lr, lsr #2
311 ; CHECK-NEXT: adr r3, .LCPI8_0
312 ; CHECK-NEXT: vldrw.u32 q0, [r3]
313 ; CHECK-NEXT: vadd.i32 q0, q0, r0
314 ; CHECK-NEXT: .LBB8_2: @ %vector.ph
315 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
316 ; CHECK-NEXT: @ Child Loop BB8_3 Depth 2
317 ; CHECK-NEXT: dls lr, r4
318 ; CHECK-NEXT: mov r0, r1
319 ; CHECK-NEXT: vmov q1, q0
320 ; CHECK-NEXT: .LBB8_3: @ %vector.body
321 ; CHECK-NEXT: @ Parent Loop BB8_2 Depth=1
322 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
323 ; CHECK-NEXT: vldrw.u32 q2, [q1, #16]!
324 ; CHECK-NEXT: vstrb.8 q2, [r0], #16
325 ; CHECK-NEXT: le lr, .LBB8_3
326 ; CHECK-NEXT: @ %bb.4: @ %middle.block
327 ; CHECK-NEXT: @ in Loop: Header=BB8_2 Depth=1
328 ; CHECK-NEXT: cmp r12, r2
329 ; CHECK-NEXT: bne .LBB8_2
330 ; CHECK-NEXT: @ %bb.5: @ %for.cond.cleanup
331 ; CHECK-NEXT: pop {r4, pc}
332 ; CHECK-NEXT: .p2align 4
333 ; CHECK-NEXT: @ %bb.6:
334 ; CHECK-NEXT: .LCPI8_0:
335 ; CHECK-NEXT: .long 4294967280 @ 0xfffffff0
336 ; CHECK-NEXT: .long 4294967284 @ 0xfffffff4
337 ; CHECK-NEXT: .long 4294967288 @ 0xfffffff8
338 ; CHECK-NEXT: .long 4294967292 @ 0xfffffffc
340 %cmp22 = icmp sgt i32 %n, 0
341 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
343 vector.ph: ; preds = %for.body.preheader
344 %n.vec = and i32 %n, -4
345 br label %vector.body
347 vector.body: ; preds = %vector.body, %vector.ph
348 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
349 %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
350 %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
351 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
352 %1 = getelementptr inbounds i32, i32* %dst, i32 %index
353 %2 = bitcast i32* %1 to <4 x i32>*
354 store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
355 %index.next = add i32 %index, 4
356 %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
357 %3 = icmp eq i32 %index.next, %n.vec
358 br i1 %3, label %middle.block, label %vector.body
360 middle.block: ; preds = %vector.body
361 %cmp.n = icmp eq i32 %n.vec, %n
362 br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
364 for.cond.cleanup: ; preds = %for.body, %middle.block, %entry
; Three interleaved gathers in one loop (indices 3*i, 3*i+1, 3*i+2, vec.ind
; step 4 => per-gather stride of 12 i32s = 48 bytes). Each gather becomes its
; own pre-incrementing vldrw.u32 [qN, #48]! with its own initial offset vector
; (.LCPI9_0/1/2), and the three loaded vectors are summed before the store.
368 define arm_aapcs_vfpcc void @gather_inc_v4i32_complex(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
369 ; CHECK-LABEL: gather_inc_v4i32_complex:
370 ; CHECK: @ %bb.0: @ %entry
371 ; CHECK-NEXT: .save {r4, r5, r7, lr}
372 ; CHECK-NEXT: push {r4, r5, r7, lr}
373 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
374 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
375 ; CHECK-NEXT: cmp r2, #1
376 ; CHECK-NEXT: blt .LBB9_5
377 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
378 ; CHECK-NEXT: bic r12, r2, #3
379 ; CHECK-NEXT: movs r3, #1
380 ; CHECK-NEXT: sub.w lr, r12, #4
381 ; CHECK-NEXT: adr r4, .LCPI9_1
382 ; CHECK-NEXT: adr r5, .LCPI9_2
383 ; CHECK-NEXT: vldrw.u32 q1, [r4]
384 ; CHECK-NEXT: add.w r3, r3, lr, lsr #2
385 ; CHECK-NEXT: adr.w lr, .LCPI9_0
386 ; CHECK-NEXT: vldrw.u32 q0, [r5]
387 ; CHECK-NEXT: vldrw.u32 q2, [lr]
388 ; CHECK-NEXT: vadd.i32 q1, q1, r0
389 ; CHECK-NEXT: vadd.i32 q0, q0, r0
390 ; CHECK-NEXT: vadd.i32 q2, q2, r0
391 ; CHECK-NEXT: .LBB9_2: @ %vector.ph
392 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
393 ; CHECK-NEXT: @ Child Loop BB9_3 Depth 2
394 ; CHECK-NEXT: dls lr, r3
395 ; CHECK-NEXT: mov r0, r1
396 ; CHECK-NEXT: vmov q3, q1
397 ; CHECK-NEXT: vmov q4, q0
398 ; CHECK-NEXT: vmov q5, q2
399 ; CHECK-NEXT: .LBB9_3: @ %vector.body
400 ; CHECK-NEXT: @ Parent Loop BB9_2 Depth=1
401 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
402 ; CHECK-NEXT: vldrw.u32 q6, [q5, #48]!
403 ; CHECK-NEXT: vldrw.u32 q7, [q3, #48]!
404 ; CHECK-NEXT: vadd.i32 q6, q7, q6
405 ; CHECK-NEXT: vldrw.u32 q7, [q4, #48]!
406 ; CHECK-NEXT: vadd.i32 q6, q6, q7
407 ; CHECK-NEXT: vstrb.8 q6, [r0], #16
408 ; CHECK-NEXT: le lr, .LBB9_3
409 ; CHECK-NEXT: @ %bb.4: @ %middle.block
410 ; CHECK-NEXT: @ in Loop: Header=BB9_2 Depth=1
411 ; CHECK-NEXT: cmp r12, r2
412 ; CHECK-NEXT: bne .LBB9_2
413 ; CHECK-NEXT: .LBB9_5: @ %for.cond.cleanup
414 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
415 ; CHECK-NEXT: pop {r4, r5, r7, pc}
416 ; CHECK-NEXT: .p2align 4
417 ; CHECK-NEXT: @ %bb.6:
418 ; CHECK-NEXT: .LCPI9_0:
419 ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0
420 ; CHECK-NEXT: .long 4294967260 @ 0xffffffdc
421 ; CHECK-NEXT: .long 4294967272 @ 0xffffffe8
422 ; CHECK-NEXT: .long 4294967284 @ 0xfffffff4
423 ; CHECK-NEXT: .LCPI9_1:
424 ; CHECK-NEXT: .long 4294967252 @ 0xffffffd4
425 ; CHECK-NEXT: .long 4294967264 @ 0xffffffe0
426 ; CHECK-NEXT: .long 4294967276 @ 0xffffffec
427 ; CHECK-NEXT: .long 4294967288 @ 0xfffffff8
428 ; CHECK-NEXT: .LCPI9_2:
429 ; CHECK-NEXT: .long 4294967256 @ 0xffffffd8
430 ; CHECK-NEXT: .long 4294967268 @ 0xffffffe4
431 ; CHECK-NEXT: .long 4294967280 @ 0xfffffff0
432 ; CHECK-NEXT: .long 4294967292 @ 0xfffffffc
434 %cmp22 = icmp sgt i32 %n, 0
435 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
437 vector.ph: ; preds = %for.body.preheader
438 %n.vec = and i32 %n, -4
439 br label %vector.body
441 vector.body: ; preds = %vector.body, %vector.ph
442 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
443 %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
444 %0 = mul nuw nsw <4 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3>
445 %1 = getelementptr inbounds i32, i32* %data, <4 x i32> %0
446 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %1, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
447 %2 = add nuw nsw <4 x i32> %0, <i32 1, i32 1, i32 1, i32 1>
448 %3 = getelementptr inbounds i32, i32* %data, <4 x i32> %2
449 %wide.masked.gather24 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %3, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
450 %4 = add nuw nsw <4 x i32> %0, <i32 2, i32 2, i32 2, i32 2>
451 %5 = getelementptr inbounds i32, i32* %data, <4 x i32> %4
452 %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %5, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
453 %6 = add nsw <4 x i32> %wide.masked.gather24, %wide.masked.gather
454 %7 = add nsw <4 x i32> %6, %wide.masked.gather25
455 %8 = getelementptr inbounds i32, i32* %dst, i32 %index
456 %9 = bitcast i32* %8 to <4 x i32>*
457 store <4 x i32> %7, <4 x i32>* %9, align 4
458 %index.next = add i32 %index, 4
459 %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
460 %10 = icmp eq i32 %index.next, %n.vec
461 br i1 %10, label %middle.block, label %vector.body
463 middle.block: ; preds = %vector.body
464 %cmp.n = icmp eq i32 %n.vec, %n
465 br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
467 for.cond.cleanup: ; preds = %for.body, %middle.block, %entry
; Large-stride variant: vec.ind advances by 127 i32s = 508 bytes, which is
; still encodable as a pre-increment immediate, so the loop keeps the
; vldrw.u32 q2, [q1, #508]! form.
472 define arm_aapcs_vfpcc void @gather_inc_v4i32_large(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
473 ; CHECK-LABEL: gather_inc_v4i32_large:
474 ; CHECK: @ %bb.0: @ %entry
475 ; CHECK-NEXT: .save {r4, lr}
476 ; CHECK-NEXT: push {r4, lr}
477 ; CHECK-NEXT: cmp r2, #1
479 ; CHECK-NEXT: poplt {r4, pc}
480 ; CHECK-NEXT: .LBB10_1: @ %vector.ph.preheader
481 ; CHECK-NEXT: bic r12, r2, #3
482 ; CHECK-NEXT: movs r3, #1
483 ; CHECK-NEXT: sub.w lr, r12, #4
484 ; CHECK-NEXT: add.w r4, r3, lr, lsr #2
485 ; CHECK-NEXT: adr r3, .LCPI10_0
486 ; CHECK-NEXT: vldrw.u32 q0, [r3]
487 ; CHECK-NEXT: vadd.i32 q0, q0, r0
488 ; CHECK-NEXT: .LBB10_2: @ %vector.ph
489 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
490 ; CHECK-NEXT: @ Child Loop BB10_3 Depth 2
491 ; CHECK-NEXT: dls lr, r4
492 ; CHECK-NEXT: mov r0, r1
493 ; CHECK-NEXT: vmov q1, q0
494 ; CHECK-NEXT: .LBB10_3: @ %vector.body
495 ; CHECK-NEXT: @ Parent Loop BB10_2 Depth=1
496 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
497 ; CHECK-NEXT: vldrw.u32 q2, [q1, #508]!
498 ; CHECK-NEXT: vstrb.8 q2, [r0], #16
499 ; CHECK-NEXT: le lr, .LBB10_3
500 ; CHECK-NEXT: @ %bb.4: @ %middle.block
501 ; CHECK-NEXT: @ in Loop: Header=BB10_2 Depth=1
502 ; CHECK-NEXT: cmp r12, r2
503 ; CHECK-NEXT: bne .LBB10_2
504 ; CHECK-NEXT: @ %bb.5: @ %for.cond.cleanup
505 ; CHECK-NEXT: pop {r4, pc}
506 ; CHECK-NEXT: .p2align 4
507 ; CHECK-NEXT: @ %bb.6:
508 ; CHECK-NEXT: .LCPI10_0:
509 ; CHECK-NEXT: .long 4294966788 @ 0xfffffe04
510 ; CHECK-NEXT: .long 4294966792 @ 0xfffffe08
511 ; CHECK-NEXT: .long 4294966796 @ 0xfffffe0c
512 ; CHECK-NEXT: .long 4294966800 @ 0xfffffe10
514 %cmp22 = icmp sgt i32 %n, 0
515 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
517 vector.ph: ; preds = %for.body.preheader
518 %n.vec = and i32 %n, -4
519 br label %vector.body
521 vector.body: ; preds = %vector.body, %vector.ph
522 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
523 %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
524 %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
525 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
526 %1 = getelementptr inbounds i32, i32* %dst, i32 %index
527 %2 = bitcast i32* %1 to <4 x i32>*
528 store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
529 %index.next = add i32 %index, 4
530 %vec.ind.next = add <4 x i32> %vec.ind, <i32 127, i32 127, i32 127, i32 127>
531 %3 = icmp eq i32 %index.next, %n.vec
532 br i1 %3, label %middle.block, label %vector.body
534 middle.block: ; preds = %vector.body
535 %cmp.n = icmp eq i32 %n.vec, %n
536 br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
538 for.cond.cleanup: ; preds = %for.body, %middle.block, %entry
541 ; TODO: An "uneven" variant is not included: vec.ind always advances by a whole
541 ; 4-element vector per iteration, so the index increment (x*4) is always even.
543 ; TODO: Investigate the sxth (sign-extend halfword) instructions that can appear
543 ; in the i16-indexed lowering below.
; Stride-1 gather with an i16 induction vector. There is no direct lowering
; for i16 indices: each iteration the index vector q2 is spilled to the stack
; (vstrw.32), re-read widened to i32 in two halves (vldrh.s32 [r1] / [r1, #8]),
; scaled by 2 and base-added, then every element is loaded with a scalar ldrh
; and reassembled with vmov.16. The index vector itself advances by the
; splat-8 constant held in q1.
544 define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, i32 %n) {
545 ; CHECK-LABEL: gather_inc_v8i16_simple:
546 ; CHECK: @ %bb.0: @ %entry
547 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
548 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
549 ; CHECK-NEXT: .pad #4
550 ; CHECK-NEXT: sub sp, #4
551 ; CHECK-NEXT: .vsave {d8, d9}
552 ; CHECK-NEXT: vpush {d8, d9}
553 ; CHECK-NEXT: .pad #24
554 ; CHECK-NEXT: sub sp, #24
555 ; CHECK-NEXT: cmp r2, #1
556 ; CHECK-NEXT: str r1, [sp] @ 4-byte Spill
557 ; CHECK-NEXT: mov r1, r2
558 ; CHECK-NEXT: str r2, [sp, #4] @ 4-byte Spill
559 ; CHECK-NEXT: blt .LBB11_5
560 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
561 ; CHECK-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
562 ; CHECK-NEXT: movs r5, #1
563 ; CHECK-NEXT: vmov.i16 q1, #0x8
564 ; CHECK-NEXT: bic r12, r1, #7
565 ; CHECK-NEXT: add r1, sp, #8
566 ; CHECK-NEXT: sub.w r3, r12, #8
567 ; CHECK-NEXT: add.w r8, r5, r3, lsr #3
568 ; CHECK-NEXT: adr r5, .LCPI11_0
569 ; CHECK-NEXT: vldrw.u32 q0, [r5]
570 ; CHECK-NEXT: .LBB11_2: @ %vector.ph
571 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
572 ; CHECK-NEXT: @ Child Loop BB11_3 Depth 2
573 ; CHECK-NEXT: dls lr, r8
574 ; CHECK-NEXT: vmov q2, q0
575 ; CHECK-NEXT: ldr r5, [sp] @ 4-byte Reload
576 ; CHECK-NEXT: .LBB11_3: @ %vector.body
577 ; CHECK-NEXT: @ Parent Loop BB11_2 Depth=1
578 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
579 ; CHECK-NEXT: vstrw.32 q2, [r1]
580 ; CHECK-NEXT: mov r10, r1
581 ; CHECK-NEXT: vldrh.s32 q4, [r1, #8]
582 ; CHECK-NEXT: vldrh.s32 q3, [r1]
583 ; CHECK-NEXT: vadd.i16 q2, q2, q1
584 ; CHECK-NEXT: vshl.i32 q4, q4, #1
585 ; CHECK-NEXT: vshl.i32 q3, q3, #1
586 ; CHECK-NEXT: vadd.i32 q4, q4, r0
587 ; CHECK-NEXT: vadd.i32 q3, q3, r0
588 ; CHECK-NEXT: vmov r1, r2, d9
589 ; CHECK-NEXT: vmov r6, r7, d7
590 ; CHECK-NEXT: vmov r3, r4, d8
591 ; CHECK-NEXT: ldrh.w r11, [r2]
592 ; CHECK-NEXT: vmov r2, r9, d6
593 ; CHECK-NEXT: ldrh r6, [r6]
594 ; CHECK-NEXT: ldrh r7, [r7]
595 ; CHECK-NEXT: ldrh r3, [r3]
596 ; CHECK-NEXT: ldrh r4, [r4]
597 ; CHECK-NEXT: ldrh r1, [r1]
598 ; CHECK-NEXT: ldrh r2, [r2]
599 ; CHECK-NEXT: ldrh.w r9, [r9]
600 ; CHECK-NEXT: vmov.16 q3[0], r2
601 ; CHECK-NEXT: vmov.16 q3[1], r9
602 ; CHECK-NEXT: vmov.16 q3[2], r6
603 ; CHECK-NEXT: vmov.16 q3[3], r7
604 ; CHECK-NEXT: vmov.16 q3[4], r3
605 ; CHECK-NEXT: vmov.16 q3[5], r4
606 ; CHECK-NEXT: vmov.16 q3[6], r1
607 ; CHECK-NEXT: mov r1, r10
608 ; CHECK-NEXT: vmov.16 q3[7], r11
609 ; CHECK-NEXT: vstrb.8 q3, [r5], #16
610 ; CHECK-NEXT: le lr, .LBB11_3
611 ; CHECK-NEXT: @ %bb.4: @ %middle.block
612 ; CHECK-NEXT: @ in Loop: Header=BB11_2 Depth=1
613 ; CHECK-NEXT: ldr r2, [sp, #4] @ 4-byte Reload
614 ; CHECK-NEXT: cmp r12, r2
615 ; CHECK-NEXT: bne .LBB11_2
616 ; CHECK-NEXT: .LBB11_5: @ %for.cond.cleanup
617 ; CHECK-NEXT: add sp, #24
618 ; CHECK-NEXT: vpop {d8, d9}
619 ; CHECK-NEXT: add sp, #4
620 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
621 ; CHECK-NEXT: .p2align 4
622 ; CHECK-NEXT: @ %bb.6:
623 ; CHECK-NEXT: .LCPI11_0:
624 ; CHECK-NEXT: .short 0 @ 0x0
625 ; CHECK-NEXT: .short 1 @ 0x1
626 ; CHECK-NEXT: .short 2 @ 0x2
627 ; CHECK-NEXT: .short 3 @ 0x3
628 ; CHECK-NEXT: .short 4 @ 0x4
629 ; CHECK-NEXT: .short 5 @ 0x5
630 ; CHECK-NEXT: .short 6 @ 0x6
631 ; CHECK-NEXT: .short 7 @ 0x7
635 %cmp22 = icmp sgt i32 %n, 0
636 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
638 vector.ph: ; preds = %for.body.preheader
639 %n.vec = and i32 %n, -8
640 br label %vector.body
642 vector.body: ; preds = %vector.body, %vector.ph
643 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
644 %vec.ind = phi <8 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %vector.ph ], [ %vec.ind.next, %vector.body ]
645 %0 = getelementptr inbounds i16, i16* %data, <8 x i16> %vec.ind
646 %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %0, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
647 %1 = getelementptr inbounds i16, i16* %dst, i32 %index
648 %2 = bitcast i16* %1 to <8 x i16>*
649 store <8 x i16> %wide.masked.gather, <8 x i16>* %2, align 2
650 %index.next = add i32 %index, 8
651 %vec.ind.next = add <8 x i16> %vec.ind, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
652 %3 = icmp eq i32 %index.next, %n.vec
653 br i1 %3, label %middle.block, label %vector.body
655 middle.block: ; preds = %vector.body
656 %cmp.n = icmp eq i32 %n.vec, %n
657 br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
659 for.cond.cleanup: ; preds = %for.body, %middle.block, %entry
663 ; TODO: The expansion below is extremely long (per-element scalar loads with
663 ; repeated stack round-trips); the v8i16 complex-gather lowering needs work.
664 define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(i16* noalias nocapture readonly %data, i16* noalias nocapture %dst, i32 %n) {
665 ; CHECK-LABEL: gather_inc_v8i16_complex:
666 ; CHECK: @ %bb.0: @ %entry
667 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
668 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
669 ; CHECK-NEXT: .pad #4
670 ; CHECK-NEXT: sub sp, #4
671 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
672 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
673 ; CHECK-NEXT: .pad #136
674 ; CHECK-NEXT: sub sp, #136
675 ; CHECK-NEXT: cmp r2, #1
676 ; CHECK-NEXT: str r1, [sp, #64] @ 4-byte Spill
677 ; CHECK-NEXT: mov r1, r2
678 ; CHECK-NEXT: str r2, [sp, #68] @ 4-byte Spill
679 ; CHECK-NEXT: blt.w .LBB12_5
680 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
681 ; CHECK-NEXT: ldr r1, [sp, #68] @ 4-byte Reload
682 ; CHECK-NEXT: adr r3, .LCPI12_2
683 ; CHECK-NEXT: vldrw.u32 q0, [r3]
684 ; CHECK-NEXT: movs r2, #1
685 ; CHECK-NEXT: bic r1, r1, #7
686 ; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill
687 ; CHECK-NEXT: subs r1, #8
688 ; CHECK-NEXT: vstrw.32 q0, [sp, #40] @ 16-byte Spill
689 ; CHECK-NEXT: vmov.i16 q2, #0x18
690 ; CHECK-NEXT: add.w r1, r2, r1, lsr #3
691 ; CHECK-NEXT: str r1, [sp, #60] @ 4-byte Spill
692 ; CHECK-NEXT: adr r1, .LCPI12_0
693 ; CHECK-NEXT: adr r2, .LCPI12_1
694 ; CHECK-NEXT: vldrw.u32 q0, [r1]
695 ; CHECK-NEXT: vstrw.32 q2, [sp, #72] @ 16-byte Spill
696 ; CHECK-NEXT: vstrw.32 q0, [sp, #24] @ 16-byte Spill
697 ; CHECK-NEXT: vldrw.u32 q0, [r2]
698 ; CHECK-NEXT: add r2, sp, #120
699 ; CHECK-NEXT: vstrw.32 q0, [sp, #8] @ 16-byte Spill
700 ; CHECK-NEXT: .LBB12_2: @ %vector.ph
701 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
702 ; CHECK-NEXT: @ Child Loop BB12_3 Depth 2
703 ; CHECK-NEXT: ldr r1, [sp, #60] @ 4-byte Reload
704 ; CHECK-NEXT: add.w r10, sp, #104
705 ; CHECK-NEXT: dls lr, r1
706 ; CHECK-NEXT: ldr r7, [sp, #64] @ 4-byte Reload
707 ; CHECK-NEXT: vldrw.u32 q4, [sp, #24] @ 16-byte Reload
708 ; CHECK-NEXT: vldrw.u32 q5, [sp, #40] @ 16-byte Reload
709 ; CHECK-NEXT: vldrw.u32 q6, [sp, #8] @ 16-byte Reload
710 ; CHECK-NEXT: .LBB12_3: @ %vector.body
711 ; CHECK-NEXT: @ Parent Loop BB12_2 Depth=1
712 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
713 ; CHECK-NEXT: vstrw.32 q5, [r2]
714 ; CHECK-NEXT: mov r8, r2
715 ; CHECK-NEXT: vldrh.s32 q0, [r2, #8]
716 ; CHECK-NEXT: vshl.i32 q0, q0, #1
717 ; CHECK-NEXT: vadd.i32 q0, q0, r0
718 ; CHECK-NEXT: vmov r1, r3, d0
719 ; CHECK-NEXT: vmov r4, r5, d1
720 ; CHECK-NEXT: vldrh.s32 q0, [r2]
721 ; CHECK-NEXT: vshl.i32 q0, q0, #1
722 ; CHECK-NEXT: vadd.i32 q2, q0, r0
723 ; CHECK-NEXT: vmov r6, r2, d4
724 ; CHECK-NEXT: ldrh r1, [r1]
725 ; CHECK-NEXT: ldrh.w r12, [r4]
726 ; CHECK-NEXT: add r4, sp, #88
727 ; CHECK-NEXT: ldrh.w r11, [r5]
728 ; CHECK-NEXT: ldrh r3, [r3]
729 ; CHECK-NEXT: ldrh r5, [r6]
730 ; CHECK-NEXT: ldrh r2, [r2]
731 ; CHECK-NEXT: vstrw.32 q6, [r4]
732 ; CHECK-NEXT: vldrh.s32 q0, [r4]
733 ; CHECK-NEXT: vmov.16 q7[0], r5
734 ; CHECK-NEXT: vmov.16 q7[1], r2
735 ; CHECK-NEXT: vshl.i32 q0, q0, #1
736 ; CHECK-NEXT: vadd.i32 q0, q0, r0
737 ; CHECK-NEXT: vmov r6, r9, d0
738 ; CHECK-NEXT: vmov r2, r5, d1
739 ; CHECK-NEXT: vldrh.s32 q0, [r4, #8]
740 ; CHECK-NEXT: vshl.i32 q0, q0, #1
741 ; CHECK-NEXT: vadd.i32 q0, q0, r0
742 ; CHECK-NEXT: ldrh r6, [r6]
743 ; CHECK-NEXT: ldrh r2, [r2]
744 ; CHECK-NEXT: vmov.16 q1[0], r6
745 ; CHECK-NEXT: ldrh.w r6, [r9]
746 ; CHECK-NEXT: ldrh r5, [r5]
747 ; CHECK-NEXT: vmov.16 q1[1], r6
748 ; CHECK-NEXT: vmov.16 q1[2], r2
749 ; CHECK-NEXT: vmov r2, r6, d0
750 ; CHECK-NEXT: vmov.16 q1[3], r5
751 ; CHECK-NEXT: ldrh r2, [r2]
752 ; CHECK-NEXT: ldrh r6, [r6]
753 ; CHECK-NEXT: vmov.16 q1[4], r2
754 ; CHECK-NEXT: vmov r2, r5, d1
755 ; CHECK-NEXT: vmov.16 q1[5], r6
756 ; CHECK-NEXT: mov r6, r10
757 ; CHECK-NEXT: ldrh r2, [r2]
758 ; CHECK-NEXT: ldrh r5, [r5]
759 ; CHECK-NEXT: vstrw.32 q4, [r10]
760 ; CHECK-NEXT: vldrh.s32 q0, [r6]
761 ; CHECK-NEXT: vmov.16 q1[6], r2
762 ; CHECK-NEXT: vmov.16 q1[7], r5
763 ; CHECK-NEXT: vshl.i32 q0, q0, #1
764 ; CHECK-NEXT: vadd.i32 q0, q0, r0
765 ; CHECK-NEXT: vmov r2, r5, d0
766 ; CHECK-NEXT: ldrh r2, [r2]
767 ; CHECK-NEXT: ldrh r5, [r5]
768 ; CHECK-NEXT: vmov.16 q3[0], r2
769 ; CHECK-NEXT: vmov.16 q3[1], r5
770 ; CHECK-NEXT: vmov r2, r5, d5
771 ; CHECK-NEXT: vldrw.u32 q2, [sp, #72] @ 16-byte Reload
772 ; CHECK-NEXT: vadd.i16 q6, q6, q2
773 ; CHECK-NEXT: vadd.i16 q5, q5, q2
774 ; CHECK-NEXT: vadd.i16 q4, q4, q2
775 ; CHECK-NEXT: ldrh.w r9, [r2]
776 ; CHECK-NEXT: vmov r2, r4, d1
777 ; CHECK-NEXT: vldrh.s32 q0, [r6, #8]
778 ; CHECK-NEXT: ldrh r5, [r5]
779 ; CHECK-NEXT: vmov.16 q7[2], r9
780 ; CHECK-NEXT: vshl.i32 q0, q0, #1
781 ; CHECK-NEXT: vmov.16 q7[3], r5
782 ; CHECK-NEXT: vadd.i32 q0, q0, r0
783 ; CHECK-NEXT: vmov.16 q7[4], r1
784 ; CHECK-NEXT: vmov.16 q7[5], r3
785 ; CHECK-NEXT: vmov.16 q7[6], r12
786 ; CHECK-NEXT: vmov.16 q7[7], r11
787 ; CHECK-NEXT: ldrh r2, [r2]
788 ; CHECK-NEXT: ldrh r4, [r4]
789 ; CHECK-NEXT: vmov.16 q3[2], r2
790 ; CHECK-NEXT: vmov.16 q3[3], r4
791 ; CHECK-NEXT: vmov r2, r4, d0
792 ; CHECK-NEXT: ldrh r2, [r2]
793 ; CHECK-NEXT: ldrh r4, [r4]
794 ; CHECK-NEXT: vmov.16 q3[4], r2
795 ; CHECK-NEXT: vmov.16 q3[5], r4
796 ; CHECK-NEXT: vmov r2, r4, d1
797 ; CHECK-NEXT: ldrh r2, [r2]
798 ; CHECK-NEXT: ldrh r4, [r4]
799 ; CHECK-NEXT: vmov.16 q3[6], r2
800 ; CHECK-NEXT: mov r2, r8
801 ; CHECK-NEXT: vmov.16 q3[7], r4
802 ; CHECK-NEXT: vadd.i16 q0, q3, q1
803 ; CHECK-NEXT: vadd.i16 q0, q0, q7
804 ; CHECK-NEXT: vstrb.8 q0, [r7], #16
805 ; CHECK-NEXT: le lr, .LBB12_3
806 ; CHECK-NEXT: @ %bb.4: @ %middle.block
807 ; CHECK-NEXT: @ in Loop: Header=BB12_2 Depth=1
808 ; CHECK-NEXT: ldr r1, [sp, #4] @ 4-byte Reload
809 ; CHECK-NEXT: ldr r3, [sp, #68] @ 4-byte Reload
810 ; CHECK-NEXT: cmp r1, r3
811 ; CHECK-NEXT: bne.w .LBB12_2
812 ; CHECK-NEXT: .LBB12_5: @ %for.cond.cleanup
813 ; CHECK-NEXT: add sp, #136
814 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
815 ; CHECK-NEXT: add sp, #4
816 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
817 ; CHECK-NEXT: .p2align 4
818 ; CHECK-NEXT: @ %bb.6:
819 ; CHECK-NEXT: .LCPI12_0:
820 ; CHECK-NEXT: .short 1 @ 0x1
821 ; CHECK-NEXT: .short 4 @ 0x4
822 ; CHECK-NEXT: .short 7 @ 0x7
823 ; CHECK-NEXT: .short 10 @ 0xa
824 ; CHECK-NEXT: .short 13 @ 0xd
825 ; CHECK-NEXT: .short 16 @ 0x10
826 ; CHECK-NEXT: .short 19 @ 0x13
827 ; CHECK-NEXT: .short 22 @ 0x16
828 ; CHECK-NEXT: .LCPI12_1:
829 ; CHECK-NEXT: .short 0 @ 0x0
830 ; CHECK-NEXT: .short 3 @ 0x3
831 ; CHECK-NEXT: .short 6 @ 0x6
832 ; CHECK-NEXT: .short 9 @ 0x9
833 ; CHECK-NEXT: .short 12 @ 0xc
834 ; CHECK-NEXT: .short 15 @ 0xf
835 ; CHECK-NEXT: .short 18 @ 0x12
836 ; CHECK-NEXT: .short 21 @ 0x15
837 ; CHECK-NEXT: .LCPI12_2:
838 ; CHECK-NEXT: .short 2 @ 0x2
839 ; CHECK-NEXT: .short 5 @ 0x5
840 ; CHECK-NEXT: .short 8 @ 0x8
841 ; CHECK-NEXT: .short 11 @ 0xb
842 ; CHECK-NEXT: .short 14 @ 0xe
843 ; CHECK-NEXT: .short 17 @ 0x11
844 ; CHECK-NEXT: .short 20 @ 0x14
845 ; CHECK-NEXT: .short 23 @ 0x17
849 %cmp22 = icmp sgt i32 %n, 0
850 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
852 vector.ph: ; preds = %entry, %middle.block
853 %n.vec = and i32 %n, -8
854 br label %vector.body
856 vector.body: ; preds = %vector.body, %vector.ph
857 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
858 %vec.ind = phi <8 x i16> [ <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7>, %vector.ph ], [ %vec.ind.next, %vector.body ]
859 %0 = mul nuw nsw <8 x i16> %vec.ind, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
860 %1 = getelementptr inbounds i16, i16* %data, <8 x i16> %0
861 %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %1, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
862 %2 = add nuw nsw <8 x i16> %0, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
863 %3 = getelementptr inbounds i16, i16* %data, <8 x i16> %2
864 %wide.masked.gather24 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %3, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
865 %4 = add nuw nsw <8 x i16> %0, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>
866 %5 = getelementptr inbounds i16, i16* %data, <8 x i16> %4
867 %wide.masked.gather25 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*> %5, i32 2, <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <8 x i16> undef)
868 %6 = add nsw <8 x i16> %wide.masked.gather24, %wide.masked.gather
869 %7 = add nsw <8 x i16> %6, %wide.masked.gather25
870 %8 = getelementptr inbounds i16, i16* %dst, i32 %index
871 %9 = bitcast i16* %8 to <8 x i16>*
872 store <8 x i16> %7, <8 x i16>* %9, align 2
873 %index.next = add i32 %index, 8
874 %vec.ind.next = add <8 x i16> %vec.ind, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
875 %10 = icmp eq i32 %index.next, %n.vec
876 br i1 %10, label %middle.block, label %vector.body
878 middle.block: ; preds = %vector.body
879 %cmp.n = icmp eq i32 %n.vec, %n
880 br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
882 for.cond.cleanup: ; preds = %middle.block, %entry
887 define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, i32 %n) {
888 ; CHECK-LABEL: gather_inc_v16i8_complex:
889 ; CHECK: @ %bb.0: @ %entry
890 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
891 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
892 ; CHECK-NEXT: .pad #4
893 ; CHECK-NEXT: sub sp, #4
894 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
895 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
896 ; CHECK-NEXT: .pad #312
897 ; CHECK-NEXT: sub sp, #312
898 ; CHECK-NEXT: cmp r2, #1
899 ; CHECK-NEXT: str r1, [sp, #116] @ 4-byte Spill
900 ; CHECK-NEXT: blt.w .LBB13_5
901 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
902 ; CHECK-NEXT: adr r1, .LCPI13_0
903 ; CHECK-NEXT: adr r6, .LCPI13_8
904 ; CHECK-NEXT: vldrw.u32 q0, [r1]
905 ; CHECK-NEXT: adr r1, .LCPI13_1
906 ; CHECK-NEXT: adr r7, .LCPI13_7
907 ; CHECK-NEXT: adr r3, .LCPI13_6
908 ; CHECK-NEXT: vstrw.32 q0, [sp, #96] @ 16-byte Spill
909 ; CHECK-NEXT: vldrw.u32 q0, [r1]
910 ; CHECK-NEXT: adr r1, .LCPI13_5
911 ; CHECK-NEXT: bic r10, r2, #7
912 ; CHECK-NEXT: vstrw.32 q0, [sp, #80] @ 16-byte Spill
913 ; CHECK-NEXT: vldrw.u32 q0, [r6]
914 ; CHECK-NEXT: adr r6, .LCPI13_9
915 ; CHECK-NEXT: vmov.i32 q2, #0x30
916 ; CHECK-NEXT: vstrw.32 q0, [sp, #64] @ 16-byte Spill
917 ; CHECK-NEXT: vldrw.u32 q0, [r7]
918 ; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill
919 ; CHECK-NEXT: vldrw.u32 q0, [r6]
920 ; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
921 ; CHECK-NEXT: vldrw.u32 q0, [r1]
922 ; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
923 ; CHECK-NEXT: vldrw.u32 q0, [r3]
924 ; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
925 ; CHECK-NEXT: .LBB13_2: @ %vector.ph
926 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
927 ; CHECK-NEXT: @ Child Loop BB13_3 Depth 2
928 ; CHECK-NEXT: adr r1, .LCPI13_3
929 ; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
930 ; CHECK-NEXT: vldrw.u32 q0, [r1]
931 ; CHECK-NEXT: adr r1, .LCPI13_4
932 ; CHECK-NEXT: vldrw.u32 q5, [r1]
933 ; CHECK-NEXT: adr r1, .LCPI13_2
934 ; CHECK-NEXT: vldrw.u32 q3, [r1]
935 ; CHECK-NEXT: adr r1, .LCPI13_10
936 ; CHECK-NEXT: vstrw.32 q6, [sp, #280] @ 16-byte Spill
937 ; CHECK-NEXT: vldrw.u32 q6, [sp, #32] @ 16-byte Reload
938 ; CHECK-NEXT: vstrw.32 q3, [sp, #296] @ 16-byte Spill
939 ; CHECK-NEXT: vldrw.u32 q3, [r1]
940 ; CHECK-NEXT: adr r1, .LCPI13_11
941 ; CHECK-NEXT: ldr.w r8, [sp, #116] @ 4-byte Reload
942 ; CHECK-NEXT: vstrw.32 q3, [sp, #248] @ 16-byte Spill
943 ; CHECK-NEXT: vldrw.u32 q3, [sp, #80] @ 16-byte Reload
944 ; CHECK-NEXT: vstrw.32 q6, [sp, #264] @ 16-byte Spill
945 ; CHECK-NEXT: vldrw.u32 q6, [sp, #48] @ 16-byte Reload
946 ; CHECK-NEXT: vstrw.32 q3, [sp, #216] @ 16-byte Spill
947 ; CHECK-NEXT: vldrw.u32 q3, [sp, #64] @ 16-byte Reload
948 ; CHECK-NEXT: vldrw.u32 q7, [r1]
949 ; CHECK-NEXT: vldrw.u32 q1, [sp] @ 16-byte Reload
950 ; CHECK-NEXT: vstrw.32 q3, [sp, #200] @ 16-byte Spill
951 ; CHECK-NEXT: vldrw.u32 q3, [sp, #96] @ 16-byte Reload
952 ; CHECK-NEXT: mov r11, r10
953 ; CHECK-NEXT: vstrw.32 q6, [sp, #232] @ 16-byte Spill
954 ; CHECK-NEXT: vstrw.32 q3, [sp, #184] @ 16-byte Spill
955 ; CHECK-NEXT: .LBB13_3: @ %vector.body
956 ; CHECK-NEXT: @ Parent Loop BB13_2 Depth=1
957 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
958 ; CHECK-NEXT: vadd.i32 q4, q1, r0
959 ; CHECK-NEXT: vstrw.32 q7, [sp, #136] @ 16-byte Spill
960 ; CHECK-NEXT: vmov r1, lr, d8
961 ; CHECK-NEXT: vadd.i32 q7, q7, r0
962 ; CHECK-NEXT: vmov r5, r4, d15
963 ; CHECK-NEXT: vadd.i32 q6, q0, r0
964 ; CHECK-NEXT: vmov r6, r7, d13
965 ; CHECK-NEXT: vstrw.32 q1, [sp, #152] @ 16-byte Spill
966 ; CHECK-NEXT: vldrw.u32 q1, [sp, #296] @ 16-byte Reload
967 ; CHECK-NEXT: vstrw.32 q0, [sp, #168] @ 16-byte Spill
968 ; CHECK-NEXT: vmov q0, q2
969 ; CHECK-NEXT: vmov q3, q5
970 ; CHECK-NEXT: vadd.i32 q1, q1, r0
971 ; CHECK-NEXT: vldrw.u32 q0, [sp, #248] @ 16-byte Reload
972 ; CHECK-NEXT: vldrw.u32 q3, [sp, #216] @ 16-byte Reload
973 ; CHECK-NEXT: vstrw.32 q5, [sp, #120] @ 16-byte Spill
974 ; CHECK-NEXT: vadd.i32 q0, q0, r0
975 ; CHECK-NEXT: subs.w r11, r11, #16
976 ; CHECK-NEXT: ldrb.w r9, [r1]
977 ; CHECK-NEXT: vmov r1, r3, d14
978 ; CHECK-NEXT: ldrb r5, [r5]
979 ; CHECK-NEXT: ldrb r7, [r7]
980 ; CHECK-NEXT: ldrb r1, [r1]
981 ; CHECK-NEXT: vmov.8 q7[0], r1
982 ; CHECK-NEXT: ldrb r1, [r3]
983 ; CHECK-NEXT: vmov.8 q7[1], r1
984 ; CHECK-NEXT: vmov r1, r3, d12
985 ; CHECK-NEXT: vmov.8 q7[2], r5
986 ; CHECK-NEXT: ldrb r5, [r6]
987 ; CHECK-NEXT: ldrb r6, [r4]
988 ; CHECK-NEXT: vmov.8 q7[3], r6
989 ; CHECK-NEXT: ldrb r1, [r1]
990 ; CHECK-NEXT: ldrb r3, [r3]
991 ; CHECK-NEXT: vmov.8 q6[0], r1
992 ; CHECK-NEXT: vmov r6, r1, d2
993 ; CHECK-NEXT: vmov.8 q6[1], r3
994 ; CHECK-NEXT: vmov.8 q6[2], r5
995 ; CHECK-NEXT: vmov.8 q6[3], r7
996 ; CHECK-NEXT: ldrb.w r7, [lr]
997 ; CHECK-NEXT: vmov.8 q6[4], r9
998 ; CHECK-NEXT: vmov.8 q6[5], r7
999 ; CHECK-NEXT: ldrb r4, [r1]
1000 ; CHECK-NEXT: vmov r1, r5, d3
1001 ; CHECK-NEXT: vldrw.u32 q1, [sp, #232] @ 16-byte Reload
1002 ; CHECK-NEXT: ldrb.w r12, [r1]
1003 ; CHECK-NEXT: vmov r1, r3, d9
1004 ; CHECK-NEXT: ldrb r5, [r5]
1005 ; CHECK-NEXT: vldrw.u32 q4, [sp, #184] @ 16-byte Reload
1006 ; CHECK-NEXT: ldrb r1, [r1]
1007 ; CHECK-NEXT: ldrb r3, [r3]
1008 ; CHECK-NEXT: vmov.8 q6[6], r1
1009 ; CHECK-NEXT: vmov r1, r7, d0
1010 ; CHECK-NEXT: vmov.8 q6[7], r3
1011 ; CHECK-NEXT: ldrb r1, [r1]
1012 ; CHECK-NEXT: ldrb r7, [r7]
1013 ; CHECK-NEXT: vmov.8 q7[4], r1
1014 ; CHECK-NEXT: vmov r1, r3, d1
1015 ; CHECK-NEXT: vldrw.u32 q0, [sp, #264] @ 16-byte Reload
1016 ; CHECK-NEXT: vmov.8 q7[5], r7
1017 ; CHECK-NEXT: vadd.i32 q0, q0, r0
1018 ; CHECK-NEXT: ldrb r1, [r1]
1019 ; CHECK-NEXT: ldrb r3, [r3]
1020 ; CHECK-NEXT: vmov.8 q7[6], r1
1021 ; CHECK-NEXT: ldrb r1, [r6]
1022 ; CHECK-NEXT: vmov r7, r6, d0
1023 ; CHECK-NEXT: vmov.8 q7[7], r3
1024 ; CHECK-NEXT: vmov r3, lr, d1
1025 ; CHECK-NEXT: vldrw.u32 q0, [sp, #280] @ 16-byte Reload
1026 ; CHECK-NEXT: vmov.8 q7[8], r1
1027 ; CHECK-NEXT: vadd.i32 q0, q0, r0
1028 ; CHECK-NEXT: vmov.8 q7[9], r4
1029 ; CHECK-NEXT: vmov r4, r1, d0
1030 ; CHECK-NEXT: vmov.8 q7[10], r12
1031 ; CHECK-NEXT: vmov.8 q7[11], r5
1032 ; CHECK-NEXT: ldrb r7, [r7]
1033 ; CHECK-NEXT: ldrb r6, [r6]
1034 ; CHECK-NEXT: ldrb r3, [r3]
1035 ; CHECK-NEXT: ldrb r4, [r4]
1036 ; CHECK-NEXT: ldrb r1, [r1]
1037 ; CHECK-NEXT: vmov.8 q6[8], r4
1038 ; CHECK-NEXT: vmov r5, r4, d1
1039 ; CHECK-NEXT: vmov.8 q6[9], r1
1040 ; CHECK-NEXT: vadd.i32 q0, q5, r0
1041 ; CHECK-NEXT: vldrw.u32 q5, [sp, #200] @ 16-byte Reload
1042 ; CHECK-NEXT: ldrb r5, [r5]
1043 ; CHECK-NEXT: ldrb r4, [r4]
1044 ; CHECK-NEXT: vmov.8 q6[10], r5
1045 ; CHECK-NEXT: vmov.8 q6[11], r4
1046 ; CHECK-NEXT: vmov.8 q6[12], r7
1047 ; CHECK-NEXT: vmov.8 q6[13], r6
1048 ; CHECK-NEXT: vmov.8 q6[14], r3
1049 ; CHECK-NEXT: vmov r1, r3, d0
1050 ; CHECK-NEXT: ldrb r1, [r1]
1051 ; CHECK-NEXT: vmov.8 q7[12], r1
1052 ; CHECK-NEXT: ldrb r1, [r3]
1053 ; CHECK-NEXT: vmov.8 q7[13], r1
1054 ; CHECK-NEXT: vmov r1, r3, d1
1055 ; CHECK-NEXT: vadd.i32 q0, q1, r0
1056 ; CHECK-NEXT: vadd.i32 q1, q1, q2
1057 ; CHECK-NEXT: vstrw.32 q1, [sp, #232] @ 16-byte Spill
1058 ; CHECK-NEXT: vldrw.u32 q1, [sp, #248] @ 16-byte Reload
1059 ; CHECK-NEXT: vadd.i32 q1, q1, q2
1060 ; CHECK-NEXT: vstrw.32 q1, [sp, #248] @ 16-byte Spill
1061 ; CHECK-NEXT: vldrw.u32 q1, [sp, #152] @ 16-byte Reload
1062 ; CHECK-NEXT: vadd.i32 q1, q1, q2
1063 ; CHECK-NEXT: ldrb r1, [r1]
1064 ; CHECK-NEXT: vmov.8 q7[14], r1
1065 ; CHECK-NEXT: ldrb r1, [r3]
1066 ; CHECK-NEXT: vmov.8 q7[15], r1
1067 ; CHECK-NEXT: ldrb.w r1, [lr]
1068 ; CHECK-NEXT: vmov.8 q6[15], r1
1069 ; CHECK-NEXT: vmov r1, r3, d0
1070 ; CHECK-NEXT: vadd.i8 q6, q6, q7
1071 ; CHECK-NEXT: ldrb r1, [r1]
1072 ; CHECK-NEXT: ldrb r3, [r3]
1073 ; CHECK-NEXT: vmov.8 q7[0], r1
1074 ; CHECK-NEXT: vmov.8 q7[1], r3
1075 ; CHECK-NEXT: vmov r1, r3, d1
1076 ; CHECK-NEXT: vadd.i32 q0, q3, r0
1077 ; CHECK-NEXT: vadd.i32 q3, q3, q2
1078 ; CHECK-NEXT: vstrw.32 q3, [sp, #216] @ 16-byte Spill
1079 ; CHECK-NEXT: vldrw.u32 q3, [sp, #296] @ 16-byte Reload
1080 ; CHECK-NEXT: vadd.i32 q3, q3, q2
1081 ; CHECK-NEXT: vstrw.32 q3, [sp, #296] @ 16-byte Spill
1082 ; CHECK-NEXT: vldrw.u32 q3, [sp, #280] @ 16-byte Reload
1083 ; CHECK-NEXT: vadd.i32 q3, q3, q2
1084 ; CHECK-NEXT: vstrw.32 q3, [sp, #280] @ 16-byte Spill
1085 ; CHECK-NEXT: vldrw.u32 q3, [sp, #264] @ 16-byte Reload
1086 ; CHECK-NEXT: vadd.i32 q3, q3, q2
1087 ; CHECK-NEXT: vstrw.32 q3, [sp, #264] @ 16-byte Spill
1088 ; CHECK-NEXT: ldrb r1, [r1]
1089 ; CHECK-NEXT: vmov.8 q7[2], r1
1090 ; CHECK-NEXT: ldrb r1, [r3]
1091 ; CHECK-NEXT: vmov.8 q7[3], r1
1092 ; CHECK-NEXT: vmov r1, r3, d0
1093 ; CHECK-NEXT: ldrb r1, [r1]
1094 ; CHECK-NEXT: vmov.8 q7[4], r1
1095 ; CHECK-NEXT: ldrb r1, [r3]
1096 ; CHECK-NEXT: vmov.8 q7[5], r1
1097 ; CHECK-NEXT: vmov r1, r3, d1
1098 ; CHECK-NEXT: vadd.i32 q0, q5, r0
1099 ; CHECK-NEXT: vadd.i32 q5, q5, q2
1100 ; CHECK-NEXT: vstrw.32 q5, [sp, #200] @ 16-byte Spill
1101 ; CHECK-NEXT: vldrw.u32 q5, [sp, #120] @ 16-byte Reload
1102 ; CHECK-NEXT: vadd.i32 q5, q5, q2
1103 ; CHECK-NEXT: ldrb r1, [r1]
1104 ; CHECK-NEXT: vmov.8 q7[6], r1
1105 ; CHECK-NEXT: ldrb r1, [r3]
1106 ; CHECK-NEXT: vmov.8 q7[7], r1
1107 ; CHECK-NEXT: vmov r1, r3, d0
1108 ; CHECK-NEXT: ldrb r1, [r1]
1109 ; CHECK-NEXT: vmov.8 q7[8], r1
1110 ; CHECK-NEXT: ldrb r1, [r3]
1111 ; CHECK-NEXT: vmov.8 q7[9], r1
1112 ; CHECK-NEXT: vmov r1, r3, d1
1113 ; CHECK-NEXT: vadd.i32 q0, q4, r0
1114 ; CHECK-NEXT: vadd.i32 q4, q4, q2
1115 ; CHECK-NEXT: vstrw.32 q4, [sp, #184] @ 16-byte Spill
1116 ; CHECK-NEXT: ldrb r1, [r1]
1117 ; CHECK-NEXT: vmov.8 q7[10], r1
1118 ; CHECK-NEXT: ldrb r1, [r3]
1119 ; CHECK-NEXT: vmov.8 q7[11], r1
1120 ; CHECK-NEXT: vmov r1, r3, d0
1121 ; CHECK-NEXT: ldrb r1, [r1]
1122 ; CHECK-NEXT: vmov.8 q7[12], r1
1123 ; CHECK-NEXT: ldrb r1, [r3]
1124 ; CHECK-NEXT: vmov.8 q7[13], r1
1125 ; CHECK-NEXT: vmov r1, r3, d1
1126 ; CHECK-NEXT: ldrb r1, [r1]
1127 ; CHECK-NEXT: vmov.8 q7[14], r1
1128 ; CHECK-NEXT: ldrb r1, [r3]
1129 ; CHECK-NEXT: vmov.8 q7[15], r1
1130 ; CHECK-NEXT: vadd.i8 q0, q6, q7
1131 ; CHECK-NEXT: vldrw.u32 q7, [sp, #136] @ 16-byte Reload
1132 ; CHECK-NEXT: vstrb.8 q0, [r8], #16
1133 ; CHECK-NEXT: vldrw.u32 q0, [sp, #168] @ 16-byte Reload
1134 ; CHECK-NEXT: vadd.i32 q7, q7, q2
1135 ; CHECK-NEXT: vadd.i32 q0, q0, q2
1136 ; CHECK-NEXT: bne.w .LBB13_3
1137 ; CHECK-NEXT: @ %bb.4: @ %middle.block
1138 ; CHECK-NEXT: @ in Loop: Header=BB13_2 Depth=1
1139 ; CHECK-NEXT: cmp r10, r2
1140 ; CHECK-NEXT: bne.w .LBB13_2
1141 ; CHECK-NEXT: .LBB13_5: @ %for.cond.cleanup
1142 ; CHECK-NEXT: add sp, #312
1143 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
1144 ; CHECK-NEXT: add sp, #4
1145 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
1146 ; CHECK-NEXT: .p2align 4
1147 ; CHECK-NEXT: @ %bb.6:
1148 ; CHECK-NEXT: .LCPI13_0:
1149 ; CHECK-NEXT: .long 38 @ 0x26
1150 ; CHECK-NEXT: .long 41 @ 0x29
1151 ; CHECK-NEXT: .long 44 @ 0x2c
1152 ; CHECK-NEXT: .long 47 @ 0x2f
1153 ; CHECK-NEXT: .LCPI13_1:
1154 ; CHECK-NEXT: .long 14 @ 0xe
1155 ; CHECK-NEXT: .long 17 @ 0x11
1156 ; CHECK-NEXT: .long 20 @ 0x14
1157 ; CHECK-NEXT: .long 23 @ 0x17
1158 ; CHECK-NEXT: .LCPI13_2:
1159 ; CHECK-NEXT: .long 24 @ 0x18
1160 ; CHECK-NEXT: .long 27 @ 0x1b
1161 ; CHECK-NEXT: .long 30 @ 0x1e
1162 ; CHECK-NEXT: .long 33 @ 0x21
1163 ; CHECK-NEXT: .LCPI13_3:
1164 ; CHECK-NEXT: .long 1 @ 0x1
1165 ; CHECK-NEXT: .long 4 @ 0x4
1166 ; CHECK-NEXT: .long 7 @ 0x7
1167 ; CHECK-NEXT: .long 10 @ 0xa
1168 ; CHECK-NEXT: .LCPI13_4:
1169 ; CHECK-NEXT: .long 36 @ 0x24
1170 ; CHECK-NEXT: .long 39 @ 0x27
1171 ; CHECK-NEXT: .long 42 @ 0x2a
1172 ; CHECK-NEXT: .long 45 @ 0x2d
1173 ; CHECK-NEXT: .LCPI13_5:
1174 ; CHECK-NEXT: .long 25 @ 0x19
1175 ; CHECK-NEXT: .long 28 @ 0x1c
1176 ; CHECK-NEXT: .long 31 @ 0x1f
1177 ; CHECK-NEXT: .long 34 @ 0x22
1178 ; CHECK-NEXT: .LCPI13_6:
1179 ; CHECK-NEXT: .long 13 @ 0xd
1180 ; CHECK-NEXT: .long 16 @ 0x10
1181 ; CHECK-NEXT: .long 19 @ 0x13
1182 ; CHECK-NEXT: .long 22 @ 0x16
1183 ; CHECK-NEXT: .LCPI13_7:
1184 ; CHECK-NEXT: .long 2 @ 0x2
1185 ; CHECK-NEXT: .long 5 @ 0x5
1186 ; CHECK-NEXT: .long 8 @ 0x8
1187 ; CHECK-NEXT: .long 11 @ 0xb
1188 ; CHECK-NEXT: .LCPI13_8:
1189 ; CHECK-NEXT: .long 26 @ 0x1a
1190 ; CHECK-NEXT: .long 29 @ 0x1d
1191 ; CHECK-NEXT: .long 32 @ 0x20
1192 ; CHECK-NEXT: .long 35 @ 0x23
1193 ; CHECK-NEXT: .LCPI13_9:
1194 ; CHECK-NEXT: .long 37 @ 0x25
1195 ; CHECK-NEXT: .long 40 @ 0x28
1196 ; CHECK-NEXT: .long 43 @ 0x2b
1197 ; CHECK-NEXT: .long 46 @ 0x2e
1198 ; CHECK-NEXT: .LCPI13_10:
1199 ; CHECK-NEXT: .long 12 @ 0xc
1200 ; CHECK-NEXT: .long 15 @ 0xf
1201 ; CHECK-NEXT: .long 18 @ 0x12
1202 ; CHECK-NEXT: .long 21 @ 0x15
1203 ; CHECK-NEXT: .LCPI13_11:
1204 ; CHECK-NEXT: .long 0 @ 0x0
1205 ; CHECK-NEXT: .long 3 @ 0x3
1206 ; CHECK-NEXT: .long 6 @ 0x6
1207 ; CHECK-NEXT: .long 9 @ 0x9
1211 %cmp22 = icmp sgt i32 %n, 0
1212 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
1214 vector.ph: ; preds = %entry, %middle.block
1215 %n.vec = and i32 %n, -8
1216 br label %vector.body
1218 vector.body: ; preds = %vector.body, %vector.ph
1219 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
1220 %vec.ind = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, %vector.ph ], [ %vec.ind.next, %vector.body ]
1221 %0 = mul nuw nsw <16 x i32> %vec.ind, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
1222 %1 = getelementptr inbounds i8, i8* %data, <16 x i32> %0
1223 %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %1, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
1224 %2 = add nuw nsw <16 x i32> %0, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
1225 %3 = getelementptr inbounds i8, i8* %data, <16 x i32> %2
1226 %wide.masked.gather24 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %3, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
1227 %4 = add nuw nsw <16 x i32> %0, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
1228 %5 = getelementptr inbounds i8, i8* %data, <16 x i32> %4
1229 %wide.masked.gather25 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %5, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
1230 %6 = add nsw <16 x i8> %wide.masked.gather24, %wide.masked.gather
1231 %7 = add nsw <16 x i8> %6, %wide.masked.gather25
1232 %8 = getelementptr inbounds i8, i8* %dst, i32 %index
1233 %9 = bitcast i8* %8 to <16 x i8>*
1234 store <16 x i8> %7, <16 x i8>* %9, align 2
1235 %index.next = add i32 %index, 16
1236 %vec.ind.next = add <16 x i32> %vec.ind, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
1237 %10 = icmp eq i32 %index.next, %n.vec
1238 br i1 %10, label %middle.block, label %vector.body
1240 middle.block: ; preds = %vector.body
1241 %cmp.n = icmp eq i32 %n.vec, %n
1242 br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
1244 for.cond.cleanup: ; preds = %middle.block, %entry
1248 define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(i8* noalias nocapture readonly %data, i8* noalias nocapture %dst, i32 %n) {
1249 ; CHECK-LABEL: gather_inc_v16i8_simple:
1250 ; CHECK: @ %bb.0: @ %entry
1251 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr}
1252 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr}
1253 ; CHECK-NEXT: .pad #4
1254 ; CHECK-NEXT: sub sp, #4
1255 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
1256 ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
1257 ; CHECK-NEXT: .pad #72
1258 ; CHECK-NEXT: sub sp, #72
1259 ; CHECK-NEXT: cmp r2, #1
1260 ; CHECK-NEXT: str r1, [sp, #68] @ 4-byte Spill
1261 ; CHECK-NEXT: blt.w .LBB14_5
1262 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader
1263 ; CHECK-NEXT: adr r5, .LCPI14_3
1264 ; CHECK-NEXT: adr r7, .LCPI14_1
1265 ; CHECK-NEXT: vldrw.u32 q0, [r5]
1266 ; CHECK-NEXT: adr r6, .LCPI14_2
1267 ; CHECK-NEXT: adr r3, .LCPI14_0
1268 ; CHECK-NEXT: bic r1, r2, #7
1269 ; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill
1270 ; CHECK-NEXT: vldrw.u32 q0, [r7]
1271 ; CHECK-NEXT: vmov.i32 q4, #0x10
1272 ; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
1273 ; CHECK-NEXT: vldrw.u32 q0, [r6]
1274 ; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
1275 ; CHECK-NEXT: vldrw.u32 q0, [r3]
1276 ; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill
1277 ; CHECK-NEXT: .LBB14_2: @ %vector.ph
1278 ; CHECK-NEXT: @ =>This Loop Header: Depth=1
1279 ; CHECK-NEXT: @ Child Loop BB14_3 Depth 2
1280 ; CHECK-NEXT: ldr.w lr, [sp, #68] @ 4-byte Reload
1281 ; CHECK-NEXT: vldrw.u32 q5, [sp] @ 16-byte Reload
1282 ; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload
1283 ; CHECK-NEXT: vldrw.u32 q7, [sp, #32] @ 16-byte Reload
1284 ; CHECK-NEXT: vldrw.u32 q0, [sp, #48] @ 16-byte Reload
1285 ; CHECK-NEXT: mov r8, r1
1286 ; CHECK-NEXT: .LBB14_3: @ %vector.body
1287 ; CHECK-NEXT: @ Parent Loop BB14_2 Depth=1
1288 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2
1289 ; CHECK-NEXT: vadd.i32 q2, q6, r0
1290 ; CHECK-NEXT: vadd.i32 q1, q5, r0
1291 ; CHECK-NEXT: vmov r6, r7, d5
1292 ; CHECK-NEXT: vadd.i32 q3, q0, r0
1293 ; CHECK-NEXT: vmov r4, r5, d3
1294 ; CHECK-NEXT: subs.w r8, r8, #16
1295 ; CHECK-NEXT: vmov r3, r9, d4
1296 ; CHECK-NEXT: vadd.i32 q2, q7, r0
1297 ; CHECK-NEXT: vadd.i32 q5, q5, q4
1298 ; CHECK-NEXT: vadd.i32 q6, q6, q4
1299 ; CHECK-NEXT: vadd.i32 q7, q7, q4
1300 ; CHECK-NEXT: vadd.i32 q0, q0, q4
1301 ; CHECK-NEXT: ldrb.w r11, [r6]
1302 ; CHECK-NEXT: ldrb.w r10, [r7]
1303 ; CHECK-NEXT: vmov r6, r7, d2
1304 ; CHECK-NEXT: ldrb r4, [r4]
1305 ; CHECK-NEXT: ldrb r5, [r5]
1306 ; CHECK-NEXT: ldrb r3, [r3]
1307 ; CHECK-NEXT: ldrb.w r9, [r9]
1308 ; CHECK-NEXT: ldrb r6, [r6]
1309 ; CHECK-NEXT: ldrb r7, [r7]
1310 ; CHECK-NEXT: vmov.8 q1[0], r6
1311 ; CHECK-NEXT: vmov.8 q1[1], r7
1312 ; CHECK-NEXT: vmov r6, r7, d5
1313 ; CHECK-NEXT: vmov.8 q1[2], r4
1314 ; CHECK-NEXT: vmov.8 q1[3], r5
1315 ; CHECK-NEXT: vmov.8 q1[4], r3
1316 ; CHECK-NEXT: vmov.8 q1[5], r9
1317 ; CHECK-NEXT: vmov.8 q1[6], r11
1318 ; CHECK-NEXT: vmov.8 q1[7], r10
1319 ; CHECK-NEXT: ldrb.w r12, [r7]
1320 ; CHECK-NEXT: vmov r5, r7, d7
1321 ; CHECK-NEXT: ldrb r6, [r6]
1322 ; CHECK-NEXT: ldrb.w r9, [r7]
1323 ; CHECK-NEXT: vmov r7, r3, d6
1324 ; CHECK-NEXT: ldrb r5, [r5]
1325 ; CHECK-NEXT: ldrb.w r11, [r3]
1326 ; CHECK-NEXT: vmov r3, r4, d4
1327 ; CHECK-NEXT: ldrb r7, [r7]
1328 ; CHECK-NEXT: ldrb r3, [r3]
1329 ; CHECK-NEXT: ldrb r4, [r4]
1330 ; CHECK-NEXT: vmov.8 q1[8], r3
1331 ; CHECK-NEXT: vmov.8 q1[9], r4
1332 ; CHECK-NEXT: vmov.8 q1[10], r6
1333 ; CHECK-NEXT: vmov.8 q1[11], r12
1334 ; CHECK-NEXT: vmov.8 q1[12], r7
1335 ; CHECK-NEXT: vmov.8 q1[13], r11
1336 ; CHECK-NEXT: vmov.8 q1[14], r5
1337 ; CHECK-NEXT: vmov.8 q1[15], r9
1338 ; CHECK-NEXT: vstrb.8 q1, [lr], #16
1339 ; CHECK-NEXT: bne .LBB14_3
1340 ; CHECK-NEXT: @ %bb.4: @ %middle.block
1341 ; CHECK-NEXT: @ in Loop: Header=BB14_2 Depth=1
1342 ; CHECK-NEXT: cmp r1, r2
1343 ; CHECK-NEXT: bne .LBB14_2
1344 ; CHECK-NEXT: .LBB14_5: @ %for.cond.cleanup
1345 ; CHECK-NEXT: add sp, #72
1346 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
1347 ; CHECK-NEXT: add sp, #4
1348 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc}
1349 ; CHECK-NEXT: .p2align 4
1350 ; CHECK-NEXT: @ %bb.6:
1351 ; CHECK-NEXT: .LCPI14_0:
1352 ; CHECK-NEXT: .long 0 @ 0x0
1353 ; CHECK-NEXT: .long 1 @ 0x1
1354 ; CHECK-NEXT: .long 2 @ 0x2
1355 ; CHECK-NEXT: .long 3 @ 0x3
1356 ; CHECK-NEXT: .LCPI14_1:
1357 ; CHECK-NEXT: .long 8 @ 0x8
1358 ; CHECK-NEXT: .long 9 @ 0x9
1359 ; CHECK-NEXT: .long 10 @ 0xa
1360 ; CHECK-NEXT: .long 11 @ 0xb
1361 ; CHECK-NEXT: .LCPI14_2:
1362 ; CHECK-NEXT: .long 4 @ 0x4
1363 ; CHECK-NEXT: .long 5 @ 0x5
1364 ; CHECK-NEXT: .long 6 @ 0x6
1365 ; CHECK-NEXT: .long 7 @ 0x7
1366 ; CHECK-NEXT: .LCPI14_3:
1367 ; CHECK-NEXT: .long 12 @ 0xc
1368 ; CHECK-NEXT: .long 13 @ 0xd
1369 ; CHECK-NEXT: .long 14 @ 0xe
1370 ; CHECK-NEXT: .long 15 @ 0xf
1374 %cmp22 = icmp sgt i32 %n, 0
1375 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup
1377 vector.ph: ; preds = %entry, %middle.block
1378 %n.vec = and i32 %n, -8
1379 br label %vector.body
1381 vector.body: ; preds = %vector.body, %vector.ph
1382 %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
1383 %vec.ind = phi <16 x i32> [ <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>, %vector.ph ], [ %vec.ind.next, %vector.body ]
1384 %0 = getelementptr inbounds i8, i8* %data, <16 x i32> %vec.ind
1385 %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*> %0, i32 2, <16 x i1> <i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true, i1 true>, <16 x i8> undef)
1386 %1 = getelementptr inbounds i8, i8* %dst, i32 %index
1387 %2 = bitcast i8* %1 to <16 x i8>*
1388 store <16 x i8> %wide.masked.gather, <16 x i8>* %2, align 2
1389 %index.next = add i32 %index, 16
1390 %vec.ind.next = add <16 x i32> %vec.ind, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
1391 %3 = icmp eq i32 %index.next, %n.vec
1392 br i1 %3, label %middle.block, label %vector.body
1394 middle.block: ; preds = %vector.body
1395 %cmp.n = icmp eq i32 %n.vec, %n
1396 br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph
1398 for.cond.cleanup: ; preds = %middle.block, %entry
1403 declare <2 x i32> @llvm.masked.gather.v2i32.v2p0i32(<2 x i32*>, i32, <2 x i1>, <2 x i32>)
1404 declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)
1405 declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*>, i32, <8 x i1>, <8 x i32>)
1406 declare <16 x i32> @llvm.masked.gather.v16i32.v16p0i32(<16 x i32*>, i32, <16 x i1>, <16 x i32>)
1407 declare <2 x float> @llvm.masked.gather.v2f32.v2p0f32(<2 x float*>, i32, <2 x i1>, <2 x float>)
1408 declare <4 x float> @llvm.masked.gather.v4f32.v4p0f32(<4 x float*>, i32, <4 x i1>, <4 x float>)
1409 declare <8 x float> @llvm.masked.gather.v8f32.v8p0f32(<8 x float*>, i32, <8 x i1>, <8 x float>)
1410 declare <2 x i16> @llvm.masked.gather.v2i16.v2p0i16(<2 x i16*>, i32, <2 x i1>, <2 x i16>)
1411 declare <4 x i16> @llvm.masked.gather.v4i16.v4p0i16(<4 x i16*>, i32, <4 x i1>, <4 x i16>)
1412 declare <8 x i16> @llvm.masked.gather.v8i16.v8p0i16(<8 x i16*>, i32, <8 x i1>, <8 x i16>)
1413 declare <16 x i16> @llvm.masked.gather.v16i16.v16p0i16(<16 x i16*>, i32, <16 x i1>, <16 x i16>)
1414 declare <4 x half> @llvm.masked.gather.v4f16.v4p0f16(<4 x half*>, i32, <4 x i1>, <4 x half>)
1415 declare <8 x half> @llvm.masked.gather.v8f16.v8p0f16(<8 x half*>, i32, <8 x i1>, <8 x half>)
1416 declare <16 x half> @llvm.masked.gather.v16f16.v16p0f16(<16 x half*>, i32, <16 x i1>, <16 x half>)
1417 declare <4 x i8> @llvm.masked.gather.v4i8.v4p0i8(<4 x i8*>, i32, <4 x i1>, <4 x i8>)
1418 declare <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*>, i32, <8 x i1>, <8 x i8>)
1419 declare <16 x i8> @llvm.masked.gather.v16i8.v16p0i8(<16 x i8*>, i32, <16 x i1>, <16 x i8>)
1420 declare <32 x i8> @llvm.masked.gather.v32i8.v32p0i8(<32 x i8*>, i32, <32 x i1>, <32 x i8>)
1421 declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)