; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst -tail-predication=force-enabled %s -o - | FileCheck %s

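; The gather of four consecutive i32s per iteration should be lowered to a
; vector-of-pointers gather with writeback (vldrw.u32 q2, [q1, #16]!): the
; pointer vector is initialised to %data plus {-16, -12, -8, -4}, so the first
; pre-increment of 16 lands on elements 0..3.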
define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(i32* noalias nocapture readonly %data, i32* noalias nocapture %dst, i32 %n) {
; CHECK-LABEL: gather_inc_v4i32_simple:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, lr}
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    cmp r2, #1
; CHECK-NEXT:    it lt
; CHECK-NEXT:    poplt {r4, pc}
; CHECK-NEXT:  .LBB0_1: @ %vector.ph.preheader
; CHECK-NEXT:    bic r12, r2, #3
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    sub.w lr, r12, #4
; CHECK-NEXT:    add.w r4, r3, lr, lsr #2
; CHECK-NEXT:    adr r3, .LCPI0_0
; CHECK-NEXT:    vldrw.u32 q0, [r3]
; CHECK-NEXT:    vadd.i32 q0, q0, r0
; CHECK-NEXT:  .LBB0_2: @ %vector.ph
; CHECK-NEXT:    @ =>This Loop Header: Depth=1
; CHECK-NEXT:    @ Child Loop BB0_3 Depth 2
; CHECK-NEXT:    dls lr, r4
; CHECK-NEXT:    mov r0, r1
; CHECK-NEXT:    vmov q1, q0
; CHECK-NEXT:  .LBB0_3: @ %vector.body
; CHECK-NEXT:    @ Parent Loop BB0_2 Depth=1
; CHECK-NEXT:    @ => This Inner Loop Header: Depth=2
; CHECK-NEXT:    vldrw.u32 q2, [q1, #16]!
; CHECK-NEXT:    vstrb.8 q2, [r0], #16
; CHECK-NEXT:    le lr, .LBB0_3
; CHECK-NEXT:  @ %bb.4: @ %middle.block
; CHECK-NEXT:    @ in Loop: Header=BB0_2 Depth=1
; CHECK-NEXT:    cmp r12, r2
; CHECK-NEXT:    bne .LBB0_2
; CHECK-NEXT:  @ %bb.5: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.6:
; CHECK-NEXT:  .LCPI0_0:
; CHECK-NEXT:    .long 4294967280 @ 0xfffffff0
; CHECK-NEXT:    .long 4294967284 @ 0xfffffff4
; CHECK-NEXT:    .long 4294967288 @ 0xfffffff8
; CHECK-NEXT:    .long 4294967292 @ 0xfffffffc
entry:
  %cmp22 = icmp sgt i32 %n, 0
  br i1 %cmp22, label %vector.ph, label %for.cond.cleanup

vector.ph:                                        ; preds = %for.body.preheader
  %n.vec = and i32 %n, -4
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %vector.ph
  %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
  %vec.ind = phi <4 x i32> [ <i32 0, i32 1, i32 2, i32 3>, %vector.ph ], [ %vec.ind.next, %vector.body ]
  %0 = getelementptr inbounds i32, i32* %data, <4 x i32> %vec.ind
  %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*> %0, i32 4, <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> undef)
  %1 = getelementptr inbounds i32, i32* %dst, i32 %index
  %2 = bitcast i32* %1 to <4 x i32>*
  store <4 x i32> %wide.masked.gather, <4 x i32>* %2, align 4
  %index.next = add i32 %index, 4
  %vec.ind.next = add <4 x i32> %vec.ind, <i32 4, i32 4, i32 4, i32 4>
  %3 = icmp eq i32 %index.next, %n.vec
  br i1 %3, label %middle.block, label %vector.body

middle.block:                                     ; preds = %vector.body
  %cmp.n = icmp eq i32 %n.vec, %n
  br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph

for.cond.cleanup:                                 ; preds = %for.body, %middle.block, %entry
  ret void
}

declare <4 x i32> @llvm.masked.gather.v4i32.v4p0i32(<4 x i32*>, i32, <4 x i1>, <4 x i32>)