; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"

; This test checks that we are not crashing or changing the code.
define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT: [[LOAD0:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: [[LOAD1:%.*]] = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[LOAD1]], [[LOAD0]]
; CHECK-NEXT: tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> [[ADD]], ptr undef, i32 1, <vscale x 16 x i1> undef)
; CHECK-NEXT: ret void
;
  %load0 = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i8> undef)
  %load1 = tail call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr undef, i32 1, <vscale x 16 x i1> undef, <vscale x 16 x i8> undef)
  %add = add <vscale x 16 x i8> %load1, %load0
  tail call void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8> %add, ptr undef, i32 1, <vscale x 16 x i1> undef)
  ret void
}
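
; A PHI node of scalable-vector type: the checks below expect the function to
; come out of the SLP vectorizer unchanged.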
define <vscale x 4 x i32> @scalable_phi(<vscale x 4 x i32> %a, i32 %b) {
; CHECK-LABEL: @scalable_phi(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[B:%.*]], 0
; CHECK-NEXT: br i1 [[CMP]], label [[IF_THEN:%.*]], label [[END:%.*]]
; CHECK: if.then:
; CHECK-NEXT: br label [[END]]
; CHECK: end:
; CHECK-NEXT: [[RETVAL:%.*]] = phi <vscale x 4 x i32> [ [[A:%.*]], [[ENTRY:%.*]] ], [ zeroinitializer, [[IF_THEN]] ]
; CHECK-NEXT: ret <vscale x 4 x i32> [[RETVAL]]
;
entry:
  %cmp = icmp eq i32 %b, 0
  br i1 %cmp, label %if.then, label %end

if.then:
  br label %end

end:
  %retval = phi <vscale x 4 x i32> [ %a, %entry ], [ zeroinitializer, %if.then ]
  ret <vscale x 4 x i32> %retval
}
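
; Scalar PHIs whose incoming values are extracts from a scalable vector; the
; checks expect the IR to be left as is.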
define void @scalable_phi1() {
; CHECK-LABEL: @scalable_phi1(
; CHECK-NEXT: middle.block:
; CHECK-NEXT: [[EXTRACT1:%.*]] = extractelement <vscale x 8 x i16> undef, i32 undef
; CHECK-NEXT: [[EXTRACT2:%.*]] = extractelement <vscale x 8 x i16> undef, i32 undef
; CHECK-NEXT: br label [[FOR_BODY_I:%.*]]
; CHECK: for.body.i:
; CHECK-NEXT: [[RECUR1:%.*]] = phi i16 [ [[EXTRACT1]], [[MIDDLE_BLOCK:%.*]] ], [ undef, [[FOR_BODY_I]] ]
; CHECK-NEXT: [[RECUR2:%.*]] = phi i16 [ [[EXTRACT2]], [[MIDDLE_BLOCK]] ], [ undef, [[FOR_BODY_I]] ]
; CHECK-NEXT: br label [[FOR_BODY_I]]
;
middle.block:
  %extract1 = extractelement <vscale x 8 x i16> undef, i32 undef
  %extract2 = extractelement <vscale x 8 x i16> undef, i32 undef
  br label %for.body.i

for.body.i: ; preds = %for.body.i, %middle.block
  %recur1 = phi i16 [ %extract1, %middle.block ], [ undef, %for.body.i ]
  %recur2 = phi i16 [ %extract2, %middle.block ], [ undef, %for.body.i ]
  br label %for.body.i
}
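
; Same as @scalable_phi1, but with the PHI incoming values listed in the
; opposite order.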
define void @scalable_phi2() {
; CHECK-LABEL: @scalable_phi2(
; CHECK-NEXT: middle.block:
; CHECK-NEXT: [[EXTRACT1:%.*]] = extractelement <vscale x 8 x i16> undef, i32 undef
; CHECK-NEXT: [[EXTRACT2:%.*]] = extractelement <vscale x 8 x i16> undef, i32 undef
; CHECK-NEXT: br label [[FOR_BODY_I:%.*]]
; CHECK: for.body.i:
; CHECK-NEXT: [[RECUR1:%.*]] = phi i16 [ undef, [[FOR_BODY_I]] ], [ [[EXTRACT1]], [[MIDDLE_BLOCK:%.*]] ]
; CHECK-NEXT: [[RECUR2:%.*]] = phi i16 [ undef, [[FOR_BODY_I]] ], [ [[EXTRACT2]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY_I]]
;
middle.block:
  %extract1 = extractelement <vscale x 8 x i16> undef, i32 undef
  %extract2 = extractelement <vscale x 8 x i16> undef, i32 undef
  br label %for.body.i

for.body.i: ; preds = %for.body.i, %middle.block
  %recur1 = phi i16 [ undef, %for.body.i ], [ %extract1, %middle.block ]
  %recur2 = phi i16 [ undef, %for.body.i ], [ %extract2, %middle.block ]
  br label %for.body.i
}
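
; Scalar arithmetic built from extracts of a scalable vector and inserted back
; into a scalable result; the checks expect the scalar code to stay scalar.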
define <vscale x 4 x i32> @build_vec_v4i32_reuse_0(<vscale x 2 x i32> %v0) {
; CHECK-LABEL: @build_vec_v4i32_reuse_0(
; CHECK-NEXT: [[V0_0:%.*]] = extractelement <vscale x 2 x i32> [[V0:%.*]], i32 0
; CHECK-NEXT: [[V0_1:%.*]] = extractelement <vscale x 2 x i32> [[V0]], i32 1
; CHECK-NEXT: [[TMP0_0:%.*]] = add i32 [[V0_0]], [[V0_0]]
; CHECK-NEXT: [[TMP1_0:%.*]] = sub i32 [[V0_0]], [[V0_1]]
; CHECK-NEXT: [[TMP2_0:%.*]] = add i32 [[TMP0_0]], [[TMP1_0]]
; CHECK-NEXT: [[TMP3_0:%.*]] = insertelement <vscale x 4 x i32> undef, i32 [[TMP2_0]], i32 0
; CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3_0]]
;
  %v0.0 = extractelement <vscale x 2 x i32> %v0, i32 0
  %v0.1 = extractelement <vscale x 2 x i32> %v0, i32 1
  %tmp0.0 = add i32 %v0.0, %v0.0
  %tmp1.0 = sub i32 %v0.0, %v0.1
  %tmp2.0 = add i32 %tmp0.0, %tmp1.0
  %tmp3.0 = insertelement <vscale x 4 x i32> undef, i32 %tmp2.0, i32 0
  ret <vscale x 4 x i32> %tmp3.0
}
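
; Extracts from fixed-length <4 x i8> vectors inserted into a scalable
; <vscale x 4 x i8> result; the checks expect no vectorization to happen.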
define <vscale x 4 x i8> @shuffle(<4 x i8> %x, <4 x i8> %y) {
; CHECK-LABEL: @shuffle(
; CHECK-NEXT: [[X0:%.*]] = extractelement <4 x i8> [[X:%.*]], i32 0
; CHECK-NEXT: [[X3:%.*]] = extractelement <4 x i8> [[X]], i32 3
; CHECK-NEXT: [[Y1:%.*]] = extractelement <4 x i8> [[Y:%.*]], i32 1
; CHECK-NEXT: [[Y2:%.*]] = extractelement <4 x i8> [[Y]], i32 2
; CHECK-NEXT: [[X0X0:%.*]] = mul i8 [[X0]], [[X0]]
; CHECK-NEXT: [[X3X3:%.*]] = mul i8 [[X3]], [[X3]]
; CHECK-NEXT: [[Y1Y1:%.*]] = mul i8 [[Y1]], [[Y1]]
; CHECK-NEXT: [[Y2Y2:%.*]] = mul i8 [[Y2]], [[Y2]]
; CHECK-NEXT: [[INS1:%.*]] = insertelement <vscale x 4 x i8> poison, i8 [[X0X0]], i32 0
; CHECK-NEXT: [[INS2:%.*]] = insertelement <vscale x 4 x i8> [[INS1]], i8 [[X3X3]], i32 1
; CHECK-NEXT: [[INS3:%.*]] = insertelement <vscale x 4 x i8> [[INS2]], i8 [[Y1Y1]], i32 2
; CHECK-NEXT: [[INS4:%.*]] = insertelement <vscale x 4 x i8> [[INS3]], i8 [[Y2Y2]], i32 3
; CHECK-NEXT: ret <vscale x 4 x i8> [[INS4]]
;
  %x0 = extractelement <4 x i8> %x, i32 0
  %x3 = extractelement <4 x i8> %x, i32 3
  %y1 = extractelement <4 x i8> %y, i32 1
  %y2 = extractelement <4 x i8> %y, i32 2
  %x0x0 = mul i8 %x0, %x0
  %x3x3 = mul i8 %x3, %x3
  %y1y1 = mul i8 %y1, %y1
  %y2y2 = mul i8 %y2, %y2
  %ins1 = insertelement <vscale x 4 x i8> poison, i8 %x0x0, i32 0
  %ins2 = insertelement <vscale x 4 x i8> %ins1, i8 %x3x3, i32 1
  %ins3 = insertelement <vscale x 4 x i8> %ins2, i8 %y1y1, i32 2
  %ins4 = insertelement <vscale x 4 x i8> %ins3, i8 %y2y2, i32 3
  ret <vscale x 4 x i8> %ins4
}
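
; The next three functions cover sext/zext/trunc of extracts from scalable
; vectors feeding GEPs; in each case the IR is expected to be left unchanged.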
define void @sext_scalable_extractelement() {
; CHECK-LABEL: @sext_scalable_extractelement(
; CHECK-NEXT: [[X0:%.*]] = extractelement <vscale x 2 x i32> undef, i32 undef
; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[X0]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr undef, i64 [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <vscale x 2 x i32> undef, i32 undef
; CHECK-NEXT: [[TMP4:%.*]] = sext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr undef, i64 [[TMP4]]
; CHECK-NEXT: ret void
;
  %x0 = extractelement <vscale x 2 x i32> undef, i32 undef
  %1 = sext i32 %x0 to i64
  %2 = getelementptr inbounds i64, ptr undef, i64 %1
  %3 = extractelement <vscale x 2 x i32> undef, i32 undef
  %4 = sext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr undef, i64 %4
  ret void
}
define void @zext_scalable_extractelement() {
; CHECK-LABEL: @zext_scalable_extractelement(
; CHECK-NEXT: [[X0:%.*]] = extractelement <vscale x 2 x i32> undef, i32 undef
; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[X0]] to i64
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr undef, i64 [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <vscale x 2 x i32> undef, i32 undef
; CHECK-NEXT: [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr undef, i64 [[TMP4]]
; CHECK-NEXT: ret void
;
  %x0 = extractelement <vscale x 2 x i32> undef, i32 undef
  %1 = zext i32 %x0 to i64
  %2 = getelementptr inbounds i64, ptr undef, i64 %1
  %3 = extractelement <vscale x 2 x i32> undef, i32 undef
  %4 = zext i32 %3 to i64
  %5 = getelementptr inbounds i64, ptr undef, i64 %4
  ret void
}
define void @trunc_scalable_extractelement() {
; CHECK-LABEL: @trunc_scalable_extractelement(
; CHECK-NEXT: [[X0:%.*]] = extractelement <vscale x 2 x i64> undef, i32 undef
; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[X0]] to i32
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr undef, i32 [[TMP1]]
; CHECK-NEXT: [[TMP3:%.*]] = extractelement <vscale x 2 x i64> undef, i32 undef
; CHECK-NEXT: [[TMP4:%.*]] = trunc i64 [[TMP3]] to i32
; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr undef, i32 [[TMP4]]
; CHECK-NEXT: ret void
;
  %x0 = extractelement <vscale x 2 x i64> undef, i32 undef
  %1 = trunc i64 %x0 to i32
  %2 = getelementptr inbounds i32, ptr undef, i32 %1
  %3 = extractelement <vscale x 2 x i64> undef, i32 undef
  %4 = trunc i64 %3 to i32
  %5 = getelementptr inbounds i32, ptr undef, i32 %4
  ret void
}

declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare void @llvm.masked.store.nxv16i8.p0(<vscale x 16 x i8>, ptr, i32 immarg, <vscale x 16 x i1>)