1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 -mattr=+v \
3 ; RUN: -riscv-v-vector-bits-min=-1 -riscv-v-slp-max-vf=0 -S | FileCheck %s --check-prefixes=CHECK
4 ; RUN: opt < %s -passes=slp-vectorizer -mtriple=riscv64 -mattr=+v -S | FileCheck %s --check-prefixes=DEFAULT
; vec_add: load two adjacent i16 lanes from %p, add a splat constant (1),
; store both lanes contiguously to %dest. Both RUN configurations are
; expected to SLP-vectorize this into one <2 x i16> load/add/store.
6 define void @vec_add(ptr %dest, ptr %p) {
7 ; CHECK-LABEL: @vec_add(
9 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
10 ; CHECK-NEXT: [[TMP1:%.*]] = add <2 x i16> [[TMP0]], <i16 1, i16 1>
11 ; CHECK-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
12 ; CHECK-NEXT: ret void
14 ; DEFAULT-LABEL: @vec_add(
15 ; DEFAULT-NEXT: entry:
16 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
17 ; DEFAULT-NEXT: [[TMP1:%.*]] = add <2 x i16> [[TMP0]], <i16 1, i16 1>
18 ; DEFAULT-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
19 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads: lane 0 at %p, lane 1 at %p + 1 element.
22 %e0 = load i16, ptr %p, align 4
23 %inc = getelementptr inbounds i16, ptr %p, i64 1
24 %e1 = load i16, ptr %inc, align 2
; NOTE(review): the defs of %a0/%a1 (scalar `add i16 %eN, 1` per the CHECK
; lines above) are elided in this excerpt — confirm against the full file.
; Contiguous scalar stores form the vectorizable store pair.
29 store i16 %a0, ptr %dest, align 4
30 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
31 store i16 %a1, ptr %inc2, align 2
; vec_sub: same shape as @vec_add but with `sub` and splat constant 17;
; expected to vectorize to one <2 x i16> load/sub/store in both RUN configs.
35 define void @vec_sub(ptr %dest, ptr %p) {
36 ; CHECK-LABEL: @vec_sub(
38 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
39 ; CHECK-NEXT: [[TMP1:%.*]] = sub <2 x i16> [[TMP0]], <i16 17, i16 17>
40 ; CHECK-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
41 ; CHECK-NEXT: ret void
43 ; DEFAULT-LABEL: @vec_sub(
44 ; DEFAULT-NEXT: entry:
45 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
46 ; DEFAULT-NEXT: [[TMP1:%.*]] = sub <2 x i16> [[TMP0]], <i16 17, i16 17>
47 ; DEFAULT-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
48 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p.
51 %e0 = load i16, ptr %p, align 4
52 %inc = getelementptr inbounds i16, ptr %p, i64 1
53 %e1 = load i16, ptr %inc, align 2
; NOTE(review): defs of %a0/%a1 (scalar `sub i16 %eN, 17` per the CHECK
; lines) are elided in this excerpt — confirm against the full file.
58 store i16 %a0, ptr %dest, align 4
59 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
60 store i16 %a1, ptr %inc2, align 2
; vec_rsub: reverse subtraction — splat constant 29 minus the loaded lanes
; (constant is the FIRST sub operand in the expected vector form).
64 define void @vec_rsub(ptr %dest, ptr %p) {
65 ; CHECK-LABEL: @vec_rsub(
67 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
68 ; CHECK-NEXT: [[TMP1:%.*]] = sub <2 x i16> <i16 29, i16 29>, [[TMP0]]
69 ; CHECK-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
70 ; CHECK-NEXT: ret void
72 ; DEFAULT-LABEL: @vec_rsub(
73 ; DEFAULT-NEXT: entry:
74 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
75 ; DEFAULT-NEXT: [[TMP1:%.*]] = sub <2 x i16> <i16 29, i16 29>, [[TMP0]]
76 ; DEFAULT-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
77 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p.
80 %e0 = load i16, ptr %p, align 4
81 %inc = getelementptr inbounds i16, ptr %p, i64 1
82 %e1 = load i16, ptr %inc, align 2
; NOTE(review): defs of %a0/%a1 (scalar `sub i16 29, %eN` per the CHECK
; lines) are elided in this excerpt — confirm against the full file.
87 store i16 %a0, ptr %dest, align 4
88 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
89 store i16 %a1, ptr %inc2, align 2
; vec_mul: same load/op/store shape with `mul` by splat constant 7;
; expected to become one <2 x i16> load/mul/store in both RUN configs.
93 define void @vec_mul(ptr %dest, ptr %p) {
94 ; CHECK-LABEL: @vec_mul(
96 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
97 ; CHECK-NEXT: [[TMP1:%.*]] = mul <2 x i16> [[TMP0]], <i16 7, i16 7>
98 ; CHECK-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
99 ; CHECK-NEXT: ret void
101 ; DEFAULT-LABEL: @vec_mul(
102 ; DEFAULT-NEXT: entry:
103 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
104 ; DEFAULT-NEXT: [[TMP1:%.*]] = mul <2 x i16> [[TMP0]], <i16 7, i16 7>
105 ; DEFAULT-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
106 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p.
109 %e0 = load i16, ptr %p, align 4
110 %inc = getelementptr inbounds i16, ptr %p, i64 1
111 %e1 = load i16, ptr %inc, align 2
; NOTE(review): defs of %a0/%a1 (scalar `mul i16 %eN, 7` per the CHECK
; lines) are elided in this excerpt — confirm against the full file.
116 store i16 %a0, ptr %dest, align 4
117 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
118 store i16 %a1, ptr %inc2, align 2
; vec_sdiv: two scalar signed divisions by the constant 7 on adjacent lanes;
; expected to vectorize to one <2 x i16> sdiv in both RUN configurations.
122 define void @vec_sdiv(ptr %dest, ptr %p) {
123 ; CHECK-LABEL: @vec_sdiv(
125 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
126 ; CHECK-NEXT: [[TMP1:%.*]] = sdiv <2 x i16> [[TMP0]], <i16 7, i16 7>
127 ; CHECK-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
128 ; CHECK-NEXT: ret void
130 ; DEFAULT-LABEL: @vec_sdiv(
131 ; DEFAULT-NEXT: entry:
132 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
133 ; DEFAULT-NEXT: [[TMP1:%.*]] = sdiv <2 x i16> [[TMP0]], <i16 7, i16 7>
134 ; DEFAULT-NEXT: store <2 x i16> [[TMP1]], ptr [[DEST:%.*]], align 4
135 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p.
138 %e0 = load i16, ptr %p, align 4
139 %inc = getelementptr inbounds i16, ptr %p, i64 1
140 %e1 = load i16, ptr %inc, align 2
; Isomorphic scalar sdivs by a constant — the vectorizable op pair.
142 %a0 = sdiv i16 %e0, 7
143 %a1 = sdiv i16 %e1, 7
; Contiguous scalar stores to %dest.
145 store i16 %a0, ptr %dest, align 4
146 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
147 store i16 %a1, ptr %inc2, align 2
; vec_and: binary op with two memory operands — adjacent lanes loaded from
; both %p and %q, and'ed pairwise, stored to %dest; expected to vectorize
; to two <2 x i16> loads feeding one vector `and` in both RUN configs.
151 define void @vec_and(ptr %dest, ptr %p, ptr %q) {
152 ; CHECK-LABEL: @vec_and(
154 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
155 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
156 ; CHECK-NEXT: [[TMP2:%.*]] = and <2 x i16> [[TMP0]], [[TMP1]]
157 ; CHECK-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
158 ; CHECK-NEXT: ret void
160 ; DEFAULT-LABEL: @vec_and(
161 ; DEFAULT-NEXT: entry:
162 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
163 ; DEFAULT-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
164 ; DEFAULT-NEXT: [[TMP2:%.*]] = and <2 x i16> [[TMP0]], [[TMP1]]
165 ; DEFAULT-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
166 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p (left operands).
169 %e0 = load i16, ptr %p, align 4
170 %inc = getelementptr inbounds i16, ptr %p, i64 1
171 %e1 = load i16, ptr %inc, align 2
; Contiguous scalar loads from %q (right operands).
173 %f0 = load i16, ptr %q, align 4
174 %inq = getelementptr inbounds i16, ptr %q, i64 1
175 %f1 = load i16, ptr %inq, align 2
; Isomorphic scalar `and` pair.
177 %a0 = and i16 %e0, %f0
178 %a1 = and i16 %e1, %f1
; Contiguous scalar stores to %dest.
180 store i16 %a0, ptr %dest, align 4
181 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
182 store i16 %a1, ptr %inc2, align 2
; vec_or: same two-source shape as @vec_and but with `or`; expected to
; vectorize to two <2 x i16> loads feeding one vector `or`.
186 define void @vec_or(ptr %dest, ptr %p, ptr %q) {
187 ; CHECK-LABEL: @vec_or(
189 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
190 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
191 ; CHECK-NEXT: [[TMP2:%.*]] = or <2 x i16> [[TMP0]], [[TMP1]]
192 ; CHECK-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
193 ; CHECK-NEXT: ret void
195 ; DEFAULT-LABEL: @vec_or(
196 ; DEFAULT-NEXT: entry:
197 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
198 ; DEFAULT-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
199 ; DEFAULT-NEXT: [[TMP2:%.*]] = or <2 x i16> [[TMP0]], [[TMP1]]
200 ; DEFAULT-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
201 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p (left operands).
204 %e0 = load i16, ptr %p, align 4
205 %inc = getelementptr inbounds i16, ptr %p, i64 1
206 %e1 = load i16, ptr %inc, align 2
; Contiguous scalar loads from %q (right operands).
208 %f0 = load i16, ptr %q, align 4
209 %inq = getelementptr inbounds i16, ptr %q, i64 1
210 %f1 = load i16, ptr %inq, align 2
; Isomorphic scalar `or` pair.
212 %a0 = or i16 %e0, %f0
213 %a1 = or i16 %e1, %f1
; Contiguous scalar stores to %dest.
215 store i16 %a0, ptr %dest, align 4
216 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
217 store i16 %a1, ptr %inc2, align 2
; vec_sll: same two-source shape with `shl` (variable shift amounts from %q);
; expected to vectorize to one <2 x i16> `shl` of the two loaded vectors.
221 define void @vec_sll(ptr %dest, ptr %p, ptr %q) {
222 ; CHECK-LABEL: @vec_sll(
224 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
225 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
226 ; CHECK-NEXT: [[TMP2:%.*]] = shl <2 x i16> [[TMP0]], [[TMP1]]
227 ; CHECK-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
228 ; CHECK-NEXT: ret void
230 ; DEFAULT-LABEL: @vec_sll(
231 ; DEFAULT-NEXT: entry:
232 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
233 ; DEFAULT-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
234 ; DEFAULT-NEXT: [[TMP2:%.*]] = shl <2 x i16> [[TMP0]], [[TMP1]]
235 ; DEFAULT-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
236 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p (values to shift).
239 %e0 = load i16, ptr %p, align 4
240 %inc = getelementptr inbounds i16, ptr %p, i64 1
241 %e1 = load i16, ptr %inc, align 2
; Contiguous scalar loads from %q (shift amounts).
243 %f0 = load i16, ptr %q, align 4
244 %inq = getelementptr inbounds i16, ptr %q, i64 1
245 %f1 = load i16, ptr %inq, align 2
; Isomorphic scalar `shl` pair.
247 %a0 = shl i16 %e0, %f0
248 %a1 = shl i16 %e1, %f1
; Contiguous scalar stores to %dest.
250 store i16 %a0, ptr %dest, align 4
251 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
252 store i16 %a1, ptr %inc2, align 2
; Scalar intrinsic declaration; the CHECK lines below show the pair of calls
; being widened to the vector form @llvm.smin.v2i16.
256 declare i16 @llvm.smin.i16(i16, i16)
; vec_smin: pairwise signed-minimum of adjacent lanes from %p and %q via the
; scalar smin intrinsic; expected to vectorize to one @llvm.smin.v2i16 call.
257 define void @vec_smin(ptr %dest, ptr %p, ptr %q) {
258 ; CHECK-LABEL: @vec_smin(
260 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
261 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
262 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
263 ; CHECK-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
264 ; CHECK-NEXT: ret void
266 ; DEFAULT-LABEL: @vec_smin(
267 ; DEFAULT-NEXT: entry:
268 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
269 ; DEFAULT-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
270 ; DEFAULT-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.smin.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
271 ; DEFAULT-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
272 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p (left operands).
275 %e0 = load i16, ptr %p, align 4
276 %inc = getelementptr inbounds i16, ptr %p, i64 1
277 %e1 = load i16, ptr %inc, align 2
; Contiguous scalar loads from %q (right operands).
279 %f0 = load i16, ptr %q, align 4
280 %inq = getelementptr inbounds i16, ptr %q, i64 1
281 %f1 = load i16, ptr %inq, align 2
; Isomorphic pair of scalar smin intrinsic calls.
283 %a0 = tail call i16 @llvm.smin.i16(i16 %e0, i16 %f0)
284 %a1 = tail call i16 @llvm.smin.i16(i16 %e1, i16 %f1)
; Contiguous scalar stores to %dest.
286 store i16 %a0, ptr %dest, align 4
287 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
288 store i16 %a1, ptr %inc2, align 2
; Scalar intrinsic declaration; the CHECK lines below show the pair of calls
; being widened to the vector form @llvm.umax.v2i16.
292 declare i16 @llvm.umax.i16(i16, i16)
; vec_umax: pairwise unsigned-maximum of adjacent lanes from %p and %q via the
; scalar umax intrinsic; expected to vectorize to one @llvm.umax.v2i16 call.
293 define void @vec_umax(ptr %dest, ptr %p, ptr %q) {
294 ; CHECK-LABEL: @vec_umax(
296 ; CHECK-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
297 ; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
298 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.umax.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
299 ; CHECK-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
300 ; CHECK-NEXT: ret void
302 ; DEFAULT-LABEL: @vec_umax(
303 ; DEFAULT-NEXT: entry:
304 ; DEFAULT-NEXT: [[TMP0:%.*]] = load <2 x i16>, ptr [[P:%.*]], align 4
305 ; DEFAULT-NEXT: [[TMP1:%.*]] = load <2 x i16>, ptr [[Q:%.*]], align 4
306 ; DEFAULT-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.umax.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
307 ; DEFAULT-NEXT: store <2 x i16> [[TMP2]], ptr [[DEST:%.*]], align 4
308 ; DEFAULT-NEXT: ret void
; Contiguous scalar loads from %p (left operands).
311 %e0 = load i16, ptr %p, align 4
312 %inc = getelementptr inbounds i16, ptr %p, i64 1
313 %e1 = load i16, ptr %inc, align 2
; Contiguous scalar loads from %q (right operands).
315 %f0 = load i16, ptr %q, align 4
316 %inq = getelementptr inbounds i16, ptr %q, i64 1
317 %f1 = load i16, ptr %inq, align 2
; Isomorphic pair of scalar umax intrinsic calls.
319 %a0 = tail call i16 @llvm.umax.i16(i16 %e0, i16 %f0)
320 %a1 = tail call i16 @llvm.umax.i16(i16 %e1, i16 %f1)
; Contiguous scalar stores to %dest.
322 store i16 %a0, ptr %dest, align 4
323 %inc2 = getelementptr inbounds i16, ptr %dest, i64 1
324 store i16 %a1, ptr %inc2, align 2