; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-SELDAG %s
; RUN: llc -verify-machineinstrs -O0 < %s | FileCheck --check-prefix=CHECK --check-prefix=CHECK-FASTISEL %s

target triple = "aarch64-unknown-linux-gnu"
define <vscale x 2 x i1> @reverse_nxv2i1(<vscale x 2 x i1> %a) #0 {
; CHECK-LABEL: reverse_nxv2i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev p0.d, p0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1> %a)
  ret <vscale x 2 x i1> %res
}
define <vscale x 4 x i1> @reverse_nxv4i1(<vscale x 4 x i1> %a) #0 {
; CHECK-LABEL: reverse_nxv4i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev p0.s, p0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1> %a)
  ret <vscale x 4 x i1> %res
}
define <vscale x 8 x i1> @reverse_nxv8i1(<vscale x 8 x i1> %a) #0 {
; CHECK-LABEL: reverse_nxv8i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev p0.h, p0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1> %a)
  ret <vscale x 8 x i1> %res
}
define <vscale x 16 x i1> @reverse_nxv16i1(<vscale x 16 x i1> %a) #0 {
; CHECK-LABEL: reverse_nxv16i1:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev p0.b, p0.b
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1> %a)
  ret <vscale x 16 x i1> %res
}
; Verify splitvec type legalisation works as expected.
define <vscale x 32 x i1> @reverse_nxv32i1(<vscale x 32 x i1> %a) #0 {
; CHECK-SELDAG-LABEL: reverse_nxv32i1:
; CHECK-SELDAG:       // %bb.0:
; CHECK-SELDAG-NEXT:    rev p2.b, p1.b
; CHECK-SELDAG-NEXT:    rev p1.b, p0.b
; CHECK-SELDAG-NEXT:    mov p0.b, p2.b
; CHECK-SELDAG-NEXT:    ret
;
; CHECK-FASTISEL-LABEL: reverse_nxv32i1:
; CHECK-FASTISEL:       // %bb.0:
; CHECK-FASTISEL-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-FASTISEL-NEXT:    addvl sp, sp, #-1
; CHECK-FASTISEL-NEXT:    str p1, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-FASTISEL-NEXT:    mov p1.b, p0.b
; CHECK-FASTISEL-NEXT:    ldr p0, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-FASTISEL-NEXT:    rev p0.b, p0.b
; CHECK-FASTISEL-NEXT:    rev p1.b, p1.b
; CHECK-FASTISEL-NEXT:    addvl sp, sp, #1
; CHECK-FASTISEL-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-FASTISEL-NEXT:    ret
  %res = call <vscale x 32 x i1> @llvm.experimental.vector.reverse.nxv32i1(<vscale x 32 x i1> %a)
  ret <vscale x 32 x i1> %res
}
; VECTOR_REVERSE - ZPR

define <vscale x 16 x i8> @reverse_nxv16i8(<vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: reverse_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.b, z0.b
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8> %a)
  ret <vscale x 16 x i8> %res
}
define <vscale x 8 x i16> @reverse_nxv8i16(<vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: reverse_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.h, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16> %a)
  ret <vscale x 8 x i16> %res
}
define <vscale x 4 x i32> @reverse_nxv4i32(<vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: reverse_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.s, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32> %a)
  ret <vscale x 4 x i32> %res
}
define <vscale x 2 x i64> @reverse_nxv2i64(<vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: reverse_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.d, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64> %a)
  ret <vscale x 2 x i64> %res
}
define <vscale x 2 x half> @reverse_nxv2f16(<vscale x 2 x half> %a) #0 {
; CHECK-LABEL: reverse_nxv2f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.d, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x half> @llvm.experimental.vector.reverse.nxv2f16(<vscale x 2 x half> %a)
  ret <vscale x 2 x half> %res
}
define <vscale x 4 x half> @reverse_nxv4f16(<vscale x 4 x half> %a) #0 {
; CHECK-LABEL: reverse_nxv4f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.s, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.reverse.nxv4f16(<vscale x 4 x half> %a)
  ret <vscale x 4 x half> %res
}
define <vscale x 8 x half> @reverse_nxv8f16(<vscale x 8 x half> %a) #0 {
; CHECK-LABEL: reverse_nxv8f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.h, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.reverse.nxv8f16(<vscale x 8 x half> %a)
  ret <vscale x 8 x half> %res
}
define <vscale x 2 x bfloat> @reverse_nxv2bf16(<vscale x 2 x bfloat> %a) #1 {
; CHECK-LABEL: reverse_nxv2bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.d, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x bfloat> @llvm.experimental.vector.reverse.nxv2bf16(<vscale x 2 x bfloat> %a)
  ret <vscale x 2 x bfloat> %res
}
define <vscale x 4 x bfloat> @reverse_nxv4bf16(<vscale x 4 x bfloat> %a) #1 {
; CHECK-LABEL: reverse_nxv4bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.s, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x bfloat> @llvm.experimental.vector.reverse.nxv4bf16(<vscale x 4 x bfloat> %a)
  ret <vscale x 4 x bfloat> %res
}
define <vscale x 8 x bfloat> @reverse_nxv8bf16(<vscale x 8 x bfloat> %a) #1 {
; CHECK-LABEL: reverse_nxv8bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.h, z0.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.experimental.vector.reverse.nxv8bf16(<vscale x 8 x bfloat> %a)
  ret <vscale x 8 x bfloat> %res
}
define <vscale x 2 x float> @reverse_nxv2f32(<vscale x 2 x float> %a) #0 {
; CHECK-LABEL: reverse_nxv2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.d, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x float> @llvm.experimental.vector.reverse.nxv2f32(<vscale x 2 x float> %a)
  ret <vscale x 2 x float> %res
}
define <vscale x 4 x float> @reverse_nxv4f32(<vscale x 4 x float> %a) #0 {
; CHECK-LABEL: reverse_nxv4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.s, z0.s
; CHECK-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.reverse.nxv4f32(<vscale x 4 x float> %a)
  ret <vscale x 4 x float> %res
}
define <vscale x 2 x double> @reverse_nxv2f64(<vscale x 2 x double> %a) #0 {
; CHECK-LABEL: reverse_nxv2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.d, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double> %a)
  ret <vscale x 2 x double> %res
}
; Verify promote type legalisation works as expected.
define <vscale x 2 x i8> @reverse_nxv2i8(<vscale x 2 x i8> %a) #0 {
; CHECK-LABEL: reverse_nxv2i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    rev z0.d, z0.d
; CHECK-NEXT:    ret
  %res = call <vscale x 2 x i8> @llvm.experimental.vector.reverse.nxv2i8(<vscale x 2 x i8> %a)
  ret <vscale x 2 x i8> %res
}
; Verify splitvec type legalisation works as expected.
define <vscale x 8 x i32> @reverse_nxv8i32(<vscale x 8 x i32> %a) #0 {
; CHECK-SELDAG-LABEL: reverse_nxv8i32:
; CHECK-SELDAG:       // %bb.0:
; CHECK-SELDAG-NEXT:    rev z2.s, z1.s
; CHECK-SELDAG-NEXT:    rev z1.s, z0.s
; CHECK-SELDAG-NEXT:    mov z0.d, z2.d
; CHECK-SELDAG-NEXT:    ret
;
; CHECK-FASTISEL-LABEL: reverse_nxv8i32:
; CHECK-FASTISEL:       // %bb.0:
; CHECK-FASTISEL-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-FASTISEL-NEXT:    addvl sp, sp, #-1
; CHECK-FASTISEL-NEXT:    str z1, [sp] // 16-byte Folded Spill
; CHECK-FASTISEL-NEXT:    mov z1.d, z0.d
; CHECK-FASTISEL-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
; CHECK-FASTISEL-NEXT:    rev z0.s, z0.s
; CHECK-FASTISEL-NEXT:    rev z1.s, z1.s
; CHECK-FASTISEL-NEXT:    addvl sp, sp, #1
; CHECK-FASTISEL-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-FASTISEL-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32> %a)
  ret <vscale x 8 x i32> %res
}
; Verify splitvec type legalisation works as expected.
define <vscale x 16 x float> @reverse_nxv16f32(<vscale x 16 x float> %a) #0 {
; CHECK-SELDAG-LABEL: reverse_nxv16f32:
; CHECK-SELDAG:       // %bb.0:
; CHECK-SELDAG-NEXT:    rev z5.s, z3.s
; CHECK-SELDAG-NEXT:    rev z4.s, z2.s
; CHECK-SELDAG-NEXT:    rev z2.s, z1.s
; CHECK-SELDAG-NEXT:    rev z3.s, z0.s
; CHECK-SELDAG-NEXT:    mov z0.d, z5.d
; CHECK-SELDAG-NEXT:    mov z1.d, z4.d
; CHECK-SELDAG-NEXT:    ret
;
; CHECK-FASTISEL-LABEL: reverse_nxv16f32:
; CHECK-FASTISEL:       // %bb.0:
; CHECK-FASTISEL-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-FASTISEL-NEXT:    addvl sp, sp, #-2
; CHECK-FASTISEL-NEXT:    str z3, [sp, #1, mul vl] // 16-byte Folded Spill
; CHECK-FASTISEL-NEXT:    str z2, [sp] // 16-byte Folded Spill
; CHECK-FASTISEL-NEXT:    mov z2.d, z1.d
; CHECK-FASTISEL-NEXT:    ldr z1, [sp] // 16-byte Folded Reload
; CHECK-FASTISEL-NEXT:    mov z3.d, z0.d
; CHECK-FASTISEL-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
; CHECK-FASTISEL-NEXT:    rev z0.s, z0.s
; CHECK-FASTISEL-NEXT:    rev z1.s, z1.s
; CHECK-FASTISEL-NEXT:    rev z2.s, z2.s
; CHECK-FASTISEL-NEXT:    rev z3.s, z3.s
; CHECK-FASTISEL-NEXT:    addvl sp, sp, #2
; CHECK-FASTISEL-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-FASTISEL-NEXT:    ret
  %res = call <vscale x 16 x float> @llvm.experimental.vector.reverse.nxv16f32(<vscale x 16 x float> %a)
  ret <vscale x 16 x float> %res
}
declare <vscale x 2 x i1> @llvm.experimental.vector.reverse.nxv2i1(<vscale x 2 x i1>)
declare <vscale x 4 x i1> @llvm.experimental.vector.reverse.nxv4i1(<vscale x 4 x i1>)
declare <vscale x 8 x i1> @llvm.experimental.vector.reverse.nxv8i1(<vscale x 8 x i1>)
declare <vscale x 16 x i1> @llvm.experimental.vector.reverse.nxv16i1(<vscale x 16 x i1>)
declare <vscale x 32 x i1> @llvm.experimental.vector.reverse.nxv32i1(<vscale x 32 x i1>)
declare <vscale x 2 x i8> @llvm.experimental.vector.reverse.nxv2i8(<vscale x 2 x i8>)
declare <vscale x 16 x i8> @llvm.experimental.vector.reverse.nxv16i8(<vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.experimental.vector.reverse.nxv8i16(<vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.experimental.vector.reverse.nxv4i32(<vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.experimental.vector.reverse.nxv8i32(<vscale x 8 x i32>)
declare <vscale x 2 x i64> @llvm.experimental.vector.reverse.nxv2i64(<vscale x 2 x i64>)
declare <vscale x 2 x half> @llvm.experimental.vector.reverse.nxv2f16(<vscale x 2 x half>)
declare <vscale x 4 x half> @llvm.experimental.vector.reverse.nxv4f16(<vscale x 4 x half>)
declare <vscale x 8 x half> @llvm.experimental.vector.reverse.nxv8f16(<vscale x 8 x half>)
declare <vscale x 2 x bfloat> @llvm.experimental.vector.reverse.nxv2bf16(<vscale x 2 x bfloat>)
declare <vscale x 4 x bfloat> @llvm.experimental.vector.reverse.nxv4bf16(<vscale x 4 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.experimental.vector.reverse.nxv8bf16(<vscale x 8 x bfloat>)
declare <vscale x 2 x float> @llvm.experimental.vector.reverse.nxv2f32(<vscale x 2 x float>)
declare <vscale x 4 x float> @llvm.experimental.vector.reverse.nxv4f32(<vscale x 4 x float>)
declare <vscale x 16 x float> @llvm.experimental.vector.reverse.nxv16f32(<vscale x 16 x float>)
declare <vscale x 2 x double> @llvm.experimental.vector.reverse.nxv2f64(<vscale x 2 x double>)

attributes #0 = { nounwind "target-features"="+sve" }
attributes #1 = { nounwind "target-features"="+sve,+bf16" }