; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK

; Should codegen to a nop, since idx is zero.
define <2 x i64> @extract_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind {
; CHECK-LABEL: extract_v2i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 0)
  ret <2 x i64> %retval
}

; Goes through memory currently; idx != 0.
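; The subvector is extracted by spilling the scalable vector to the stack and
; loading a 128-bit chunk from a byte offset; the index is clamped with csel so
; the access stays within the runtime vector length.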
define <2 x i64> @extract_v2i64_nxv2i64_idx2(<vscale x 2 x i64> %vec) nounwind {
; CHECK-LABEL: extract_v2i64_nxv2i64_idx2:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    cntd x9
; CHECK-NEXT:    sub x9, x9, #2
; CHECK-NEXT:    mov w8, #2
; CHECK-NEXT:    cmp x9, #2
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    csel x8, x9, x8, lo
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    lsl x8, x8, #3
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    ldr q0, [x9, x8]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
  ret <2 x i64> %retval
}

; Should codegen to a nop, since idx is zero.
define <4 x i32> @extract_v4i32_nxv4i32(<vscale x 4 x i32> %vec) nounwind {
; CHECK-LABEL: extract_v4i32_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
  ret <4 x i32> %retval
}

; Goes through memory currently; idx != 0.
define <4 x i32> @extract_v4i32_nxv4i32_idx4(<vscale x 4 x i32> %vec) nounwind {
; CHECK-LABEL: extract_v4i32_nxv4i32_idx4:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    cntw x9
; CHECK-NEXT:    sub x9, x9, #4
; CHECK-NEXT:    mov w8, #4
; CHECK-NEXT:    cmp x9, #4
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    csel x8, x9, x8, lo
; CHECK-NEXT:    st1w { z0.s }, p0, [sp]
; CHECK-NEXT:    lsl x8, x8, #2
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    ldr q0, [x9, x8]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32> %vec, i64 4)
  ret <4 x i32> %retval
}

; Should codegen to a nop, since idx is zero.
define <8 x i16> @extract_v8i16_nxv8i16(<vscale x 8 x i16> %vec) nounwind {
; CHECK-LABEL: extract_v8i16_nxv8i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 0)
  ret <8 x i16> %retval
}

; Goes through memory currently; idx != 0.
define <8 x i16> @extract_v8i16_nxv8i16_idx8(<vscale x 8 x i16> %vec) nounwind {
; CHECK-LABEL: extract_v8i16_nxv8i16_idx8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    cnth x9
; CHECK-NEXT:    sub x9, x9, #8
; CHECK-NEXT:    mov w8, #8
; CHECK-NEXT:    cmp x9, #8
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    csel x8, x9, x8, lo
; CHECK-NEXT:    st1h { z0.h }, p0, [sp]
; CHECK-NEXT:    lsl x8, x8, #1
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    ldr q0, [x9, x8]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16> %vec, i64 8)
  ret <8 x i16> %retval
}

; Should codegen to a nop, since idx is zero.
define <16 x i8> @extract_v16i8_nxv16i8(<vscale x 16 x i8> %vec) nounwind {
; CHECK-LABEL: extract_v16i8_nxv16i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $q0 killed $q0 killed $z0
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 0)
  ret <16 x i8> %retval
}

; Goes through memory currently; idx != 0.
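; For byte elements the clamped index is already a byte offset, so no shift is
; needed before the load.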
define <16 x i8> @extract_v16i8_nxv16i8_idx16(<vscale x 16 x i8> %vec) nounwind {
; CHECK-LABEL: extract_v16i8_nxv16i8_idx16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    rdvl x9, #1
; CHECK-NEXT:    sub x9, x9, #16
; CHECK-NEXT:    ptrue p0.b
; CHECK-NEXT:    mov w8, #16
; CHECK-NEXT:    cmp x9, #16
; CHECK-NEXT:    st1b { z0.b }, p0, [sp]
; CHECK-NEXT:    csel x8, x9, x8, lo
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    ldr q0, [x9, x8]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8> %vec, i64 16)
  ret <16 x i8> %retval
}

; Extracting illegal subvectors
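; Extracting an unpacked (illegal) scalable subvector type at index 0 is
; expected to be a no-op: the subvector lives in the low part of the source
; register.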
define <vscale x 1 x i32> @extract_nxv1i32_nxv4i32(<vscale x 4 x i32> %vec) nounwind {
; CHECK-LABEL: extract_nxv1i32_nxv4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %retval = call <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32> %vec, i64 0)
  ret <vscale x 1 x i32> %retval
}

define <vscale x 1 x i16> @extract_nxv1i16_nxv6i16(<vscale x 6 x i16> %vec) nounwind {
; CHECK-LABEL: extract_nxv1i16_nxv6i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %retval = call <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv6i16(<vscale x 6 x i16> %vec, i64 0)
  ret <vscale x 1 x i16> %retval
}

; Fixed length clamping
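; With vscale_range(2,2) the vectors are known to be exactly 256 bits wide, but
; the extract index is still clamped before the load so that an out-of-range
; index selects the last valid subvector position.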
define <2 x i64> @extract_fixed_v2i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind #0 {
; CHECK-LABEL: extract_fixed_v2i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    cntd x9
; CHECK-NEXT:    sub x9, x9, #2
; CHECK-NEXT:    mov w8, #2
; CHECK-NEXT:    cmp x9, #2
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    csel x8, x9, x8, lo
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    lsl x8, x8, #3
; CHECK-NEXT:    mov x9, sp
; CHECK-NEXT:    ldr q0, [x9, x8]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> %vec, i64 2)
  ret <2 x i64> %retval
}

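; The <4 x i64> result is too wide for a single Q register, so it is returned
; indirectly through the pointer passed in x8.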
define <4 x i64> @extract_fixed_v4i64_nxv2i64(<vscale x 2 x i64> %vec) nounwind #0 {
; CHECK-LABEL: extract_fixed_v4i64_nxv2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    cntd x9
; CHECK-NEXT:    subs x9, x9, #4
; CHECK-NEXT:    csel x9, xzr, x9, lo
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov w10, #4
; CHECK-NEXT:    cmp x9, #4
; CHECK-NEXT:    ptrue p1.d, vl4
; CHECK-NEXT:    st1d { z0.d }, p0, [sp]
; CHECK-NEXT:    csel x9, x9, x10, lo
; CHECK-NEXT:    mov x10, sp
; CHECK-NEXT:    ld1d { z0.d }, p1/z, [x10, x9, lsl #3]
; CHECK-NEXT:    st1d { z0.d }, p1, [x8]
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %retval = call <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64> %vec, i64 4)
  ret <4 x i64> %retval
}

attributes #0 = { vscale_range(2,2) }

declare <2 x i64> @llvm.experimental.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64>, i64)
declare <4 x i32> @llvm.experimental.vector.extract.v4i32.nxv4i32(<vscale x 4 x i32>, i64)
declare <8 x i16> @llvm.experimental.vector.extract.v8i16.nxv8i16(<vscale x 8 x i16>, i64)
declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv16i8(<vscale x 16 x i8>, i64)

declare <4 x i64> @llvm.experimental.vector.extract.v4i64.nxv2i64(<vscale x 2 x i64>, i64)

declare <vscale x 1 x i32> @llvm.experimental.vector.extract.nxv1i32.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 1 x i16> @llvm.experimental.vector.extract.nxv1i16.nxv6i16(<vscale x 6 x i16>, i64)