; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
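
;
; INDEX (IMMEDIATE, IMMEDIATE)
;
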
define <vscale x 16 x i8> @index_ii_i8() {
; CHECK-LABEL: index_ii_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #-16, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 -16, i8 15)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_ii_i16() {
; CHECK-LABEL: index_ii_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, #15, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 15, i16 -16)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_ii_i32() {
; CHECK-LABEL: index_ii_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #-16, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 -16, i32 15)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_ii_i64() {
; CHECK-LABEL: index_ii_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, #15, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 15, i64 -16)
  ret <vscale x 2 x i64> %out
}

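; Immediates outside the INDEX encoding range [-16, 15] cannot be used directly,
; so they are materialised into scalar registers first.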
define <vscale x 2 x i64> @index_ii_range() {
; CHECK-LABEL: index_ii_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16
; CHECK-NEXT:    mov x9, #-17
; CHECK-NEXT:    index z0.d, x9, x8
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 -17, i64 16)
  ret <vscale x 2 x i64> %out
}

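; Shifting the series (0, 2, 4, ...) left by the splatted 2 and adding the same
; splat rescales it to (2, 10, 18, ...), which folds back into a single INDEX
; with in-range immediates.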
define <vscale x 8 x i16> @index_ii_range_combine(i16 %a) {
; CHECK-LABEL: index_ii_range_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, #2, #8
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
  %val1 = shufflevector <vscale x 8 x i16> %val, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
  %val2 = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 0, i16 2)
  %val3 = shl <vscale x 8 x i16> %val2, %val1
  %out = add <vscale x 8 x i16> %val3, %val1
  ret <vscale x 8 x i16> %out
}

;
; INDEX (IMMEDIATE, SCALAR)
;

define <vscale x 16 x i8> @index_ir_i8(i8 %a) {
; CHECK-LABEL: index_ir_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, #15, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 15, i8 %a)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_ir_i16(i16 %a) {
; CHECK-LABEL: index_ir_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, #-16, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 -16, i16 %a)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_ir_i32(i32 %a) {
; CHECK-LABEL: index_ir_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #15, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 15, i32 %a)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_ir_i64(i64 %a) {
; CHECK-LABEL: index_ir_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, #-16, x0
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 -16, i64 %a)
  ret <vscale x 2 x i64> %out
}

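; A start value of -17 is outside the immediate range, so it is moved into a
; scalar register before the INDEX.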
define <vscale x 4 x i32> @index_ir_range(i32 %a) {
; CHECK-LABEL: index_ir_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #-17
; CHECK-NEXT:    index z0.s, w8, w0
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 -17, i32 %a)
  ret <vscale x 4 x i32> %out
}

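; Subtracting the splatted 2 from the series (2, 3, 4, ...) and multiplying by
; %a gives (0, %a, 2*%a, ...), which folds into an INDEX with a zero start and
; a scalar step.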
define <vscale x 4 x i32> @index_ir_range_combine(i32 %a) {
; CHECK-LABEL: index_ir_range_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, #0, w0
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
  %val1 = shufflevector <vscale x 4 x i32> %val, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 2, i32 1)
  %tmp1 = sub <vscale x 4 x i32> %tmp, %val1
  %val2 = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
  %val3 = shufflevector <vscale x 4 x i32> %val2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %out = mul <vscale x 4 x i32> %tmp1, %val3
  ret <vscale x 4 x i32> %out
}

;
; INDEX (SCALAR, IMMEDIATE)
;

define <vscale x 16 x i8> @index_ri_i8(i8 %a) {
; CHECK-LABEL: index_ri_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, w0, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 %a, i8 -16)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_ri_i16(i16 %a) {
; CHECK-LABEL: index_ri_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, w0, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 %a, i16 15)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_ri_i32(i32 %a) {
; CHECK-LABEL: index_ri_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, w0, #-16
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 %a, i32 -16)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_ri_i64(i64 %a) {
; CHECK-LABEL: index_ri_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, x0, #15
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 %a, i64 15)
  ret <vscale x 2 x i64> %out
}

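; A step of 16 is outside the immediate range, so it is moved into a scalar
; register before the INDEX.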
define <vscale x 8 x i16> @index_ri_range(i16 %a) {
; CHECK-LABEL: index_ri_range:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #16
; CHECK-NEXT:    index z0.h, w0, w8
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 %a, i16 16)
  ret <vscale x 8 x i16> %out
}

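;
; INDEX (SCALAR, SCALAR)
;
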
define <vscale x 16 x i8> @index_rr_i8(i8 %a, i8 %b) {
; CHECK-LABEL: index_rr_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8 %a, i8 %b)
  ret <vscale x 16 x i8> %out
}

define <vscale x 8 x i16> @index_rr_i16(i16 %a, i16 %b) {
; CHECK-LABEL: index_rr_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16 %a, i16 %b)
  ret <vscale x 8 x i16> %out
}

define <vscale x 4 x i32> @index_rr_i32(i32 %a, i32 %b) {
; CHECK-LABEL: index_rr_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 %a, i32 %b)
  ret <vscale x 4 x i32> %out
}

define <vscale x 2 x i64> @index_rr_i64(i64 %a, i64 %b) {
; CHECK-LABEL: index_rr_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64 %a, i64 %b)
  ret <vscale x 2 x i64> %out
}

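; Multiplying the series (0, 1, 2, ...) by the splat of %b and adding the splat
; of %a produces (%a, %a+%b, %a+2*%b, ...), which folds into a single INDEX
; with two scalar operands.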
define <vscale x 4 x i32> @index_rr_i32_combine(i32 %a, i32 %b) {
; CHECK-LABEL: index_rr_i32_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    index z0.s, w0, w1
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
  %val1 = shufflevector <vscale x 4 x i32> %val, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %val2 = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %val3 = shufflevector <vscale x 4 x i32> %val2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 0, i32 1)
  %tmp1 = mul <vscale x 4 x i32> %tmp, %val3
  %out = add <vscale x 4 x i32> %tmp1, %val1
  ret <vscale x 4 x i32> %out
}

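; Here the intermediate index vector %tmp has a second use in the final add, so
; the mul+add sequence cannot be folded into a single INDEX and separate INDEX,
; MLA and ADD instructions remain.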
define <vscale x 4 x i32> @index_rr_i32_not_combine(i32 %a, i32 %b) {
; CHECK-LABEL: index_rr_i32_not_combine:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z0.s, w0
; CHECK-NEXT:    mov z1.s, w1
; CHECK-NEXT:    index z2.s, #0, #1
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    mla z0.s, p0/m, z2.s, z1.s
; CHECK-NEXT:    add z0.s, z0.s, z2.s
; CHECK-NEXT:    ret
  %val = insertelement <vscale x 4 x i32> poison, i32 %a, i32 0
  %val1 = shufflevector <vscale x 4 x i32> %val, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %val2 = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
  %val3 = shufflevector <vscale x 4 x i32> %val2, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
  %tmp = call <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32 0, i32 1)
  %tmp1 = mul <vscale x 4 x i32> %tmp, %val3
  %tmp2 = add <vscale x 4 x i32> %tmp1, %val1
  %out = add <vscale x 4 x i32> %tmp2, %tmp
  ret <vscale x 4 x i32> %out
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.index.nxv16i8(i8, i8)
declare <vscale x 8 x i16> @llvm.aarch64.sve.index.nxv8i16(i16, i16)
declare <vscale x 4 x i32> @llvm.aarch64.sve.index.nxv4i32(i32, i32)
declare <vscale x 2 x i64> @llvm.aarch64.sve.index.nxv2i64(i64, i64)