; RUN: llvm-as %s -o - | llvm-dis - | FileCheck %s
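; This test checks that calls to the legacy SVE intrinsics below (ldN,
; tuple.create/set/get, and the bf16 lane forms) are auto-upgraded when the IR
; is round-tripped through llvm-as and llvm-dis: the ldN calls become the
; .sret forms plus llvm.vector.insert, the tuple intrinsics become
; llvm.vector.insert/extract, and the lane intrinsics take an i32 immediate.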
define <vscale x 32 x i8> @ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; CHECK-NEXT: %res = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %3, <vscale x 16 x i8> %4, i64 16)
; CHECK-NEXT: ret <vscale x 32 x i8> %res
  %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
  ret <vscale x 32 x i8> %res
}

define <vscale x 48 x i8> @ld3.nxv48i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld3.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; CHECK-NEXT: %5 = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> %3, <vscale x 16 x i8> %4, i64 16)
; CHECK-NEXT: %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
; CHECK-NEXT: %res = call <vscale x 48 x i8> @llvm.vector.insert.nxv48i8.nxv16i8(<vscale x 48 x i8> %5, <vscale x 16 x i8> %6, i64 32)
; CHECK-NEXT: ret <vscale x 48 x i8> %res
  %res = call <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
  ret <vscale x 48 x i8> %res
}

define <vscale x 64 x i8> @ld4.nxv64i8_lower_bound(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld4.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; CHECK-NEXT: %5 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %3, <vscale x 16 x i8> %4, i64 16)
; CHECK-NEXT: %6 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 2
; CHECK-NEXT: %7 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %5, <vscale x 16 x i8> %6, i64 32)
; CHECK-NEXT: %8 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 3
; CHECK-NEXT: %res = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %7, <vscale x 16 x i8> %8, i64 48)
; CHECK-NEXT: ret <vscale x 64 x i8> %res
  %res = call <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
  ret <vscale x 64 x i8> %res
}

; Check short mangling name

; ldN intrinsic name without any element type
define <vscale x 32 x i8> @ld2.nxv32i8_no_eltty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK-LABEL: @ld2.nxv32i8_no_eltty
; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; CHECK-NEXT: %res = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %3, <vscale x 16 x i8> %4, i64 16)
; CHECK-NEXT: ret <vscale x 32 x i8> %res
  %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
  ret <vscale x 32 x i8> %res
}

; ldN intrinsic name with only output type
define <vscale x 32 x i8> @ld2.nxv32i8_no_predty_pty(<vscale x 16 x i1> %Pg, i8 *%base_ptr) {
; CHECK-LABEL: @ld2.nxv32i8_no_predty_pty
; CHECK: %1 = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.ld2.sret.nxv16i8(<vscale x 16 x i1> %Pg, ptr %base_ptr)
; CHECK-NEXT: %2 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 0
; CHECK-NEXT: %3 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %2, i64 0)
; CHECK-NEXT: %4 = extractvalue { <vscale x 16 x i8>, <vscale x 16 x i8> } %1, 1
; CHECK-NEXT: %res = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %3, <vscale x 16 x i8> %4, i64 16)
; CHECK-NEXT: ret <vscale x 32 x i8> %res
  %res = call <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8(<vscale x 16 x i1> %Pg, i8 *%base_ptr)
  ret <vscale x 32 x i8> %res
}

declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
declare <vscale x 48 x i8> @llvm.aarch64.sve.ld3.nxv48i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
declare <vscale x 64 x i8> @llvm.aarch64.sve.ld4.nxv64i8.nxv16i1.p0i8(<vscale x 16 x i1>, i8*)
declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2(<vscale x 16 x i1>, i8*)
declare <vscale x 32 x i8> @llvm.aarch64.sve.ld2.nxv32i8(<vscale x 16 x i1>, i8*)

; aarch64.sve.tuple.create.N
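; As the CHECK lines below show, each tuple.createN call is rewritten into a
; chain of llvm.vector.insert calls that assemble the wide result from poison.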
define <vscale x 32 x i8> @create2_nxv32i8_nxv16i8(<vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2) {
; CHECK-LABEL: @create2_nxv32i8_nxv16i8
; CHECK: %1 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %z1, i64 0)
; CHECK-NEXT: %tuple = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %1, <vscale x 16 x i8> %z2, i64 16)
; CHECK-NEXT: ret <vscale x 32 x i8> %tuple
  %tuple = tail call <vscale x 32 x i8> @llvm.aarch64.sve.tuple.create2.nxv32i8.nxv16i8(<vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2)
  ret <vscale x 32 x i8> %tuple
}

define <vscale x 24 x i16> @create3_nxv24i8_nxv16i8(<vscale x 8 x i16> %unused_z0, <vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3) {
; CHECK-LABEL: @create3_nxv24i8_nxv16i8
; CHECK: %1 = call <vscale x 24 x i16> @llvm.vector.insert.nxv24i16.nxv8i16(<vscale x 24 x i16> poison, <vscale x 8 x i16> %z1, i64 0)
; CHECK-NEXT: %2 = call <vscale x 24 x i16> @llvm.vector.insert.nxv24i16.nxv8i16(<vscale x 24 x i16> %1, <vscale x 8 x i16> %z2, i64 8)
; CHECK-NEXT: %tuple = call <vscale x 24 x i16> @llvm.vector.insert.nxv24i16.nxv8i16(<vscale x 24 x i16> %2, <vscale x 8 x i16> %z3, i64 16)
; CHECK-NEXT: ret <vscale x 24 x i16> %tuple
  %tuple = tail call <vscale x 24 x i16> @llvm.aarch64.sve.tuple.create3.nxv24i16.nxv8i16(<vscale x 8 x i16> %z1, <vscale x 8 x i16> %z2, <vscale x 8 x i16> %z3)
  ret <vscale x 24 x i16> %tuple
}

define <vscale x 64 x i8> @create4_nxv64i8_nxv16i8(<vscale x 16 x i8> %unused_z0, <vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3, <vscale x 16 x i8> %z4) {
; CHECK-LABEL: @create4_nxv64i8_nxv16i8
; CHECK: %1 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> poison, <vscale x 16 x i8> %z1, i64 0)
; CHECK-NEXT: %2 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %1, <vscale x 16 x i8> %z2, i64 16)
; CHECK-NEXT: %3 = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %2, <vscale x 16 x i8> %z3, i64 32)
; CHECK-NEXT: %tuple = call <vscale x 64 x i8> @llvm.vector.insert.nxv64i8.nxv16i8(<vscale x 64 x i8> %3, <vscale x 16 x i8> %z4, i64 48)
; CHECK-NEXT: ret <vscale x 64 x i8> %tuple
  %tuple = tail call <vscale x 64 x i8> @llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8(<vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2, <vscale x 16 x i8> %z3, <vscale x 16 x i8> %z4)
  ret <vscale x 64 x i8> %tuple
}

; Accept short mangling name
define <vscale x 32 x i8> @create2_nxv32i8(<vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2) {
; CHECK-LABEL: @create2_nxv32i8
; CHECK: %1 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %z1, i64 0)
; CHECK-NEXT: %tuple = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %1, <vscale x 16 x i8> %z2, i64 16)
; CHECK-NEXT: ret <vscale x 32 x i8> %tuple
  %tuple = tail call <vscale x 32 x i8> @llvm.aarch64.sve.tuple.create2.nxv32i8(<vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2)
  ret <vscale x 32 x i8> %tuple
}

define <vscale x 32 x i8> @create2(<vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2) {
; CHECK-LABEL: @create2
; CHECK: %1 = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> poison, <vscale x 16 x i8> %z1, i64 0)
; CHECK-NEXT: %tuple = call <vscale x 32 x i8> @llvm.vector.insert.nxv32i8.nxv16i8(<vscale x 32 x i8> %1, <vscale x 16 x i8> %z2, i64 16)
; CHECK-NEXT: ret <vscale x 32 x i8> %tuple
  %tuple = tail call <vscale x 32 x i8> @llvm.aarch64.sve.tuple.create2(<vscale x 16 x i8> %z1, <vscale x 16 x i8> %z2)
  ret <vscale x 32 x i8> %tuple
}

; Negative test for create
; Should not upgrade when create is not 2, 3 or 4
define <vscale x 16 x i8> @sve_tuple_create1(<vscale x 16 x i8> %z0) {
; CHECK-LABEL: @sve_tuple_create1
; CHECK: %tuple = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.create1.nxv16i8.nxv16i8(<vscale x 16 x i8> %z0)
; CHECK-NEXT: ret <vscale x 16 x i8> %tuple
  %tuple = tail call <vscale x 16 x i8> @llvm.aarch64.sve.tuple.create1.nxv16i8.nxv16i8(<vscale x 16 x i8> %z0)
  ret <vscale x 16 x i8> %tuple
}

; aarch64.sve.tuple.set
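; tuple.set with a constant index is rewritten to a single llvm.vector.insert
; at the corresponding element offset (here index 1 of a pair of
; <vscale x 4 x i32> parts, i.e. offset 4).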
define void @set_tuple2_nxv8i32_elt1(<vscale x 8 x i32> %z0, <vscale x 4 x i32> %z1) {
; CHECK-LABEL: @set_tuple2_nxv8i32_elt1
; CHECK: %ins = call <vscale x 8 x i32> @llvm.vector.insert.nxv8i32.nxv4i32(<vscale x 8 x i32> %z0, <vscale x 4 x i32> %z1, i64 4)
; CHECK-NEXT: ret void
  %ins = call <vscale x 8 x i32> @llvm.aarch64.sve.tuple.set.nxv8i32.nxv4i32(<vscale x 8 x i32> %z0, i32 1, <vscale x 4 x i32> %z1)
  ret void
}

; aarch64.sve.tuple.get
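; tuple.get is the inverse: it is rewritten to llvm.vector.extract at the
; element offset selected by the constant index.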
define <vscale x 4 x i32> @get_tuple2_nxv8i32_elt1(<vscale x 8 x i32> %tuple) {
; CHECK-LABEL: @get_tuple2_nxv8i32_elt1
; CHECK: %ext = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %tuple, i64 4)
; CHECK-NEXT: ret <vscale x 4 x i32> %ext
  %ext = call <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv8i32(<vscale x 8 x i32> %tuple, i32 1)
  ret <vscale x 4 x i32> %ext
}

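; aarch64.sve.bfdot/bfmlalb/bfmlalt lane intrinsics
; The lane index immediate is now i32 (it was i64) and the calls are upgraded
; to the .lane.v2 intrinsic names, as the CHECK lines below show.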
define <vscale x 4 x float> @bfdot_lane(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c) nounwind {
; CHECK-LABEL: @bfdot_lane
; CHECK: %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfdot.lane.v2(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 0)
; CHECK-NEXT: ret <vscale x 4 x float> %out
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfdot.lane(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i64 0)
  ret <vscale x 4 x float> %out
}

define <vscale x 4 x float> @bfmlalb_lane(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c) nounwind {
; CHECK-LABEL: @bfmlalb_lane
; CHECK: %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlalb.lane.v2(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 0)
; CHECK-NEXT: ret <vscale x 4 x float> %out
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlalb.lane(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i64 0)
  ret <vscale x 4 x float> %out
}

define <vscale x 4 x float> @bfmlalt_lane(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c) nounwind {
; CHECK-LABEL: @bfmlalt_lane
; CHECK: %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlalt.lane.v2(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i32 0)
; CHECK-NEXT: ret <vscale x 4 x float> %out
  %out = call <vscale x 4 x float> @llvm.aarch64.sve.bfmlalt.lane(<vscale x 4 x float> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c, i64 0)
  ret <vscale x 4 x float> %out
}

declare <vscale x 32 x i8> @llvm.aarch64.sve.tuple.create2.nxv32i8.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 32 x i8> @llvm.aarch64.sve.tuple.create2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 32 x i8> @llvm.aarch64.sve.tuple.create2(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 24 x i16> @llvm.aarch64.sve.tuple.create3.nxv24i16.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 64 x i8> @llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.tuple.create1.nxv16i8.nxv16i8(<vscale x 16 x i8>)
declare <vscale x 8 x i32> @llvm.aarch64.sve.tuple.set.nxv8i32.nxv4i32(<vscale x 8 x i32>, i32, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.tuple.get.nxv8i32(<vscale x 8 x i32>, i32)
declare <vscale x 4 x float> @llvm.aarch64.sve.bfdot.lane(<vscale x 4 x float>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.bfmlalb.lane(<vscale x 4 x float>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.bfmlalt.lane(<vscale x 4 x float>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, i64)