; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=1 < %s | FileCheck %s
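
;
; LD1B
;

; Extending a full nxv16i8 masked load to i32 needs four Z registers of
; results, so the sign extension is done after a plain ld1b with two stages
; of sunpklo/sunpkhi (b -> h, then h -> s).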
define <vscale x 16 x i32> @masked_ld1b_i8_sext_i32(ptr %base, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_i8_sext_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    sunpklo z1.h, z0.b
; CHECK-NEXT:    sunpkhi z3.h, z0.b
; CHECK-NEXT:    sunpklo z0.s, z1.h
; CHECK-NEXT:    sunpkhi z1.s, z1.h
; CHECK-NEXT:    sunpklo z2.s, z3.h
; CHECK-NEXT:    sunpkhi z3.s, z3.h
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %res
}
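
; With the unpacked nxv8i8 type the i8 -> i16 step folds into the load
; itself (ld1sb into an .h container), leaving a single unpack stage.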
define <vscale x 8 x i32> @masked_ld1b_nxv8i8_sext_i32(ptr %a, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_nxv8i8_sext_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z1.h }, p0/z, [x0]
; CHECK-NEXT:    sunpklo z0.s, z1.h
; CHECK-NEXT:    sunpkhi z1.s, z1.h
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> poison)
  %res = sext <vscale x 8 x i8> %wide.masked.load to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %res
}
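
; Same layout as the sext case above; zero extension uses the unsigned
; uunpklo/uunpkhi forms.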
define <vscale x 16 x i32> @masked_ld1b_i8_zext_i32(ptr %base, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_i8_zext_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z1.h, z0.b
; CHECK-NEXT:    uunpkhi z3.h, z0.b
; CHECK-NEXT:    uunpklo z0.s, z1.h
; CHECK-NEXT:    uunpkhi z1.s, z1.h
; CHECK-NEXT:    uunpklo z2.s, z3.h
; CHECK-NEXT:    uunpkhi z3.s, z3.h
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i32>
  ret <vscale x 16 x i32> %res
}
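
; For zero extension the i8 -> i16 step is a plain ld1b into an .h
; container, which already zeroes the high bits of each element.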
define <vscale x 8 x i32> @masked_ld1b_nxv8i8_zext_i32(ptr %a, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_nxv8i8_zext_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z1.h }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.s, z1.h
; CHECK-NEXT:    uunpkhi z1.s, z1.h
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> poison)
  %res = zext <vscale x 8 x i8> %wide.masked.load to <vscale x 8 x i32>
  ret <vscale x 8 x i32> %res
}
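
; Extending nxv16i8 all the way to i64 fills eight Z registers (z0-z7) and
; needs three unpack stages after the load (b -> h -> s -> d).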
define <vscale x 16 x i64> @masked_ld1b_i8_sext(ptr %base, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_i8_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    sunpklo z1.h, z0.b
; CHECK-NEXT:    sunpkhi z0.h, z0.b
; CHECK-NEXT:    sunpklo z2.s, z1.h
; CHECK-NEXT:    sunpkhi z3.s, z1.h
; CHECK-NEXT:    sunpklo z5.s, z0.h
; CHECK-NEXT:    sunpkhi z7.s, z0.h
; CHECK-NEXT:    sunpklo z0.d, z2.s
; CHECK-NEXT:    sunpkhi z1.d, z2.s
; CHECK-NEXT:    sunpklo z2.d, z3.s
; CHECK-NEXT:    sunpkhi z3.d, z3.s
; CHECK-NEXT:    sunpklo z4.d, z5.s
; CHECK-NEXT:    sunpkhi z5.d, z5.s
; CHECK-NEXT:    sunpklo z6.d, z7.s
; CHECK-NEXT:    sunpkhi z7.d, z7.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %res = sext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
  ret <vscale x 16 x i64> %res
}
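
; ld1sb into an .s container covers the i8 -> i32 part of the extension;
; one further unpack stage reaches i64.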
define <vscale x 4 x i64> @masked_ld1b_nxv4i8_sext_i64(ptr %a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_nxv4i8_sext_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sb { z1.s }, p0/z, [x0]
; CHECK-NEXT:    sunpklo z0.d, z1.s
; CHECK-NEXT:    sunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
  %res = sext <vscale x 4 x i8> %wide.masked.load to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %res
}
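
; Unsigned variant of masked_ld1b_i8_sext: same eight-register result,
; using uunpklo/uunpkhi throughout.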
define <vscale x 16 x i64> @masked_ld1b_i8_zext(ptr %base, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_i8_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z1.h, z0.b
; CHECK-NEXT:    uunpkhi z0.h, z0.b
; CHECK-NEXT:    uunpklo z2.s, z1.h
; CHECK-NEXT:    uunpkhi z3.s, z1.h
; CHECK-NEXT:    uunpklo z5.s, z0.h
; CHECK-NEXT:    uunpkhi z7.s, z0.h
; CHECK-NEXT:    uunpklo z0.d, z2.s
; CHECK-NEXT:    uunpkhi z1.d, z2.s
; CHECK-NEXT:    uunpklo z2.d, z3.s
; CHECK-NEXT:    uunpkhi z3.d, z3.s
; CHECK-NEXT:    uunpklo z4.d, z5.s
; CHECK-NEXT:    uunpkhi z5.d, z5.s
; CHECK-NEXT:    uunpklo z6.d, z7.s
; CHECK-NEXT:    uunpkhi z7.d, z7.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr %base, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %res = zext <vscale x 16 x i8> %wide.masked.load to <vscale x 16 x i64>
  ret <vscale x 16 x i64> %res
}
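
; ld1b into an .s container zero-extends i8 -> i32 in the load; one uunpk
; stage extends the result to i64.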
define <vscale x 4 x i64> @masked_ld1b_nxv4i8_zext_i64(ptr %a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_ld1b_nxv4i8_zext_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1b { z1.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> poison)
  %res = zext <vscale x 4 x i8> %wide.masked.load to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %res
}
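
;
; LD1H
;

; An nxv8i16 masked load extended to i64 fills four Z registers, so the
; extension takes two unpack stages after a plain ld1h.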
define <vscale x 8 x i64> @masked_ld1h_i16_sext(ptr %base, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_ld1h_i16_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    sunpklo z1.s, z0.h
; CHECK-NEXT:    sunpkhi z3.s, z0.h
; CHECK-NEXT:    sunpklo z0.d, z1.s
; CHECK-NEXT:    sunpkhi z1.d, z1.s
; CHECK-NEXT:    sunpklo z2.d, z3.s
; CHECK-NEXT:    sunpkhi z3.d, z3.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %res = sext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %res
}
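
; ld1sh into an .s container folds the i16 -> i32 sign extension into the
; load; one sunpk stage reaches i64.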
define <vscale x 4 x i64> @masked_ld1h_nxv4i16_sext(ptr %a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_ld1h_nxv4i16_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1sh { z1.s }, p0/z, [x0]
; CHECK-NEXT:    sunpklo z0.d, z1.s
; CHECK-NEXT:    sunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
  %res = sext <vscale x 4 x i16> %wide.masked.load to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %res
}
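
; Unsigned variant of masked_ld1h_i16_sext, using uunpklo/uunpkhi.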
define <vscale x 8 x i64> @masked_ld1h_i16_zext(ptr %base, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_ld1h_i16_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z1.s, z0.h
; CHECK-NEXT:    uunpkhi z3.s, z0.h
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    uunpklo z2.d, z3.s
; CHECK-NEXT:    uunpkhi z3.d, z3.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr %base, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %res = zext <vscale x 8 x i16> %wide.masked.load to <vscale x 8 x i64>
  ret <vscale x 8 x i64> %res
}
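
; Plain ld1h into an .s container zero-extends i16 -> i32 in the load.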
define <vscale x 4 x i64> @masked_ld1h_nxv4i16_zext(ptr %a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_ld1h_nxv4i16_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1h { z1.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> poison)
  %res = zext <vscale x 4 x i16> %wide.masked.load to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %res
}
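
;
; LD1W
;

; A single unpack stage (s -> d) extends the nxv4i32 load result to i64.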
define <vscale x 4 x i64> @masked_ld1w_i32_sext(ptr %base, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_ld1w_i32_sext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
; CHECK-NEXT:    sunpklo z0.d, z1.s
; CHECK-NEXT:    sunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %res = sext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %res
}
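
; Zero-extending variant of the test above.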
define <vscale x 4 x i64> @masked_ld1w_i32_zext(ptr %base, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_ld1w_i32_zext:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ld1w { z1.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z1.s
; CHECK-NEXT:    uunpkhi z1.d, z1.s
; CHECK-NEXT:    ret
  %wide.masked.load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr %base, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %res = zext <vscale x 4 x i32> %wide.masked.load to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %res
}

declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8.p0(ptr, i32 immarg, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i8>)
declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i8>)
declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16.p0(ptr, i32 immarg, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i16>)
declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32.p0(ptr, i32 immarg, <vscale x 4 x i1>, <vscale x 4 x i32>)