; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Check that we don't try to merge uunpklo/uzp1 with a load or store if we
; would end up creating a predicate that would be too large for the
; guaranteed minimum VL.
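; A key for the ptrue immediates used below: 9 = vl16, 10 = vl32, 11 = vl64,
; 12 = vl128 and 31 = all. vscale_range(8,0) guarantees a minimum vector
; length of 8 x 128 = 1024 bits. A ptrue vlN pattern yields an all-false
; predicate whenever N exceeds the available element count, so widening the
; predicate's element type is only known to be safe when
; N x <wide element bits> fits in 1024 bits; the tests below exercise both
; sides of that boundary.

; vl64 at 16 bits per element is exactly 64 x 16 = 1024 bits, so the
; uunpklo merges with the load into an extending ld1b.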
define <vscale x 8 x i16> @uunpklo_i8_valid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i8_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    ld1b { z0.h }, p0/z, [x0]
; CHECK-NEXT:    ret
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %load)
  ret <vscale x 8 x i16> %uzp
}
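
; vl128 at 16 bits per element would need 128 x 16 = 2048 bits, more than
; the guaranteed 1024-bit minimum VL, so the load and uunpklo stay separate.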
define <vscale x 8 x i16> @uunpklo_i8_invalid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i8_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl128
; CHECK-NEXT:    ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.h, z0.b
; CHECK-NEXT:    ret
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 12)
  %load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr %b, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8> %load)
  ret <vscale x 8 x i16> %uzp
}
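
; vl32 x 32 bits = 1024 bits fits the minimum VL, so this merges into an
; extending ld1h.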
define <vscale x 4 x i32> @uunpklo_i16_valid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i16_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    ld1h { z0.s }, p0/z, [x0]
; CHECK-NEXT:    ret
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %load)
  ret <vscale x 4 x i32> %uzp
}
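
; vl64 x 32 bits = 2048 bits exceeds the minimum VL; no merge.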
define <vscale x 4 x i32> @uunpklo_i16_invalid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i16_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.s, z0.h
; CHECK-NEXT:    ret
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 11)
  %load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr %b, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16> %load)
  ret <vscale x 4 x i32> %uzp
}
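
; vl16 x 64 bits = 1024 bits fits the minimum VL, so this merges into an
; extending ld1w.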
define <vscale x 2 x i64> @uunpklo_i32_valid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i32_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl16
; CHECK-NEXT:    ld1w { z0.d }, p0/z, [x0]
; CHECK-NEXT:    ret
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 9)
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
  ret <vscale x 2 x i64> %uzp
}
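
; vl32 x 64 bits = 2048 bits exceeds the minimum VL; no merge.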
define <vscale x 2 x i64> @uunpklo_i32_invalid(ptr %b) #0 {
; CHECK-LABEL: uunpklo_i32_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z0.s
; CHECK-NEXT:    ret
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
  ret <vscale x 2 x i64> %uzp
}
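
; With the all pattern the number of active elements depends on the element
; size, so the widened predicate could never select the same lanes and the
; fold is never attempted.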
define <vscale x 2 x i64> @uunpklo_invalid_all(ptr %b) #0 {
; CHECK-LABEL: uunpklo_invalid_all:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT:    uunpklo z0.d, z0.s
; CHECK-NEXT:    ret
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr %b, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
  %uzp = call <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32> %load)
  ret <vscale x 2 x i64> %uzp
}
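
; The same rule applies in the store direction: a uzp1 feeding a masked
; store can merge into a truncating store only when the widened predicate
; pattern is known to be non-empty at the minimum VL.

; vl64 x 16 bits = 1024 bits fits, so the uzp1 folds into a truncating st1b.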
define void @uzp1_i8_valid(<vscale x 8 x i16> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i8_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    st1b { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 8 x i16> %a to <vscale x 16 x i8>
  %uzp = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8> %a.bc, <vscale x 16 x i8> %a.bc)
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 11)
  call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %uzp, ptr %b, i32 2, <vscale x 16 x i1> %mask)
  ret void
}
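
; vl128 x 16 bits = 2048 bits exceeds the minimum VL; the uzp1 stays.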
define void @uzp1_i8_invalid(<vscale x 8 x i16> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i8_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl128
; CHECK-NEXT:    uzp1 z0.b, z0.b, z0.b
; CHECK-NEXT:    st1b { z0.b }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 8 x i16> %a to <vscale x 16 x i8>
  %uzp = call <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8> %a.bc, <vscale x 16 x i8> %a.bc)
  %mask = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 12)
  call void @llvm.masked.store.nxv16i8(<vscale x 16 x i8> %uzp, ptr %b, i32 2, <vscale x 16 x i1> %mask)
  ret void
}
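
; vl32 x 32 bits = 1024 bits fits; folds into a truncating st1h.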
define void @uzp1_i16_valid(<vscale x 4 x i32> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i16_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    st1h { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 4 x i32> %a to <vscale x 8 x i16>
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16> %a.bc, <vscale x 8 x i16> %a.bc)
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 10)
  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %uzp, ptr %b, i32 2, <vscale x 8 x i1> %mask)
  ret void
}
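
; vl64 x 32 bits = 2048 bits exceeds the minimum VL; the uzp1 stays.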
define void @uzp1_i16_invalid(<vscale x 4 x i32> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i16_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h, vl64
; CHECK-NEXT:    uzp1 z0.h, z0.h, z0.h
; CHECK-NEXT:    st1h { z0.h }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 4 x i32> %a to <vscale x 8 x i16>
  %uzp = call <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16> %a.bc, <vscale x 8 x i16> %a.bc)
  %mask = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 11)
  call void @llvm.masked.store.nxv8i16(<vscale x 8 x i16> %uzp, ptr %b, i32 2, <vscale x 8 x i1> %mask)
  ret void
}
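
; vl16 x 64 bits = 1024 bits fits; folds into a truncating st1w.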
define void @uzp1_i32_valid(<vscale x 2 x i64> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i32_valid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d, vl16
; CHECK-NEXT:    st1w { z0.d }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 2 x i64> %a to <vscale x 4 x i32>
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a.bc, <vscale x 4 x i32> %a.bc)
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 9)
  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %uzp, ptr %b, i32 2, <vscale x 4 x i1> %mask)
  ret void
}
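
; vl32 x 64 bits = 2048 bits exceeds the minimum VL; the uzp1 stays.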
define void @uzp1_i32_invalid(<vscale x 2 x i64> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_i32_invalid:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s, vl32
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 2 x i64> %a to <vscale x 4 x i32>
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a.bc, <vscale x 4 x i32> %a.bc)
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 10)
  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %uzp, ptr %b, i32 2, <vscale x 4 x i1> %mask)
  ret void
}
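
; As with the load case, the all pattern is never foldable.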
define void @uzp1_invalid_all(<vscale x 2 x i64> %a, ptr %b) #0 {
; CHECK-LABEL: uzp1_invalid_all:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    uzp1 z0.s, z0.s, z0.s
; CHECK-NEXT:    st1w { z0.s }, p0, [x0]
; CHECK-NEXT:    ret
  %a.bc = bitcast <vscale x 2 x i64> %a to <vscale x 4 x i32>
  %uzp = call <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32> %a.bc, <vscale x 4 x i32> %a.bc)
  %mask = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  call void @llvm.masked.store.nxv4i32(<vscale x 4 x i32> %uzp, ptr %b, i32 2, <vscale x 4 x i1> %mask)
  ret void
}
declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 %pattern)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)

declare <vscale x 8 x i16> @llvm.aarch64.sve.uunpklo.nxv8i16(<vscale x 16 x i8>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uunpklo.nxv4i32(<vscale x 8 x i16>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uunpklo.nxv2i64(<vscale x 4 x i32>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uzp1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uzp1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uzp1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)

declare <vscale x 16 x i8> @llvm.masked.load.nxv16i8(ptr, i32, <vscale x 16 x i1>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.masked.load.nxv8i16(ptr, i32, <vscale x 8 x i1>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.masked.load.nxv4i32(ptr, i32, <vscale x 4 x i1>, <vscale x 4 x i32>)

declare void @llvm.masked.store.nxv16i8(<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>)
declare void @llvm.masked.store.nxv8i16(<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>)
declare void @llvm.masked.store.nxv4i32(<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>)

attributes #0 = { "target-features"="+sve" vscale_range(8,0) }