; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
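
; These tests verify InstCombine's handling of the x86 AVX/AVX2 masked load
; and store intrinsics: a non-constant mask is left alone, an all-zeros mask
; folds away, a mask with every sign bit set becomes an ordinary load/store,
; and any other constant mask is converted to the target-independent
; llvm.masked.load / llvm.masked.store intrinsics.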

; If the mask isn't constant, do nothing.

define <4 x float> @mload(i8* %f, <4 x i32> %mask) {
; CHECK-LABEL: @mload(
; CHECK-NEXT:    [[LD:%.*]] = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* [[F:%.*]], <4 x i32> [[MASK:%.*]])
; CHECK-NEXT:    ret <4 x float> [[LD]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
  ret <4 x float> %ld
}

; Zero mask returns a zero vector.

define <4 x float> @mload_zeros(i8* %f) {
; CHECK-LABEL: @mload_zeros(
; CHECK-NEXT:    ret <4 x float> zeroinitializer
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> zeroinitializer)
  ret <4 x float> %ld
}

; Only the sign bit matters.

define <4 x float> @mload_fake_ones(i8* %f) {
; CHECK-LABEL: @mload_fake_ones(
; CHECK-NEXT:    ret <4 x float> zeroinitializer
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>)
  ret <4 x float> %ld
}
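
; (1, 2, 3 and 2147483647 = 0x7fffffff all have a clear sign bit. The AVX
; maskload instructions test only the sign bit of each element, so this mask
; is equivalent to all zeros.)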

; All mask bits are set, so this is just a vector load.

define <4 x float> @mload_real_ones(i8* %f) {
; CHECK-LABEL: @mload_real_ones(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    [[UNMASKEDLOAD:%.*]] = load <4 x float>, <4 x float>* [[CASTVEC]], align 1
; CHECK-NEXT:    ret <4 x float> [[UNMASKEDLOAD]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>)
  ret <4 x float> %ld
}
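
; (The resulting plain load uses align 1 because the x86 intrinsic takes an
; i8* and guarantees nothing beyond byte alignment.)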

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.

define <4 x float> @mload_one_one(i8* %f) {
; CHECK-LABEL: @mload_one_one(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef>)
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x float> %ld
}
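
; For reference, here is a hand-written version of the generic masked load
; that the transform above produces. This function and the declaration after
; it are illustrative additions, not part of the autogenerated checks; a zero
; passthrough is used since the x86 semantics zero the masked-off lanes.
define <4 x float> @mload_one_one_reference(i8* %f) {
  %castvec = bitcast i8* %f to <4 x float>*
  %ld = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> zeroinitializer)
  ret <4 x float> %ld
}

declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)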

define <2 x double> @mload_one_one_double(i8* %f) {
; CHECK-LABEL: @mload_one_one_double(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> <double undef, double 0.000000e+00>)
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x double> %ld
}

define <8 x float> @mload_v8f32(i8* %f) {
; CHECK-LABEL: @mload_v8f32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>)
; CHECK-NEXT:    ret <8 x float> [[TMP1]]
;
  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x float> %ld
}

define <4 x double> @mload_v4f64(i8* %f) {
; CHECK-LABEL: @mload_v4f64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> <double undef, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>)
; CHECK-NEXT:    ret <4 x double> [[TMP1]]
;
  %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x double> %ld
}

; Try the AVX2 variants.

define <4 x i32> @mload_v4i32(i8* %f) {
; CHECK-LABEL: @mload_v4i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> <i32 0, i32 0, i32 0, i32 undef>)
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x i32> %ld
}

define <2 x i64> @mload_v2i64(i8* %f) {
; CHECK-LABEL: @mload_v2i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x i64>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> <i64 undef, i64 0>)
; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
;
  %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x i64> %ld
}

define <8 x i32> @mload_v8i32(i8* %f) {
; CHECK-LABEL: @mload_v8i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> <i32 0, i32 0, i32 0, i32 undef, i32 0, i32 0, i32 0, i32 0>)
; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
;
  %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x i32> %ld
}

define <4 x i64> @mload_v4i64(i8* %f) {
; CHECK-LABEL: @mload_v4i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i64>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> <i64 undef, i64 0, i64 0, i64 0>)
; CHECK-NEXT:    ret <4 x i64> [[TMP1]]
;
  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x i64> %ld
}

; If the mask isn't constant, do nothing.

define void @mstore(i8* %f, <4 x i32> %mask, <4 x float> %v) {
; CHECK-LABEL: @mstore(
; CHECK-NEXT:    tail call void @llvm.x86.avx.maskstore.ps(i8* [[F:%.*]], <4 x i32> [[MASK:%.*]], <4 x float> [[V:%.*]])
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
  ret void
}

; Zero mask is a nop.

define void @mstore_zeros(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_zeros(
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> zeroinitializer, <4 x float> %v)
  ret void
}

; Only the sign bit matters.

define void @mstore_fake_ones(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_fake_ones(
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>, <4 x float> %v)
  ret void
}

; All mask bits are set, so this is just a vector store.

define void @mstore_real_ones(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_real_ones(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[V:%.*]], <4 x float>* [[CASTVEC]], align 1
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>, <4 x float> %v)
  ret void
}

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.

define void @mstore_one_one(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_one_one(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> [[V:%.*]], <4 x float>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>, <4 x float> %v)
  ret void
}
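
; For reference, a hand-written version of the generic masked store produced
; by the transform above. As with the load example earlier, this function and
; the declaration after it are illustrative additions, not autogenerated test
; content.
define void @mstore_one_one_reference(i8* %f, <4 x float> %v) {
  %castvec = bitcast i8* %f to <4 x float>*
  call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> %v, <4 x float>* %castvec, i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
  ret void
}

declare void @llvm.masked.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)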

define void @mstore_one_one_double(i8* %f, <2 x double> %v) {
; CHECK-LABEL: @mstore_one_one_double(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x double>*
; CHECK-NEXT:    call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> [[V:%.*]], <2 x double>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.pd(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x double> %v)
  ret void
}

; Try 256-bit FP ops.

define void @mstore_v8f32(i8* %f, <8 x float> %v) {
; CHECK-LABEL: @mstore_v8f32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x float>*
; CHECK-NEXT:    call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> [[V:%.*]], <8 x float>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x float> %v)
  ret void
}

define void @mstore_v4f64(i8* %f, <4 x double> %v) {
; CHECK-LABEL: @mstore_v4f64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x double>*
; CHECK-NEXT:    call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> [[V:%.*]], <4 x double>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x double> %v)
  ret void
}

; Try the AVX2 variants.

define void @mstore_v4i32(i8* %f, <4 x i32> %v) {
; CHECK-LABEL: @mstore_v4i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i32>*
; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[V:%.*]], <4 x i32>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 true, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.d(i8* %f, <4 x i32> <i32 0, i32 1, i32 -1, i32 -2>, <4 x i32> %v)
  ret void
}

define void @mstore_v2i64(i8* %f, <2 x i64> %v) {
; CHECK-LABEL: @mstore_v2i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x i64>*
; CHECK-NEXT:    call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> [[V:%.*]], <2 x i64>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.q(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x i64> %v)
  ret void
}

define void @mstore_v8i32(i8* %f, <8 x i32> %v) {
; CHECK-LABEL: @mstore_v8i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x i32>*
; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> [[V:%.*]], <8 x i32>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.d.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x i32> %v)
  ret void
}

define void @mstore_v4i64(i8* %f, <4 x i64> %v) {
; CHECK-LABEL: @mstore_v4i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i64>*
; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x i64> %v)
  ret void
}

; The original SSE2 masked store variant.

define void @mstore_v16i8_sse2_zeros(<16 x i8> %d, i8* %p) {
; CHECK-LABEL: @mstore_v16i8_sse2_zeros(
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %d, <16 x i8> zeroinitializer, i8* %p)
  ret void
}
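
; (Unlike the AVX forms above, a nonzero constant mask is not converted here;
; maskmovdqu is byte-granular and carries an implicit non-temporal hint, so,
; at least in these tests, only the all-zeros no-op case is simplified.)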

declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>)
declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)

declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>)
declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>)
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>)
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>)

declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>)

declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>)
declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>)
declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>)
declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>)

declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*)