; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -mtriple=x86_64-unknown-unknown -S | FileCheck %s
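
; These tests exercise the x86 AVX/AVX2 masked load/store intrinsics. When the
; mask is a compile-time constant or comes from a vector compare, instcombine
; folds the call: an all-clear mask deletes the operation, a mask with every
; sign bit set becomes a plain load/store, and anything else becomes the generic
; llvm.masked.load / llvm.masked.store intrinsic.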

; If the mask isn't constant, do nothing.

define <4 x float> @mload(ptr %f, <4 x i32> %mask) {
; CHECK-LABEL: @mload(
; CHECK-NEXT: [[LD:%.*]] = tail call <4 x float> @llvm.x86.avx.maskload.ps(ptr [[F:%.*]], <4 x i32> [[MASK:%.*]])
; CHECK-NEXT: ret <4 x float> [[LD]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(ptr %f, <4 x i32> %mask)
  ret <4 x float> %ld
}

; If the mask comes from a comparison, convert to an LLVM intrinsic. The backend should optimize further.

define <4 x float> @mload_v4f32_cmp(ptr %f, <4 x i32> %src) {
; CHECK-LABEL: @mload_v4f32_cmp(
; CHECK-NEXT: [[ICMP:%.*]] = icmp ne <4 x i32> [[SRC:%.*]], zeroinitializer
; CHECK-NEXT: [[LD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[F:%.*]], i32 1, <4 x i1> [[ICMP]], <4 x float> zeroinitializer)
; CHECK-NEXT: ret <4 x float> [[LD]]
;
  %icmp = icmp ne <4 x i32> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i32>
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(ptr %f, <4 x i32> %mask)
  ret <4 x float> %ld
}
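
; Note: each lane of the sign-extended compare is all-ones or all-zeros, so
; instcombine can feed the <4 x i1> compare result directly to llvm.masked.load
; and the sext becomes dead.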

; Zero mask returns a zero vector.

define <4 x float> @mload_zeros(ptr %f) {
; CHECK-LABEL: @mload_zeros(
; CHECK-NEXT: ret <4 x float> zeroinitializer
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(ptr %f, <4 x i32> zeroinitializer)
  ret <4 x float> %ld
}

; Only the sign bit matters.

define <4 x float> @mload_fake_ones(ptr %f) {
; CHECK-LABEL: @mload_fake_ones(
; CHECK-NEXT: ret <4 x float> zeroinitializer
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(ptr %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>)
  ret <4 x float> %ld
}

; All mask sign bits are set, so this is just a vector load.

define <4 x float> @mload_real_ones(ptr %f) {
; CHECK-LABEL: @mload_real_ones(
; CHECK-NEXT: [[UNMASKEDLOAD:%.*]] = load <4 x float>, ptr [[F:%.*]], align 1
; CHECK-NEXT: ret <4 x float> [[UNMASKEDLOAD]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(ptr %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>)
  ret <4 x float> %ld
}

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.

define <4 x float> @mload_one_one(ptr %f) {
; CHECK-LABEL: @mload_one_one(
; CHECK-NEXT: [[LD:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0(ptr [[F:%.*]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison>)
; CHECK-NEXT: ret <4 x float> [[LD]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(ptr %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x float> %ld
}
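
; Note the shape of the converted call above: alignment 1, and a passthrough that
; is zero in the disabled lanes (matching maskload's zero-fill semantics) and
; poison in the one lane the load actually writes.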

define <2 x double> @mload_one_one_double(ptr %f) {
; CHECK-LABEL: @mload_one_one_double(
; CHECK-NEXT: [[LD:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0(ptr [[F:%.*]], i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> <double poison, double 0.000000e+00>)
; CHECK-NEXT: ret <2 x double> [[LD]]
;
  %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(ptr %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x double> %ld
}

define <8 x float> @mload_v8f32(ptr %f) {
; CHECK-LABEL: @mload_v8f32(
; CHECK-NEXT: [[LD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[F:%.*]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>)
; CHECK-NEXT: ret <8 x float> [[LD]]
;
  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(ptr %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x float> %ld
}

define <8 x float> @mload_v8f32_cmp(ptr %f, <8 x float> %src0, <8 x float> %src1) {
; CHECK-LABEL: @mload_v8f32_cmp(
; CHECK-NEXT: [[ICMP0:%.*]] = fcmp one <8 x float> [[SRC0:%.*]], zeroinitializer
; CHECK-NEXT: [[ICMP1:%.*]] = fcmp one <8 x float> [[SRC1:%.*]], zeroinitializer
; CHECK-NEXT: [[MASK1:%.*]] = and <8 x i1> [[ICMP0]], [[ICMP1]]
; CHECK-NEXT: [[LD:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0(ptr [[F:%.*]], i32 1, <8 x i1> [[MASK1]], <8 x float> zeroinitializer)
; CHECK-NEXT: ret <8 x float> [[LD]]
;
  %icmp0 = fcmp one <8 x float> %src0, zeroinitializer
  %icmp1 = fcmp one <8 x float> %src1, zeroinitializer
  %ext0 = sext <8 x i1> %icmp0 to <8 x i32>
  %ext1 = sext <8 x i1> %icmp1 to <8 x i32>
  %mask = and <8 x i32> %ext0, %ext1
  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(ptr %f, <8 x i32> %mask)
  ret <8 x float> %ld
}

define <4 x double> @mload_v4f64(ptr %f) {
; CHECK-LABEL: @mload_v4f64(
; CHECK-NEXT: [[LD:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0(ptr [[F:%.*]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> <double poison, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>)
; CHECK-NEXT: ret <4 x double> [[LD]]
;
  %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(ptr %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x double> %ld
}

; Try the AVX2 variants.

define <4 x i32> @mload_v4i32(ptr %f) {
; CHECK-LABEL: @mload_v4i32(
; CHECK-NEXT: [[LD:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0(ptr [[F:%.*]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> <i32 0, i32 0, i32 0, i32 poison>)
; CHECK-NEXT: ret <4 x i32> [[LD]]
;
  %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(ptr %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x i32> %ld
}

define <2 x i64> @mload_v2i64(ptr %f) {
; CHECK-LABEL: @mload_v2i64(
; CHECK-NEXT: [[LD:%.*]] = call <2 x i64> @llvm.masked.load.v2i64.p0(ptr [[F:%.*]], i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> <i64 poison, i64 0>)
; CHECK-NEXT: ret <2 x i64> [[LD]]
;
  %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(ptr %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x i64> %ld
}

define <8 x i32> @mload_v8i32(ptr %f) {
; CHECK-LABEL: @mload_v8i32(
; CHECK-NEXT: [[LD:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0(ptr [[F:%.*]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> <i32 0, i32 0, i32 0, i32 poison, i32 0, i32 0, i32 0, i32 0>)
; CHECK-NEXT: ret <8 x i32> [[LD]]
;
  %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x i32> %ld
}

define <4 x i64> @mload_v4i64(ptr %f) {
; CHECK-LABEL: @mload_v4i64(
; CHECK-NEXT: [[LD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[F:%.*]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> <i64 poison, i64 0, i64 0, i64 0>)
; CHECK-NEXT: ret <4 x i64> [[LD]]
;
  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x i64> %ld
}

define <4 x i64> @mload_v4i64_cmp(ptr %f, <4 x i64> %src) {
; CHECK-LABEL: @mload_v4i64_cmp(
; CHECK-NEXT: [[ICMP:%.*]] = icmp sgt <4 x i64> [[SRC:%.*]], splat (i64 -1)
; CHECK-NEXT: [[LD:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0(ptr [[F:%.*]], i32 1, <4 x i1> [[ICMP]], <4 x i64> zeroinitializer)
; CHECK-NEXT: ret <4 x i64> [[LD]]
;
  %icmp = icmp sge <4 x i64> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i64>
  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr %f, <4 x i64> %mask)
  ret <4 x i64> %ld
}
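
; Note: instcombine also canonicalizes 'icmp sge X, zeroinitializer' to
; 'icmp sgt X, splat(-1)', which is why the check above (and @mstore_v4f64_cmp
; below) expects sgt.

; The maskstore intrinsics below get the analogous folds.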

; If the mask isn't constant, do nothing.

define void @mstore(ptr %f, <4 x i32> %mask, <4 x float> %v) {
; CHECK-LABEL: @mstore(
; CHECK-NEXT: tail call void @llvm.x86.avx.maskstore.ps(ptr [[F:%.*]], <4 x i32> [[MASK:%.*]], <4 x float> [[V:%.*]])
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(ptr %f, <4 x i32> %mask, <4 x float> %v)
  ret void
}

; If the mask comes from a comparison, convert to an LLVM intrinsic. The backend should optimize further.

define void @mstore_v4f32_cmp(ptr %f, <4 x i32> %src, <4 x float> %v) {
; CHECK-LABEL: @mstore_v4f32_cmp(
; CHECK-NEXT: [[ICMP:%.*]] = icmp eq <4 x i32> [[SRC:%.*]], zeroinitializer
; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> [[ICMP]])
; CHECK-NEXT: ret void
;
  %icmp = icmp eq <4 x i32> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i32>
  tail call void @llvm.x86.avx.maskstore.ps(ptr %f, <4 x i32> %mask, <4 x float> %v)
  ret void
}

; Zero mask is a nop.

define void @mstore_zeros(ptr %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_zeros(
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(ptr %f, <4 x i32> zeroinitializer, <4 x float> %v)
  ret void
}

; Only the sign bit matters.

define void @mstore_fake_ones(ptr %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_fake_ones(
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(ptr %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>, <4 x float> %v)
  ret void
}

; All mask sign bits are set, so this is just a vector store.

define void @mstore_real_ones(ptr %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_real_ones(
; CHECK-NEXT: store <4 x float> [[V:%.*]], ptr [[F:%.*]], align 1
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(ptr %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>, <4 x float> %v)
  ret void
}

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.

define void @mstore_one_one(ptr %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_one_one(
; CHECK-NEXT: call void @llvm.masked.store.v4f32.p0(<4 x float> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(ptr %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>, <4 x float> %v)
  ret void
}
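
; The converted store takes the value first, then the pointer, an alignment of 1,
; and the <4 x i1> mask; unlike the load case there is no passthrough operand.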

define void @mstore_one_one_double(ptr %f, <2 x double> %v) {
; CHECK-LABEL: @mstore_one_one_double(
; CHECK-NEXT: call void @llvm.masked.store.v2f64.p0(<2 x double> [[V:%.*]], ptr [[F:%.*]], i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.pd(ptr %f, <2 x i64> <i64 -1, i64 0>, <2 x double> %v)
  ret void
}

; Try 256-bit FP ops.

define void @mstore_v8f32(ptr %f, <8 x float> %v) {
; CHECK-LABEL: @mstore_v8f32(
; CHECK-NEXT: call void @llvm.masked.store.v8f32.p0(<8 x float> [[V:%.*]], ptr [[F:%.*]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.ps.256(ptr %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x float> %v)
  ret void
}

define void @mstore_v4f64(ptr %f, <4 x double> %v) {
; CHECK-LABEL: @mstore_v4f64(
; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx.maskstore.pd.256(ptr %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x double> %v)
  ret void
}

define void @mstore_v4f64_cmp(ptr %f, <4 x i32> %src, <4 x double> %v) {
; CHECK-LABEL: @mstore_v4f64_cmp(
; CHECK-NEXT: [[ICMP:%.*]] = icmp sgt <4 x i32> [[SRC:%.*]], splat (i32 -1)
; CHECK-NEXT: call void @llvm.masked.store.v4f64.p0(<4 x double> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> [[ICMP]])
; CHECK-NEXT: ret void
;
  %icmp = icmp sge <4 x i32> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i64>
  tail call void @llvm.x86.avx.maskstore.pd.256(ptr %f, <4 x i64> %mask, <4 x double> %v)
  ret void
}
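
; The compare here is on <4 x i32> while the mask is sign-extended to <4 x i64>;
; the fold only needs the <4 x i1> compare result, so the width mismatch is fine.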

; Try the AVX2 variants.

define void @mstore_v4i32(ptr %f, <4 x i32> %v) {
; CHECK-LABEL: @mstore_v4i32(
; CHECK-NEXT: call void @llvm.masked.store.v4i32.p0(<4 x i32> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> <i1 false, i1 false, i1 true, i1 true>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx2.maskstore.d(ptr %f, <4 x i32> <i32 0, i32 1, i32 -1, i32 -2>, <4 x i32> %v)
  ret void
}

define void @mstore_v2i64(ptr %f, <2 x i64> %v) {
; CHECK-LABEL: @mstore_v2i64(
; CHECK-NEXT: call void @llvm.masked.store.v2i64.p0(<2 x i64> [[V:%.*]], ptr [[F:%.*]], i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx2.maskstore.q(ptr %f, <2 x i64> <i64 -1, i64 0>, <2 x i64> %v)
  ret void
}

define void @mstore_v8i32(ptr %f, <8 x i32> %v) {
; CHECK-LABEL: @mstore_v8i32(
; CHECK-NEXT: call void @llvm.masked.store.v8i32.p0(<8 x i32> [[V:%.*]], ptr [[F:%.*]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx2.maskstore.d.256(ptr %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x i32> %v)
  ret void
}

define void @mstore_v4i64(ptr %f, <4 x i64> %v) {
; CHECK-LABEL: @mstore_v4i64(
; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.avx2.maskstore.q.256(ptr %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x i64> %v)
  ret void
}

define void @mstore_v4i64_cmp(ptr %f, <4 x i64> %src0, <4 x i64> %src1, <4 x i64> %v) {
; CHECK-LABEL: @mstore_v4i64_cmp(
; CHECK-NEXT: [[ICMP0:%.*]] = icmp eq <4 x i64> [[SRC0:%.*]], zeroinitializer
; CHECK-NEXT: [[ICMP1:%.*]] = icmp ne <4 x i64> [[SRC1:%.*]], zeroinitializer
; CHECK-NEXT: [[MASK1:%.*]] = and <4 x i1> [[ICMP0]], [[ICMP1]]
; CHECK-NEXT: call void @llvm.masked.store.v4i64.p0(<4 x i64> [[V:%.*]], ptr [[F:%.*]], i32 1, <4 x i1> [[MASK1]])
; CHECK-NEXT: ret void
;
  %icmp0 = icmp eq <4 x i64> %src0, zeroinitializer
  %icmp1 = icmp ne <4 x i64> %src1, zeroinitializer
  %ext0 = sext <4 x i1> %icmp0 to <4 x i64>
  %ext1 = sext <4 x i1> %icmp1 to <4 x i64>
  %mask = and <4 x i64> %ext0, %ext1
  tail call void @llvm.x86.avx2.maskstore.q.256(ptr %f, <4 x i64> %mask, <4 x i64> %v)
  ret void
}

; The original SSE2 masked store variant: a zero mask removes the store here too.

define void @mstore_v16i8_sse2_zeros(<16 x i8> %d, ptr %p) {
; CHECK-LABEL: @mstore_v16i8_sse2_zeros(
; CHECK-NEXT: ret void
;
  tail call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %d, <16 x i8> zeroinitializer, ptr %p)
  ret void
}

declare <4 x float> @llvm.x86.avx.maskload.ps(ptr, <4 x i32>)
declare <2 x double> @llvm.x86.avx.maskload.pd(ptr, <2 x i64>)
declare <8 x float> @llvm.x86.avx.maskload.ps.256(ptr, <8 x i32>)
declare <4 x double> @llvm.x86.avx.maskload.pd.256(ptr, <4 x i64>)

declare <4 x i32> @llvm.x86.avx2.maskload.d(ptr, <4 x i32>)
declare <2 x i64> @llvm.x86.avx2.maskload.q(ptr, <2 x i64>)
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(ptr, <8 x i32>)
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(ptr, <4 x i64>)

declare void @llvm.x86.avx.maskstore.ps(ptr, <4 x i32>, <4 x float>)
declare void @llvm.x86.avx.maskstore.pd(ptr, <2 x i64>, <2 x double>)
declare void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>)
declare void @llvm.x86.avx.maskstore.pd.256(ptr, <4 x i64>, <4 x double>)

declare void @llvm.x86.avx2.maskstore.d(ptr, <4 x i32>, <4 x i32>)
declare void @llvm.x86.avx2.maskstore.q(ptr, <2 x i64>, <2 x i64>)
declare void @llvm.x86.avx2.maskstore.d.256(ptr, <8 x i32>, <8 x i32>)
declare void @llvm.x86.avx2.maskstore.q.256(ptr, <4 x i64>, <4 x i64>)

declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, ptr)