; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -mtriple=x86_64-unknown-unknown -S | FileCheck %s

; If the mask isn't constant, do nothing.

define <4 x float> @mload(i8* %f, <4 x i32> %mask) {
; CHECK-LABEL: @mload(
; CHECK-NEXT:    [[LD:%.*]] = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* [[F:%.*]], <4 x i32> [[MASK:%.*]])
; CHECK-NEXT:    ret <4 x float> [[LD]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
  ret <4 x float> %ld
}

; If the mask comes from a comparison, convert to an LLVM intrinsic. The backend should optimize further.

define <4 x float> @mload_v4f32_cmp(i8* %f, <4 x i32> %src) {
; CHECK-LABEL: @mload_v4f32_cmp(
; CHECK-NEXT:    [[ICMP:%.*]] = icmp ne <4 x i32> [[SRC:%.*]], zeroinitializer
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[CASTVEC]], i32 1, <4 x i1> [[ICMP]], <4 x float> zeroinitializer)
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %icmp = icmp ne <4 x i32> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i32>
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> %mask)
  ret <4 x float> %ld
}

; Zero mask returns a zero vector.

define <4 x float> @mload_zeros(i8* %f) {
; CHECK-LABEL: @mload_zeros(
; CHECK-NEXT:    ret <4 x float> zeroinitializer
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> zeroinitializer)
  ret <4 x float> %ld
}

; Only the sign bit matters.

define <4 x float> @mload_fake_ones(i8* %f) {
; CHECK-LABEL: @mload_fake_ones(
; CHECK-NEXT:    ret <4 x float> zeroinitializer
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>)
  ret <4 x float> %ld
}

; All mask bits are set, so this is just a vector load.

define <4 x float> @mload_real_ones(i8* %f) {
; CHECK-LABEL: @mload_real_ones(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    [[UNMASKEDLOAD:%.*]] = load <4 x float>, <4 x float>* [[CASTVEC]], align 1
; CHECK-NEXT:    ret <4 x float> [[UNMASKEDLOAD]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>)
  ret <4 x float> %ld
}

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.

define <4 x float> @mload_one_one(i8* %f) {
; CHECK-LABEL: @mload_one_one(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison>)
; CHECK-NEXT:    ret <4 x float> [[TMP1]]
;
  %ld = tail call <4 x float> @llvm.x86.avx.maskload.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x float> %ld
}
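
; Try doubles.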

define <2 x double> @mload_one_one_double(i8* %f) {
; CHECK-LABEL: @mload_one_one_double(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>, <2 x double> <double poison, double 0.000000e+00>)
; CHECK-NEXT:    ret <2 x double> [[TMP1]]
;
  %ld = tail call <2 x double> @llvm.x86.avx.maskload.pd(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x double> %ld
}
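
; Try 256-bit FP ops.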

define <8 x float> @mload_v8f32(i8* %f) {
; CHECK-LABEL: @mload_v8f32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x float> <float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float poison, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>)
; CHECK-NEXT:    ret <8 x float> [[TMP1]]
;
  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x float> %ld
}
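
; Two ANDed sign-extended compares still yield an all-ones/all-zeros lane mask, so this converts too.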

define <8 x float> @mload_v8f32_cmp(i8* %f, <8 x float> %src0, <8 x float> %src1) {
; CHECK-LABEL: @mload_v8f32_cmp(
; CHECK-NEXT:    [[ICMP0:%.*]] = fcmp one <8 x float> [[SRC0:%.*]], zeroinitializer
; CHECK-NEXT:    [[ICMP1:%.*]] = fcmp one <8 x float> [[SRC1:%.*]], zeroinitializer
; CHECK-NEXT:    [[MASK1:%.*]] = and <8 x i1> [[ICMP0]], [[ICMP1]]
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x float>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x float> @llvm.masked.load.v8f32.p0v8f32(<8 x float>* [[CASTVEC]], i32 1, <8 x i1> [[MASK1]], <8 x float> zeroinitializer)
; CHECK-NEXT:    ret <8 x float> [[TMP1]]
;
  %icmp0 = fcmp one <8 x float> %src0, zeroinitializer
  %icmp1 = fcmp one <8 x float> %src1, zeroinitializer
  %ext0 = sext <8 x i1> %icmp0 to <8 x i32>
  %ext1 = sext <8 x i1> %icmp1 to <8 x i32>
  %mask = and <8 x i32> %ext0, %ext1
  %ld = tail call <8 x float> @llvm.x86.avx.maskload.ps.256(i8* %f, <8 x i32> %mask)
  ret <8 x float> %ld
}

define <4 x double> @mload_v4f64(i8* %f) {
; CHECK-LABEL: @mload_v4f64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x double>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x double> @llvm.masked.load.v4f64.p0v4f64(<4 x double>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> <double poison, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00>)
; CHECK-NEXT:    ret <4 x double> [[TMP1]]
;
  %ld = tail call <4 x double> @llvm.x86.avx.maskload.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x double> %ld
}

; Try the AVX2 variants.

define <4 x i32> @mload_v4i32(i8* %f) {
; CHECK-LABEL: @mload_v4i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>, <4 x i32> <i32 0, i32 0, i32 0, i32 poison>)
; CHECK-NEXT:    ret <4 x i32> [[TMP1]]
;
  %ld = tail call <4 x i32> @llvm.x86.avx2.maskload.d(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>)
  ret <4 x i32> %ld
}

define <2 x i64> @mload_v2i64(i8* %f) {
; CHECK-LABEL: @mload_v2i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x i64>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>, <2 x i64> <i64 poison, i64 0>)
; CHECK-NEXT:    ret <2 x i64> [[TMP1]]
;
  %ld = tail call <2 x i64> @llvm.x86.avx2.maskload.q(i8* %f, <2 x i64> <i64 -1, i64 0>)
  ret <2 x i64> %ld
}

define <8 x i32> @mload_v8i32(i8* %f) {
; CHECK-LABEL: @mload_v8i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i32> @llvm.masked.load.v8i32.p0v8i32(<8 x i32>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false>, <8 x i32> <i32 0, i32 0, i32 0, i32 poison, i32 0, i32 0, i32 0, i32 0>)
; CHECK-NEXT:    ret <8 x i32> [[TMP1]]
;
  %ld = tail call <8 x i32> @llvm.x86.avx2.maskload.d.256(i8* %f, <8 x i32> <i32 0, i32 0, i32 0, i32 -1, i32 0, i32 0, i32 0, i32 0>)
  ret <8 x i32> %ld
}

define <4 x i64> @mload_v4i64(i8* %f) {
; CHECK-LABEL: @mload_v4i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i64>*
; CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i64> @llvm.masked.load.v4i64.p0v4i64(<4 x i64>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i64> <i64 poison, i64 0, i64 0, i64 0>)
; CHECK-NEXT:    ret <4 x i64> [[TMP1]]
;
  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 0, i64 0>)
  ret <4 x i64> %ld
}
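
; Note: the 'sge zero' compare appears to be canonicalized to ashr+not before the mask can be
; matched as a sign-extended compare, so the x86 intrinsic is left alone.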

define <4 x i64> @mload_v4i64_cmp(i8* %f, <4 x i64> %src) {
; CHECK-LABEL: @mload_v4i64_cmp(
; CHECK-NEXT:    [[SRC_LOBIT:%.*]] = ashr <4 x i64> [[SRC:%.*]], <i64 63, i64 63, i64 63, i64 63>
; CHECK-NEXT:    [[SRC_LOBIT_NOT:%.*]] = xor <4 x i64> [[SRC_LOBIT]], <i64 -1, i64 -1, i64 -1, i64 -1>
; CHECK-NEXT:    [[LD:%.*]] = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* [[F:%.*]], <4 x i64> [[SRC_LOBIT_NOT]])
; CHECK-NEXT:    ret <4 x i64> [[LD]]
;
  %icmp = icmp sge <4 x i64> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i64>
  %ld = tail call <4 x i64> @llvm.x86.avx2.maskload.q.256(i8* %f, <4 x i64> %mask)
  ret <4 x i64> %ld
}

; If the mask isn't constant, do nothing.

define void @mstore(i8* %f, <4 x i32> %mask, <4 x float> %v) {
; CHECK-LABEL: @mstore(
; CHECK-NEXT:    tail call void @llvm.x86.avx.maskstore.ps(i8* [[F:%.*]], <4 x i32> [[MASK:%.*]], <4 x float> [[V:%.*]])
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
  ret void
}

; If the mask comes from a comparison, convert to an LLVM intrinsic. The backend should optimize further.

define void @mstore_v4f32_cmp(i8* %f, <4 x i32> %src, <4 x float> %v) {
; CHECK-LABEL: @mstore_v4f32_cmp(
; CHECK-NEXT:    [[ICMP:%.*]] = icmp eq <4 x i32> [[SRC:%.*]], zeroinitializer
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> [[V:%.*]], <4 x float>* [[CASTVEC]], i32 1, <4 x i1> [[ICMP]])
; CHECK-NEXT:    ret void
;
  %icmp = icmp eq <4 x i32> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i32>
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> %mask, <4 x float> %v)
  ret void
}

; Zero mask is a nop.

define void @mstore_zeros(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_zeros(
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> zeroinitializer, <4 x float> %v)
  ret void
}

; Only the sign bit matters.

define void @mstore_fake_ones(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_fake_ones(
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 1, i32 2, i32 3, i32 2147483647>, <4 x float> %v)
  ret void
}

; All mask bits are set, so this is just a vector store.

define void @mstore_real_ones(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_real_ones(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    store <4 x float> [[V:%.*]], <4 x float>* [[CASTVEC]], align 1
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 -1, i32 -2, i32 -3, i32 -2147483648>, <4 x float> %v)
  ret void
}

; It's a constant mask, so convert to an LLVM intrinsic. The backend should optimize further.

define void @mstore_one_one(i8* %f, <4 x float> %v) {
; CHECK-LABEL: @mstore_one_one(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x float>*
; CHECK-NEXT:    call void @llvm.masked.store.v4f32.p0v4f32(<4 x float> [[V:%.*]], <4 x float>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 false, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps(i8* %f, <4 x i32> <i32 0, i32 0, i32 0, i32 -1>, <4 x float> %v)
  ret void
}
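
; Try doubles.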

define void @mstore_one_one_double(i8* %f, <2 x double> %v) {
; CHECK-LABEL: @mstore_one_one_double(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x double>*
; CHECK-NEXT:    call void @llvm.masked.store.v2f64.p0v2f64(<2 x double> [[V:%.*]], <2 x double>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.pd(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x double> %v)
  ret void
}

; Try 256-bit FP ops.

define void @mstore_v8f32(i8* %f, <8 x float> %v) {
; CHECK-LABEL: @mstore_v8f32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x float>*
; CHECK-NEXT:    call void @llvm.masked.store.v8f32.p0v8f32(<8 x float> [[V:%.*]], <8 x float>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.ps.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x float> %v)
  ret void
}

define void @mstore_v4f64(i8* %f, <4 x double> %v) {
; CHECK-LABEL: @mstore_v4f64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x double>*
; CHECK-NEXT:    call void @llvm.masked.store.v4f64.p0v4f64(<4 x double> [[V:%.*]], <4 x double>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx.maskstore.pd.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x double> %v)
  ret void
}
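
; Note: as with the load above, the compare appears to be canonicalized to ashr+not first,
; so the x86 intrinsic is left alone.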

define void @mstore_v4f64_cmp(i8* %f, <4 x i32> %src, <4 x double> %v) {
; CHECK-LABEL: @mstore_v4f64_cmp(
; CHECK-NEXT:    [[SRC_LOBIT:%.*]] = ashr <4 x i32> [[SRC:%.*]], <i32 31, i32 31, i32 31, i32 31>
; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[SRC_LOBIT]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    [[DOTNOT:%.*]] = sext <4 x i32> [[TMP1]] to <4 x i64>
; CHECK-NEXT:    tail call void @llvm.x86.avx.maskstore.pd.256(i8* [[F:%.*]], <4 x i64> [[DOTNOT]], <4 x double> [[V:%.*]])
; CHECK-NEXT:    ret void
;
  %icmp = icmp sge <4 x i32> %src, zeroinitializer
  %mask = sext <4 x i1> %icmp to <4 x i64>
  tail call void @llvm.x86.avx.maskstore.pd.256(i8* %f, <4 x i64> %mask, <4 x double> %v)
  ret void
}

; Try the AVX2 variants.

define void @mstore_v4i32(i8* %f, <4 x i32> %v) {
; CHECK-LABEL: @mstore_v4i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i32>*
; CHECK-NEXT:    call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> [[V:%.*]], <4 x i32>* [[CASTVEC]], i32 1, <4 x i1> <i1 false, i1 false, i1 true, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.d(i8* %f, <4 x i32> <i32 0, i32 1, i32 -1, i32 -2>, <4 x i32> %v)
  ret void
}

define void @mstore_v2i64(i8* %f, <2 x i64> %v) {
; CHECK-LABEL: @mstore_v2i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <2 x i64>*
; CHECK-NEXT:    call void @llvm.masked.store.v2i64.p0v2i64(<2 x i64> [[V:%.*]], <2 x i64>* [[CASTVEC]], i32 1, <2 x i1> <i1 true, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.q(i8* %f, <2 x i64> <i64 -1, i64 0>, <2 x i64> %v)
  ret void
}

define void @mstore_v8i32(i8* %f, <8 x i32> %v) {
; CHECK-LABEL: @mstore_v8i32(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <8 x i32>*
; CHECK-NEXT:    call void @llvm.masked.store.v8i32.p0v8i32(<8 x i32> [[V:%.*]], <8 x i32>* [[CASTVEC]], i32 1, <8 x i1> <i1 false, i1 false, i1 false, i1 false, i1 true, i1 true, i1 true, i1 true>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.d.256(i8* %f, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 -1, i32 -2, i32 -3, i32 -4>, <8 x i32> %v)
  ret void
}

define void @mstore_v4i64(i8* %f, <4 x i64> %v) {
; CHECK-LABEL: @mstore_v4i64(
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i64>*
; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[CASTVEC]], i32 1, <4 x i1> <i1 true, i1 false, i1 false, i1 false>)
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.avx2.maskstore.q.256(i8* %f, <4 x i64> <i64 -1, i64 0, i64 1, i64 2>, <4 x i64> %v)
  ret void
}
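
; Two ANDed sign-extended compares still yield an all-ones/all-zeros lane mask, so this converts too.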

define void @mstore_v4i64_cmp(i8* %f, <4 x i64> %src0, <4 x i64> %src1, <4 x i64> %v) {
; CHECK-LABEL: @mstore_v4i64_cmp(
; CHECK-NEXT:    [[ICMP0:%.*]] = icmp eq <4 x i64> [[SRC0:%.*]], zeroinitializer
; CHECK-NEXT:    [[ICMP1:%.*]] = icmp ne <4 x i64> [[SRC1:%.*]], zeroinitializer
; CHECK-NEXT:    [[MASK1:%.*]] = and <4 x i1> [[ICMP0]], [[ICMP1]]
; CHECK-NEXT:    [[CASTVEC:%.*]] = bitcast i8* [[F:%.*]] to <4 x i64>*
; CHECK-NEXT:    call void @llvm.masked.store.v4i64.p0v4i64(<4 x i64> [[V:%.*]], <4 x i64>* [[CASTVEC]], i32 1, <4 x i1> [[MASK1]])
; CHECK-NEXT:    ret void
;
  %icmp0 = icmp eq <4 x i64> %src0, zeroinitializer
  %icmp1 = icmp ne <4 x i64> %src1, zeroinitializer
  %ext0 = sext <4 x i1> %icmp0 to <4 x i64>
  %ext1 = sext <4 x i1> %icmp1 to <4 x i64>
  %mask = and <4 x i64> %ext0, %ext1
  tail call void @llvm.x86.avx2.maskstore.q.256(i8* %f, <4 x i64> %mask, <4 x i64> %v)
  ret void
}

; The original SSE2 masked store variant.

define void @mstore_v16i8_sse2_zeros(<16 x i8> %d, i8* %p) {
; CHECK-LABEL: @mstore_v16i8_sse2_zeros(
; CHECK-NEXT:    ret void
;
  tail call void @llvm.x86.sse2.maskmov.dqu(<16 x i8> %d, <16 x i8> zeroinitializer, i8* %p)
  ret void
}

declare <4 x float> @llvm.x86.avx.maskload.ps(i8*, <4 x i32>)
declare <2 x double> @llvm.x86.avx.maskload.pd(i8*, <2 x i64>)
declare <8 x float> @llvm.x86.avx.maskload.ps.256(i8*, <8 x i32>)
declare <4 x double> @llvm.x86.avx.maskload.pd.256(i8*, <4 x i64>)

declare <4 x i32> @llvm.x86.avx2.maskload.d(i8*, <4 x i32>)
declare <2 x i64> @llvm.x86.avx2.maskload.q(i8*, <2 x i64>)
declare <8 x i32> @llvm.x86.avx2.maskload.d.256(i8*, <8 x i32>)
declare <4 x i64> @llvm.x86.avx2.maskload.q.256(i8*, <4 x i64>)

declare void @llvm.x86.avx.maskstore.ps(i8*, <4 x i32>, <4 x float>)
declare void @llvm.x86.avx.maskstore.pd(i8*, <2 x i64>, <2 x double>)
declare void @llvm.x86.avx.maskstore.ps.256(i8*, <8 x i32>, <8 x float>)
declare void @llvm.x86.avx.maskstore.pd.256(i8*, <4 x i64>, <4 x double>)

declare void @llvm.x86.avx2.maskstore.d(i8*, <4 x i32>, <4 x i32>)
declare void @llvm.x86.avx2.maskstore.q(i8*, <2 x i64>, <2 x i64>)
declare void @llvm.x86.avx2.maskstore.d.256(i8*, <8 x i32>, <8 x i32>)
declare void @llvm.x86.avx2.maskstore.q.256(i8*, <4 x i64>, <4 x i64>)

declare void @llvm.x86.sse2.maskmov.dqu(<16 x i8>, <16 x i8>, i8*)