; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s

; Skylake-avx512 target supports masked load/store for i8 and i16 vectors
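; The <N x i1> mask arguments arrive with one byte (or word) element per mask
; bit, so the expected lowering in the tests below shifts the i1 bit into the
; MSB with vpsllw and materializes the k-register predicate with
; vpmovb2m/vpmovw2m before the masked move or blend.
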
define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_load_16xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %addr, i32 4, <16 x i1>%mask, <16 x i8> undef)
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)

define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_load_32xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
  %res = call <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>* %addr, i32 4, <32 x i1>%mask, <32 x i8> %val)
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)

define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_load_64xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
  %res = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* %addr, i32 4, <64 x i1>%mask, <64 x i8> %val)
  ret <64 x i8> %res
}
declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)

define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_load_8xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr, i32 4, <8 x i1>%mask, <8 x i16> undef)
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)

define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_load_16xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>* %addr, i32 4, <16 x i1>%mask, <16 x i16> zeroinitializer)
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)

define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_load_32xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmw (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
  %res = call <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>* %addr, i32 4, <32 x i1>%mask, <32 x i16> %val)
  ret <32 x i16> %res
}
declare <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>)

define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 %xmm1, (%rdi) {%k1}
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %val, <16 x i8>* %addr, i32 4, <16 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)

define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu8 %ymm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> %val, <32 x i8>* %addr, i32 4, <32 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v32i8.p0v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)

define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_store_64xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %val, <64 x i8>* %addr, i32 4, <64 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)

define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %xmm1, (%rdi) {%k1}
; CHECK-NEXT: retq
  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %val, <8 x i16>* %addr, i32 4, <8 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)

define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %ymm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16i16.p0v16i16(<16 x i16> %val, <16 x i16>* %addr, i32 4, <16 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v16i16.p0v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>)

define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_store_32xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v32i16.p0v32i16(<32 x i16> %val, <32 x i16>* %addr, i32 4, <32 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v32i16.p0v32i16(<32 x i16>, <32 x i16>*, i32, <32 x i1>)

; Make sure we scalarize masked loads of f16.
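; With no native f16 masked load available, the expansion below tests each bit
; of the vpmovmskb result and, for set bits, loads one half element, widens it
; with vcvtph2ps, and later narrows it back with vcvtps2ph when assembling the
; returned vector.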
define <16 x half> @test_mask_load_16xf16(<16 x i1> %mask, <16 x half>* %addr, <16 x half> %val) {
; CHECK-LABEL: test_mask_load_16xf16:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovmskb %xmm0, %ecx
; CHECK-NEXT: testb $1, %cl
; CHECK-NEXT: je LBB12_1
; CHECK-NEXT: ## %bb.2: ## %cond.load
; CHECK-NEXT: movswl (%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm8
; CHECK-NEXT: jmp LBB12_3
; CHECK-NEXT: LBB12_1:
; CHECK-NEXT: vxorps %xmm8, %xmm8, %xmm8
; CHECK-NEXT: LBB12_3: ## %else
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vxorps %xmm9, %xmm9, %xmm9
; CHECK-NEXT: testb $2, %cl
; CHECK-NEXT: je LBB12_4
; CHECK-NEXT: ## %bb.5: ## %cond.load1
; CHECK-NEXT: movswl 2(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vmovaps %xmm2, %xmm1
; CHECK-NEXT: vmovaps %xmm2, %xmm7
; CHECK-NEXT: vmovaps %xmm2, %xmm6
; CHECK-NEXT: vmovaps %xmm2, %xmm5
; CHECK-NEXT: vmovaps %xmm2, %xmm4
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vmovaps %xmm2, %xmm16
; CHECK-NEXT: vmovaps %xmm2, %xmm15
; CHECK-NEXT: vmovaps %xmm2, %xmm14
; CHECK-NEXT: vmovaps %xmm2, %xmm13
; CHECK-NEXT: vmovaps %xmm2, %xmm12
; CHECK-NEXT: vmovaps %xmm2, %xmm11
; CHECK-NEXT: vmovaps %xmm2, %xmm10
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm2
; CHECK-NEXT: testb $4, %cl
; CHECK-NEXT: jne LBB12_7
; CHECK-NEXT: jmp LBB12_8
; CHECK-NEXT: LBB12_4:
; CHECK-NEXT: vmovaps %xmm2, %xmm1
; CHECK-NEXT: vmovaps %xmm2, %xmm7
; CHECK-NEXT: vmovaps %xmm2, %xmm6
; CHECK-NEXT: vmovaps %xmm2, %xmm5
; CHECK-NEXT: vmovaps %xmm2, %xmm4
; CHECK-NEXT: vmovaps %xmm2, %xmm3
; CHECK-NEXT: vmovaps %xmm2, %xmm16
; CHECK-NEXT: vmovaps %xmm2, %xmm15
; CHECK-NEXT: vmovaps %xmm2, %xmm14
; CHECK-NEXT: vmovaps %xmm2, %xmm13
; CHECK-NEXT: vmovaps %xmm2, %xmm12
; CHECK-NEXT: vmovaps %xmm2, %xmm11
; CHECK-NEXT: vmovaps %xmm2, %xmm10
; CHECK-NEXT: testb $4, %cl
; CHECK-NEXT: je LBB12_8
; CHECK-NEXT: LBB12_7: ## %cond.load4
; CHECK-NEXT: movswl 4(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm1
; CHECK-NEXT: LBB12_8: ## %else5
; CHECK-NEXT: testb $8, %cl
; CHECK-NEXT: jne LBB12_9
; CHECK-NEXT: ## %bb.10: ## %else8
; CHECK-NEXT: testb $16, %cl
; CHECK-NEXT: jne LBB12_11
; CHECK-NEXT: LBB12_12: ## %else11
; CHECK-NEXT: testb $32, %cl
; CHECK-NEXT: jne LBB12_13
; CHECK-NEXT: LBB12_14: ## %else14
; CHECK-NEXT: testb $64, %cl
; CHECK-NEXT: jne LBB12_15
; CHECK-NEXT: LBB12_16: ## %else17
; CHECK-NEXT: testb $-128, %cl
; CHECK-NEXT: jne LBB12_17
; CHECK-NEXT: LBB12_18: ## %else20
; CHECK-NEXT: testl $256, %ecx ## imm = 0x100
; CHECK-NEXT: jne LBB12_19
; CHECK-NEXT: LBB12_20: ## %else23
; CHECK-NEXT: testl $512, %ecx ## imm = 0x200
; CHECK-NEXT: jne LBB12_21
; CHECK-NEXT: LBB12_22: ## %else26
; CHECK-NEXT: testl $1024, %ecx ## imm = 0x400
; CHECK-NEXT: jne LBB12_23
; CHECK-NEXT: LBB12_24: ## %else29
; CHECK-NEXT: testl $2048, %ecx ## imm = 0x800
; CHECK-NEXT: jne LBB12_25
; CHECK-NEXT: LBB12_26: ## %else32
; CHECK-NEXT: testl $4096, %ecx ## imm = 0x1000
; CHECK-NEXT: jne LBB12_27
; CHECK-NEXT: LBB12_28: ## %else35
; CHECK-NEXT: testl $8192, %ecx ## imm = 0x2000
; CHECK-NEXT: jne LBB12_29
; CHECK-NEXT: LBB12_30: ## %else38
; CHECK-NEXT: testl $16384, %ecx ## imm = 0x4000
; CHECK-NEXT: jne LBB12_31
; CHECK-NEXT: LBB12_32: ## %else41
; CHECK-NEXT: testl $32768, %ecx ## imm = 0x8000
; CHECK-NEXT: je LBB12_34
; CHECK-NEXT: LBB12_33: ## %cond.load43
; CHECK-NEXT: movswl 30(%rsi), %ecx
; CHECK-NEXT: vmovd %ecx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm9
; CHECK-NEXT: LBB12_34: ## %else44
; CHECK-NEXT: vcvtps2ph $4, %xmm8, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, (%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm2, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 2(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 4(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm7, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 6(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm6, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 8(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm5, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 10(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm4, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 12(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm3, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 14(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm16, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 16(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm15, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 18(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm14, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 20(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm13, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 22(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm12, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 24(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm11, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 26(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm10, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 28(%rax)
; CHECK-NEXT: vcvtps2ph $4, %xmm9, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 30(%rax)
; CHECK-NEXT: retq
; CHECK-NEXT: LBB12_9: ## %cond.load7
; CHECK-NEXT: movswl 6(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm7
; CHECK-NEXT: testb $16, %cl
; CHECK-NEXT: je LBB12_12
; CHECK-NEXT: LBB12_11: ## %cond.load10
; CHECK-NEXT: movswl 8(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm6
; CHECK-NEXT: testb $32, %cl
; CHECK-NEXT: je LBB12_14
; CHECK-NEXT: LBB12_13: ## %cond.load13
; CHECK-NEXT: movswl 10(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm5
; CHECK-NEXT: testb $64, %cl
; CHECK-NEXT: je LBB12_16
; CHECK-NEXT: LBB12_15: ## %cond.load16
; CHECK-NEXT: movswl 12(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm4
; CHECK-NEXT: testb $-128, %cl
; CHECK-NEXT: je LBB12_18
; CHECK-NEXT: LBB12_17: ## %cond.load19
; CHECK-NEXT: movswl 14(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm3
; CHECK-NEXT: testl $256, %ecx ## imm = 0x100
; CHECK-NEXT: je LBB12_20
; CHECK-NEXT: LBB12_19: ## %cond.load22
; CHECK-NEXT: movswl 16(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm16
; CHECK-NEXT: testl $512, %ecx ## imm = 0x200
; CHECK-NEXT: je LBB12_22
; CHECK-NEXT: LBB12_21: ## %cond.load25
; CHECK-NEXT: movswl 18(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm15
; CHECK-NEXT: testl $1024, %ecx ## imm = 0x400
; CHECK-NEXT: je LBB12_24
; CHECK-NEXT: LBB12_23: ## %cond.load28
; CHECK-NEXT: movswl 20(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm14
; CHECK-NEXT: testl $2048, %ecx ## imm = 0x800
; CHECK-NEXT: je LBB12_26
; CHECK-NEXT: LBB12_25: ## %cond.load31
; CHECK-NEXT: movswl 22(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm13
; CHECK-NEXT: testl $4096, %ecx ## imm = 0x1000
; CHECK-NEXT: je LBB12_28
; CHECK-NEXT: LBB12_27: ## %cond.load34
; CHECK-NEXT: movswl 24(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm12
; CHECK-NEXT: testl $8192, %ecx ## imm = 0x2000
; CHECK-NEXT: je LBB12_30
; CHECK-NEXT: LBB12_29: ## %cond.load37
; CHECK-NEXT: movswl 26(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm11
; CHECK-NEXT: testl $16384, %ecx ## imm = 0x4000
; CHECK-NEXT: je LBB12_32
; CHECK-NEXT: LBB12_31: ## %cond.load40
; CHECK-NEXT: movswl 28(%rsi), %edx
; CHECK-NEXT: vmovd %edx, %xmm0
; CHECK-NEXT: vcvtph2ps %xmm0, %xmm10
; CHECK-NEXT: testl $32768, %ecx ## imm = 0x8000
; CHECK-NEXT: jne LBB12_33
; CHECK-NEXT: jmp LBB12_34
  %res = call <16 x half> @llvm.masked.load.v16f16(<16 x half>* %addr, i32 4, <16 x i1>%mask, <16 x half> zeroinitializer)
  ret <16 x half> %res
}
declare <16 x half> @llvm.masked.load.v16f16(<16 x half>*, i32, <16 x i1>, <16 x half>)

; Make sure we scalarize masked stores of f16.
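; As with the masked f16 load above, the store is expanded into one branch per
; mask bit: each set bit converts its element with vcvtps2ph and writes the
; 16-bit result at the corresponding offset from %rdi.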
define void @test_mask_store_16xf16(<16 x i1> %mask, <16 x half>* %addr, <16 x half> %val) {
; CHECK-LABEL: test_mask_store_16xf16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovmskb %xmm0, %eax
; CHECK-NEXT: testb $1, %al
; CHECK-NEXT: jne LBB13_1
; CHECK-NEXT: ## %bb.2: ## %else
; CHECK-NEXT: testb $2, %al
; CHECK-NEXT: jne LBB13_3
; CHECK-NEXT: LBB13_4: ## %else2
; CHECK-NEXT: testb $4, %al
; CHECK-NEXT: jne LBB13_5
; CHECK-NEXT: LBB13_6: ## %else4
; CHECK-NEXT: testb $8, %al
; CHECK-NEXT: jne LBB13_7
; CHECK-NEXT: LBB13_8: ## %else6
; CHECK-NEXT: testb $16, %al
; CHECK-NEXT: jne LBB13_9
; CHECK-NEXT: LBB13_10: ## %else8
; CHECK-NEXT: testb $32, %al
; CHECK-NEXT: jne LBB13_11
; CHECK-NEXT: LBB13_12: ## %else10
; CHECK-NEXT: testb $64, %al
; CHECK-NEXT: jne LBB13_13
; CHECK-NEXT: LBB13_14: ## %else12
; CHECK-NEXT: testb $-128, %al
; CHECK-NEXT: jne LBB13_15
; CHECK-NEXT: LBB13_16: ## %else14
; CHECK-NEXT: testl $256, %eax ## imm = 0x100
; CHECK-NEXT: jne LBB13_17
; CHECK-NEXT: LBB13_18: ## %else16
; CHECK-NEXT: testl $512, %eax ## imm = 0x200
; CHECK-NEXT: jne LBB13_19
; CHECK-NEXT: LBB13_20: ## %else18
; CHECK-NEXT: testl $1024, %eax ## imm = 0x400
; CHECK-NEXT: jne LBB13_21
; CHECK-NEXT: LBB13_22: ## %else20
; CHECK-NEXT: testl $2048, %eax ## imm = 0x800
; CHECK-NEXT: jne LBB13_23
; CHECK-NEXT: LBB13_24: ## %else22
; CHECK-NEXT: testl $4096, %eax ## imm = 0x1000
; CHECK-NEXT: jne LBB13_25
; CHECK-NEXT: LBB13_26: ## %else24
; CHECK-NEXT: testl $8192, %eax ## imm = 0x2000
; CHECK-NEXT: jne LBB13_27
; CHECK-NEXT: LBB13_28: ## %else26
; CHECK-NEXT: testl $16384, %eax ## imm = 0x4000
; CHECK-NEXT: jne LBB13_29
; CHECK-NEXT: LBB13_30: ## %else28
; CHECK-NEXT: testl $32768, %eax ## imm = 0x8000
; CHECK-NEXT: jne LBB13_31
; CHECK-NEXT: LBB13_32: ## %else30
; CHECK-NEXT: retq
; CHECK-NEXT: LBB13_1: ## %cond.store
; CHECK-NEXT: vcvtps2ph $4, %xmm1, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, (%rdi)
; CHECK-NEXT: testb $2, %al
; CHECK-NEXT: je LBB13_4
; CHECK-NEXT: LBB13_3: ## %cond.store1
; CHECK-NEXT: vcvtps2ph $4, %xmm2, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 2(%rdi)
; CHECK-NEXT: testb $4, %al
; CHECK-NEXT: je LBB13_6
; CHECK-NEXT: LBB13_5: ## %cond.store3
; CHECK-NEXT: vcvtps2ph $4, %xmm3, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 4(%rdi)
; CHECK-NEXT: testb $8, %al
; CHECK-NEXT: je LBB13_8
; CHECK-NEXT: LBB13_7: ## %cond.store5
; CHECK-NEXT: vcvtps2ph $4, %xmm4, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 6(%rdi)
; CHECK-NEXT: testb $16, %al
; CHECK-NEXT: je LBB13_10
; CHECK-NEXT: LBB13_9: ## %cond.store7
; CHECK-NEXT: vcvtps2ph $4, %xmm5, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 8(%rdi)
; CHECK-NEXT: testb $32, %al
; CHECK-NEXT: je LBB13_12
; CHECK-NEXT: LBB13_11: ## %cond.store9
; CHECK-NEXT: vcvtps2ph $4, %xmm6, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 10(%rdi)
; CHECK-NEXT: testb $64, %al
; CHECK-NEXT: je LBB13_14
; CHECK-NEXT: LBB13_13: ## %cond.store11
; CHECK-NEXT: vcvtps2ph $4, %xmm7, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 12(%rdi)
; CHECK-NEXT: testb $-128, %al
; CHECK-NEXT: je LBB13_16
; CHECK-NEXT: LBB13_15: ## %cond.store13
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 14(%rdi)
; CHECK-NEXT: testl $256, %eax ## imm = 0x100
; CHECK-NEXT: je LBB13_18
; CHECK-NEXT: LBB13_17: ## %cond.store15
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 16(%rdi)
; CHECK-NEXT: testl $512, %eax ## imm = 0x200
; CHECK-NEXT: je LBB13_20
; CHECK-NEXT: LBB13_19: ## %cond.store17
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 18(%rdi)
; CHECK-NEXT: testl $1024, %eax ## imm = 0x400
; CHECK-NEXT: je LBB13_22
; CHECK-NEXT: LBB13_21: ## %cond.store19
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 20(%rdi)
; CHECK-NEXT: testl $2048, %eax ## imm = 0x800
; CHECK-NEXT: je LBB13_24
; CHECK-NEXT: LBB13_23: ## %cond.store21
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 22(%rdi)
; CHECK-NEXT: testl $4096, %eax ## imm = 0x1000
; CHECK-NEXT: je LBB13_26
; CHECK-NEXT: LBB13_25: ## %cond.store23
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 24(%rdi)
; CHECK-NEXT: testl $8192, %eax ## imm = 0x2000
; CHECK-NEXT: je LBB13_28
; CHECK-NEXT: LBB13_27: ## %cond.store25
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 26(%rdi)
; CHECK-NEXT: testl $16384, %eax ## imm = 0x4000
; CHECK-NEXT: je LBB13_30
; CHECK-NEXT: LBB13_29: ## %cond.store27
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %ecx
; CHECK-NEXT: movw %cx, 28(%rdi)
; CHECK-NEXT: testl $32768, %eax ## imm = 0x8000
; CHECK-NEXT: je LBB13_32
; CHECK-NEXT: LBB13_31: ## %cond.store29
; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: vcvtps2ph $4, %xmm0, %xmm0
; CHECK-NEXT: vmovd %xmm0, %eax
; CHECK-NEXT: movw %ax, 30(%rdi)
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16f16.p0v16f16(<16 x half> %val, <16 x half>* %addr, i32 4, <16 x i1>%mask)
  ret void
}
declare void @llvm.masked.store.v16f16.p0v16f16(<16 x half>, <16 x half>*, i32, <16 x i1>)