; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-apple-darwin -mattr=avx512f,avx512bw,avx512vl < %s | FileCheck %s

; Skylake-avx512 target supports masked load/store for i8 and i16 vectors
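;
; In the tests below the <N x i1> mask arrives in a vector register with only
; bit 0 of each lane defined, so the expected lowering first shifts that bit
; into the sign bit (vpsllw $7 for byte-sized mask lanes, $15 for word-sized
; ones) and then forms a k-register mask with vpmovb2m/vpmovw2m before the
; masked move or blend instruction.
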
define <16 x i8> @test_mask_load_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_load_16xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %addr, i32 4, <16 x i1> %mask, <16 x i8> undef)
  ret <16 x i8> %res
}
declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)

define <32 x i8> @test_mask_load_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_load_32xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
  %res = call <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>* %addr, i32 4, <32 x i1> %mask, <32 x i8> %val)
  ret <32 x i8> %res
}
declare <32 x i8> @llvm.masked.load.v32i8.p0v32i8(<32 x i8>*, i32, <32 x i1>, <32 x i8>)

define <64 x i8> @test_mask_load_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_load_64xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vpblendmb (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
  %res = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* %addr, i32 4, <64 x i1> %mask, <64 x i8> %val)
  ret <64 x i8> %res
}
declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)

define <8 x i16> @test_mask_load_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_load_8xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %xmm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %addr, i32 4, <8 x i1> %mask, <8 x i16> undef)
  ret <8 x i16> %res
}
declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)

define <16 x i16> @test_mask_load_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_load_16xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 (%rdi), %ymm0 {%k1} {z}
; CHECK-NEXT: retq
  %res = call <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>* %addr, i32 4, <16 x i1> %mask, <16 x i16> zeroinitializer)
  ret <16 x i16> %res
}
declare <16 x i16> @llvm.masked.load.v16i16.p0v16i16(<16 x i16>*, i32, <16 x i1>, <16 x i16>)

define <32 x i16> @test_mask_load_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_load_32xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vpblendmw (%rdi), %zmm1, %zmm0 {%k1}
; CHECK-NEXT: retq
  %res = call <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>* %addr, i32 4, <32 x i1> %mask, <32 x i16> %val)
  ret <32 x i16> %res
}
declare <32 x i16> @llvm.masked.load.v32i16.p0v32i16(<32 x i16>*, i32, <32 x i1>, <32 x i16>)

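; The store tests mirror the loads: the same shift + vpmov*2m sequence builds
; the k-mask, the masked vmovdqu8/vmovdqu16 writes only the selected lanes,
; and the ymm/zmm variants end with vzeroupper before returning.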
define void @test_mask_store_16xi8(<16 x i1> %mask, <16 x i8>* %addr, <16 x i8> %val) {
; CHECK-LABEL: test_mask_store_16xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu8 %xmm1, (%rdi) {%k1}
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16i8.p0v16i8(<16 x i8> %val, <16 x i8>* %addr, i32 4, <16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v16i8.p0v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)

define void @test_mask_store_32xi8(<32 x i1> %mask, <32 x i8>* %addr, <32 x i8> %val) {
; CHECK-LABEL: test_mask_store_32xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu8 %ymm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v32i8.p0v32i8(<32 x i8> %val, <32 x i8>* %addr, i32 4, <32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v32i8.p0v32i8(<32 x i8>, <32 x i8>*, i32, <32 x i1>)

define void @test_mask_store_64xi8(<64 x i1> %mask, <64 x i8>* %addr, <64 x i8> %val) {
; CHECK-LABEL: test_mask_store_64xi8:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %zmm0, %zmm0
; CHECK-NEXT: vpmovb2m %zmm0, %k1
; CHECK-NEXT: vmovdqu8 %zmm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %val, <64 x i8>* %addr, i32 4, <64 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)

define void @test_mask_store_8xi16(<8 x i1> %mask, <8 x i16>* %addr, <8 x i16> %val) {
; CHECK-LABEL: test_mask_store_8xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $15, %xmm0, %xmm0
; CHECK-NEXT: vpmovw2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %xmm1, (%rdi) {%k1}
; CHECK-NEXT: retq
  call void @llvm.masked.store.v8i16.p0v8i16(<8 x i16> %val, <8 x i16>* %addr, i32 4, <8 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)

define void @test_mask_store_16xi16(<16 x i1> %mask, <16 x i16>* %addr, <16 x i16> %val) {
; CHECK-LABEL: test_mask_store_16xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovb2m %xmm0, %k1
; CHECK-NEXT: vmovdqu16 %ymm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16i16.p0v16i16(<16 x i16> %val, <16 x i16>* %addr, i32 4, <16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v16i16.p0v16i16(<16 x i16>, <16 x i16>*, i32, <16 x i1>)

define void @test_mask_store_32xi16(<32 x i1> %mask, <32 x i16>* %addr, <32 x i16> %val) {
; CHECK-LABEL: test_mask_store_32xi16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %ymm0, %ymm0
; CHECK-NEXT: vpmovb2m %ymm0, %k1
; CHECK-NEXT: vmovdqu16 %zmm1, (%rdi) {%k1}
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  call void @llvm.masked.store.v32i16.p0v32i16(<32 x i16> %val, <32 x i16>* %addr, i32 4, <32 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v32i16.p0v32i16(<32 x i16>, <32 x i16>*, i32, <32 x i1>)

; Make sure we scalarize masked loads of f16.
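; There is no masked vector-load lowering for f16 elements with these
; features, so the mask is moved to a GPR with vpmovmskb and each of the 16
; half values is loaded behind its own bit test, as the branchy sequence
; below expects.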
define <16 x half> @test_mask_load_16xf16(<16 x i1> %mask, <16 x half>* %addr, <16 x half> %val) {
; CHECK-LABEL: test_mask_load_16xf16:
; CHECK: ## %bb.0:
; CHECK-NEXT: pushq %rbp
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: pushq %r15
; CHECK-NEXT: .cfi_def_cfa_offset 24
; CHECK-NEXT: pushq %r14
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: pushq %r13
; CHECK-NEXT: .cfi_def_cfa_offset 40
; CHECK-NEXT: pushq %r12
; CHECK-NEXT: .cfi_def_cfa_offset 48
; CHECK-NEXT: pushq %rbx
; CHECK-NEXT: .cfi_def_cfa_offset 56
; CHECK-NEXT: .cfi_offset %rbx, -56
; CHECK-NEXT: .cfi_offset %r12, -48
; CHECK-NEXT: .cfi_offset %r13, -40
; CHECK-NEXT: .cfi_offset %r14, -32
; CHECK-NEXT: .cfi_offset %r15, -24
; CHECK-NEXT: .cfi_offset %rbp, -16
; CHECK-NEXT: movq %rdi, %rax
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovmskb %xmm0, %r11d
; CHECK-NEXT: testb $1, %r11b
; CHECK-NEXT: je LBB12_1
; CHECK-NEXT: ## %bb.2: ## %cond.load
; CHECK-NEXT: movzwl (%rsi), %ecx
; CHECK-NEXT: movl %ecx, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Spill
; CHECK-NEXT: jmp LBB12_3
; CHECK-NEXT: LBB12_1:
; CHECK-NEXT: movl $0, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Folded Spill
; CHECK-NEXT: LBB12_3: ## %else
; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: movl $0, {{[-0-9]+}}(%r{{[sb]}}p) ## 4-byte Folded Spill
; CHECK-NEXT: movl %edi, %ecx
; CHECK-NEXT: testb $2, %r11b
; CHECK-NEXT: je LBB12_4
; CHECK-NEXT: ## %bb.5: ## %cond.load1
; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movl %edi, %r12d
; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: movl %edi, %ebp
; CHECK-NEXT: movl %edi, %r13d
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: movl %edi, %r8d
; CHECK-NEXT: movl %edi, %r9d
; CHECK-NEXT: movl %edi, %r10d
; CHECK-NEXT: movl %edi, %r15d
; CHECK-NEXT: movl %edi, %edx
; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movzwl 2(%rsi), %edi
; CHECK-NEXT: ## kill: def $di killed $di def $edi
; CHECK-NEXT: testb $4, %r11b
; CHECK-NEXT: jne LBB12_7
; CHECK-NEXT: jmp LBB12_8
; CHECK-NEXT: LBB12_4:
; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movl %edi, %r12d
; CHECK-NEXT: movl %edi, %ebx
; CHECK-NEXT: movl %edi, %ebp
; CHECK-NEXT: movl %edi, %r13d
; CHECK-NEXT: movl %edi, %r14d
; CHECK-NEXT: movl %edi, %r8d
; CHECK-NEXT: movl %edi, %r9d
; CHECK-NEXT: movl %edi, %r10d
; CHECK-NEXT: movl %edi, %r15d
; CHECK-NEXT: movl %edi, %edx
; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movw %di, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: testb $4, %r11b
; CHECK-NEXT: je LBB12_8
; CHECK-NEXT: LBB12_7: ## %cond.load4
; CHECK-NEXT: movzwl 4(%rsi), %ecx
; CHECK-NEXT: movw %cx, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: LBB12_8: ## %else5
; CHECK-NEXT: testb $8, %r11b
; CHECK-NEXT: jne LBB12_9
; CHECK-NEXT: ## %bb.10: ## %else8
; CHECK-NEXT: testb $16, %r11b
; CHECK-NEXT: jne LBB12_11
; CHECK-NEXT: LBB12_12: ## %else11
; CHECK-NEXT: testb $32, %r11b
; CHECK-NEXT: jne LBB12_13
; CHECK-NEXT: LBB12_14: ## %else14
; CHECK-NEXT: testb $64, %r11b
; CHECK-NEXT: jne LBB12_15
; CHECK-NEXT: LBB12_16: ## %else17
; CHECK-NEXT: testb $-128, %r11b
; CHECK-NEXT: jne LBB12_17
; CHECK-NEXT: LBB12_18: ## %else20
; CHECK-NEXT: testl $256, %r11d ## imm = 0x100
; CHECK-NEXT: jne LBB12_19
; CHECK-NEXT: LBB12_20: ## %else23
; CHECK-NEXT: testl $512, %r11d ## imm = 0x200
; CHECK-NEXT: jne LBB12_21
; CHECK-NEXT: LBB12_22: ## %else26
; CHECK-NEXT: testl $1024, %r11d ## imm = 0x400
; CHECK-NEXT: jne LBB12_23
; CHECK-NEXT: LBB12_24: ## %else29
; CHECK-NEXT: testl $2048, %r11d ## imm = 0x800
; CHECK-NEXT: jne LBB12_25
; CHECK-NEXT: LBB12_26: ## %else32
; CHECK-NEXT: testl $4096, %r11d ## imm = 0x1000
; CHECK-NEXT: je LBB12_28
; CHECK-NEXT: LBB12_27: ## %cond.load34
; CHECK-NEXT: movzwl 24(%rsi), %edx
; CHECK-NEXT: LBB12_28: ## %else35
; CHECK-NEXT: movw %dx, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: testl $8192, %r11d ## imm = 0x2000
; CHECK-NEXT: jne LBB12_29
; CHECK-NEXT: ## %bb.30: ## %else38
; CHECK-NEXT: testl $16384, %r11d ## imm = 0x4000
; CHECK-NEXT: jne LBB12_31
; CHECK-NEXT: LBB12_32: ## %else41
; CHECK-NEXT: testl $32768, %r11d ## imm = 0x8000
; CHECK-NEXT: je LBB12_33
; CHECK-NEXT: LBB12_34: ## %cond.load43
; CHECK-NEXT: movzwl {{[-0-9]+}}(%r{{[sb]}}p), %ecx ## 2-byte Folded Reload
; CHECK-NEXT: movzwl 30(%rsi), %esi
; CHECK-NEXT: jmp LBB12_35
; CHECK-NEXT: LBB12_9: ## %cond.load7
; CHECK-NEXT: movzwl 6(%rsi), %r12d
; CHECK-NEXT: testb $16, %r11b
; CHECK-NEXT: je LBB12_12
; CHECK-NEXT: LBB12_11: ## %cond.load10
; CHECK-NEXT: movzwl 8(%rsi), %ebx
; CHECK-NEXT: testb $32, %r11b
; CHECK-NEXT: je LBB12_14
; CHECK-NEXT: LBB12_13: ## %cond.load13
; CHECK-NEXT: movzwl 10(%rsi), %ebp
; CHECK-NEXT: testb $64, %r11b
; CHECK-NEXT: je LBB12_16
; CHECK-NEXT: LBB12_15: ## %cond.load16
; CHECK-NEXT: movzwl 12(%rsi), %r13d
; CHECK-NEXT: testb $-128, %r11b
; CHECK-NEXT: je LBB12_18
; CHECK-NEXT: LBB12_17: ## %cond.load19
; CHECK-NEXT: movzwl 14(%rsi), %r14d
; CHECK-NEXT: testl $256, %r11d ## imm = 0x100
; CHECK-NEXT: je LBB12_20
; CHECK-NEXT: LBB12_19: ## %cond.load22
; CHECK-NEXT: movzwl 16(%rsi), %r8d
; CHECK-NEXT: testl $512, %r11d ## imm = 0x200
; CHECK-NEXT: je LBB12_22
; CHECK-NEXT: LBB12_21: ## %cond.load25
; CHECK-NEXT: movzwl 18(%rsi), %r9d
; CHECK-NEXT: testl $1024, %r11d ## imm = 0x400
; CHECK-NEXT: je LBB12_24
; CHECK-NEXT: LBB12_23: ## %cond.load28
; CHECK-NEXT: movzwl 20(%rsi), %r10d
; CHECK-NEXT: testl $2048, %r11d ## imm = 0x800
; CHECK-NEXT: je LBB12_26
; CHECK-NEXT: LBB12_25: ## %cond.load31
; CHECK-NEXT: movzwl 22(%rsi), %r15d
; CHECK-NEXT: testl $4096, %r11d ## imm = 0x1000
; CHECK-NEXT: jne LBB12_27
; CHECK-NEXT: jmp LBB12_28
; CHECK-NEXT: LBB12_29: ## %cond.load37
; CHECK-NEXT: movzwl 26(%rsi), %ecx
; CHECK-NEXT: movw %cx, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: testl $16384, %r11d ## imm = 0x4000
; CHECK-NEXT: je LBB12_32
; CHECK-NEXT: LBB12_31: ## %cond.load40
; CHECK-NEXT: movzwl 28(%rsi), %ecx
; CHECK-NEXT: movw %cx, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: testl $32768, %r11d ## imm = 0x8000
; CHECK-NEXT: jne LBB12_34
; CHECK-NEXT: LBB12_33:
; CHECK-NEXT: movzwl {{[-0-9]+}}(%r{{[sb]}}p), %ecx ## 2-byte Folded Reload
; CHECK-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %esi ## 4-byte Reload
; CHECK-NEXT: LBB12_35: ## %else44
; CHECK-NEXT: movl {{[-0-9]+}}(%r{{[sb]}}p), %edx ## 4-byte Reload
; CHECK-NEXT: movw %dx, (%rax)
; CHECK-NEXT: movw %di, 2(%rax)
; CHECK-NEXT: movw %cx, 4(%rax)
; CHECK-NEXT: movw %r12w, 6(%rax)
; CHECK-NEXT: movw %bx, 8(%rax)
; CHECK-NEXT: movw %bp, 10(%rax)
; CHECK-NEXT: movw %r13w, 12(%rax)
; CHECK-NEXT: movw %r14w, 14(%rax)
; CHECK-NEXT: movw %r8w, 16(%rax)
; CHECK-NEXT: movw %r9w, 18(%rax)
; CHECK-NEXT: movw %r10w, 20(%rax)
; CHECK-NEXT: movw %r15w, 22(%rax)
; CHECK-NEXT: movzwl {{[-0-9]+}}(%r{{[sb]}}p), %ecx ## 2-byte Folded Reload
; CHECK-NEXT: movw %cx, 24(%rax)
; CHECK-NEXT: movzwl {{[-0-9]+}}(%r{{[sb]}}p), %ecx ## 2-byte Folded Reload
; CHECK-NEXT: movw %cx, 26(%rax)
; CHECK-NEXT: movzwl {{[-0-9]+}}(%r{{[sb]}}p), %ecx ## 2-byte Folded Reload
; CHECK-NEXT: movw %cx, 28(%rax)
; CHECK-NEXT: movw %si, 30(%rax)
; CHECK-NEXT: popq %rbx
; CHECK-NEXT: popq %r12
; CHECK-NEXT: popq %r13
; CHECK-NEXT: popq %r14
; CHECK-NEXT: popq %r15
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: retq
  %res = call <16 x half> @llvm.masked.load.v16f16(<16 x half>* %addr, i32 4, <16 x i1> %mask, <16 x half> zeroinitializer)
  ret <16 x half> %res
}
declare <16 x half> @llvm.masked.load.v16f16(<16 x half>*, i32, <16 x i1>, <16 x half>)

; Make sure we scalarize masked stores of f16.
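; As with the load above, the f16 store is scalarized: vpmovmskb moves the
; mask to a GPR and each element is stored behind its own bit test.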
define void @test_mask_store_16xf16(<16 x i1> %mask, <16 x half>* %addr, <16 x half> %val) {
; CHECK-LABEL: test_mask_store_16xf16:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpsllw $7, %xmm0, %xmm0
; CHECK-NEXT: vpmovmskb %xmm0, %eax
; CHECK-NEXT: testb $1, %al
; CHECK-NEXT: jne LBB13_1
; CHECK-NEXT: ## %bb.2: ## %else
; CHECK-NEXT: testb $2, %al
; CHECK-NEXT: jne LBB13_3
; CHECK-NEXT: LBB13_4: ## %else2
; CHECK-NEXT: testb $4, %al
; CHECK-NEXT: jne LBB13_5
; CHECK-NEXT: LBB13_6: ## %else4
; CHECK-NEXT: testb $8, %al
; CHECK-NEXT: jne LBB13_7
; CHECK-NEXT: LBB13_8: ## %else6
; CHECK-NEXT: testb $16, %al
; CHECK-NEXT: jne LBB13_9
; CHECK-NEXT: LBB13_10: ## %else8
; CHECK-NEXT: testb $32, %al
; CHECK-NEXT: jne LBB13_11
; CHECK-NEXT: LBB13_12: ## %else10
; CHECK-NEXT: testb $64, %al
; CHECK-NEXT: jne LBB13_13
; CHECK-NEXT: LBB13_14: ## %else12
; CHECK-NEXT: testb $-128, %al
; CHECK-NEXT: jne LBB13_15
; CHECK-NEXT: LBB13_16: ## %else14
; CHECK-NEXT: testl $256, %eax ## imm = 0x100
; CHECK-NEXT: jne LBB13_17
; CHECK-NEXT: LBB13_18: ## %else16
; CHECK-NEXT: testl $512, %eax ## imm = 0x200
; CHECK-NEXT: jne LBB13_19
; CHECK-NEXT: LBB13_20: ## %else18
; CHECK-NEXT: testl $1024, %eax ## imm = 0x400
; CHECK-NEXT: jne LBB13_21
; CHECK-NEXT: LBB13_22: ## %else20
; CHECK-NEXT: testl $2048, %eax ## imm = 0x800
; CHECK-NEXT: jne LBB13_23
; CHECK-NEXT: LBB13_24: ## %else22
; CHECK-NEXT: testl $4096, %eax ## imm = 0x1000
; CHECK-NEXT: jne LBB13_25
; CHECK-NEXT: LBB13_26: ## %else24
; CHECK-NEXT: testl $8192, %eax ## imm = 0x2000
; CHECK-NEXT: jne LBB13_27
; CHECK-NEXT: LBB13_28: ## %else26
; CHECK-NEXT: testl $16384, %eax ## imm = 0x4000
; CHECK-NEXT: jne LBB13_29
; CHECK-NEXT: LBB13_30: ## %else28
; CHECK-NEXT: testl $32768, %eax ## imm = 0x8000
; CHECK-NEXT: jne LBB13_31
; CHECK-NEXT: LBB13_32: ## %else30
; CHECK-NEXT: retq
; CHECK-NEXT: LBB13_1: ## %cond.store
; CHECK-NEXT: movw %si, (%rdi)
; CHECK-NEXT: testb $2, %al
; CHECK-NEXT: je LBB13_4
; CHECK-NEXT: LBB13_3: ## %cond.store1
; CHECK-NEXT: movw %dx, 2(%rdi)
; CHECK-NEXT: testb $4, %al
; CHECK-NEXT: je LBB13_6
; CHECK-NEXT: LBB13_5: ## %cond.store3
; CHECK-NEXT: movw %cx, 4(%rdi)
; CHECK-NEXT: testb $8, %al
; CHECK-NEXT: je LBB13_8
; CHECK-NEXT: LBB13_7: ## %cond.store5
; CHECK-NEXT: movw %r8w, 6(%rdi)
; CHECK-NEXT: testb $16, %al
; CHECK-NEXT: je LBB13_10
; CHECK-NEXT: LBB13_9: ## %cond.store7
; CHECK-NEXT: movw %r9w, 8(%rdi)
; CHECK-NEXT: testb $32, %al
; CHECK-NEXT: je LBB13_12
; CHECK-NEXT: LBB13_11: ## %cond.store9
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 10(%rdi)
; CHECK-NEXT: testb $64, %al
; CHECK-NEXT: je LBB13_14
; CHECK-NEXT: LBB13_13: ## %cond.store11
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 12(%rdi)
; CHECK-NEXT: testb $-128, %al
; CHECK-NEXT: je LBB13_16
; CHECK-NEXT: LBB13_15: ## %cond.store13
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 14(%rdi)
; CHECK-NEXT: testl $256, %eax ## imm = 0x100
; CHECK-NEXT: je LBB13_18
; CHECK-NEXT: LBB13_17: ## %cond.store15
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 16(%rdi)
; CHECK-NEXT: testl $512, %eax ## imm = 0x200
; CHECK-NEXT: je LBB13_20
; CHECK-NEXT: LBB13_19: ## %cond.store17
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 18(%rdi)
; CHECK-NEXT: testl $1024, %eax ## imm = 0x400
; CHECK-NEXT: je LBB13_22
; CHECK-NEXT: LBB13_21: ## %cond.store19
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 20(%rdi)
; CHECK-NEXT: testl $2048, %eax ## imm = 0x800
; CHECK-NEXT: je LBB13_24
; CHECK-NEXT: LBB13_23: ## %cond.store21
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 22(%rdi)
; CHECK-NEXT: testl $4096, %eax ## imm = 0x1000
; CHECK-NEXT: je LBB13_26
; CHECK-NEXT: LBB13_25: ## %cond.store23
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 24(%rdi)
; CHECK-NEXT: testl $8192, %eax ## imm = 0x2000
; CHECK-NEXT: je LBB13_28
; CHECK-NEXT: LBB13_27: ## %cond.store25
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 26(%rdi)
; CHECK-NEXT: testl $16384, %eax ## imm = 0x4000
; CHECK-NEXT: je LBB13_30
; CHECK-NEXT: LBB13_29: ## %cond.store27
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %ecx
; CHECK-NEXT: movw %cx, 28(%rdi)
; CHECK-NEXT: testl $32768, %eax ## imm = 0x8000
; CHECK-NEXT: je LBB13_32
; CHECK-NEXT: LBB13_31: ## %cond.store29
; CHECK-NEXT: movzwl {{[0-9]+}}(%rsp), %eax
; CHECK-NEXT: movw %ax, 30(%rdi)
; CHECK-NEXT: retq
  call void @llvm.masked.store.v16f16.p0v16f16(<16 x half> %val, <16 x half>* %addr, i32 4, <16 x i1> %mask)
  ret void
}
declare void @llvm.masked.store.v16f16.p0v16f16(<16 x half>, <16 x half>*, i32, <16 x i1>)