1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -O0 -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck %s
4 ; ModuleID = 'mask_set.c'
5 source_filename = "mask_set.c"
6 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
7 target triple = "x86_64-unknown-linux-gnu"
9 declare void @llvm.dbg.declare(metadata, metadata, metadata)
11 ; Function Attrs: nounwind uwtable
12 declare i64 @calc_expected_mask_val(ptr %valp, i32 %el_size, i32 %length)
13 ; Function Attrs: nounwind uwtable
14 declare i32 @check_mask16(i16 zeroext %res_mask, i16 zeroext %exp_mask, ptr %fname, ptr %input)
16 ; Function Attrs: nounwind uwtable
define void @test_xmm(i32 %shift, i32 %mulp, <2 x i64> %a,ptr %arraydecay,ptr %fname){
; CHECK-LABEL: test_xmm:
; CHECK: ## %bb.0:
; CHECK-NEXT: subq $56, %rsp
; CHECK-NEXT: .cfi_def_cfa_offset 64
; CHECK-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-NEXT: movq %rdx, %rdi
; CHECK-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) ## 8-byte Spill
; CHECK-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) ## 16-byte Spill
; CHECK-NEXT: vpmovw2m %xmm0, %k0
; CHECK-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movl $2, %esi
; CHECK-NEXT: movl $8, %edx
; CHECK-NEXT: callq _calc_expected_mask_val
; CHECK-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 ## 2-byte Reload
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx ## 8-byte Reload
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
; CHECK-NEXT: ## kill: def $eax killed $eax killed $rax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: movzwl %ax, %esi
; CHECK-NEXT: kmovb %k0, %edi
; CHECK-NEXT: callq _check_mask16
; CHECK-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 ## 16-byte Reload
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi ## 8-byte Reload
; CHECK-NEXT: vpmovd2m %xmm0, %k0
; CHECK-NEXT: ## kill: def $k1 killed $k0
; CHECK-NEXT: kmovd %k0, %eax
; CHECK-NEXT: ## kill: def $al killed $al killed $eax
; CHECK-NEXT: movzbl %al, %eax
; CHECK-NEXT: ## kill: def $ax killed $ax killed $eax
; CHECK-NEXT: movw %ax, {{[-0-9]+}}(%r{{[sb]}}p) ## 2-byte Spill
; CHECK-NEXT: movl $4, %edx
; CHECK-NEXT: movl %edx, %esi
; CHECK-NEXT: callq _calc_expected_mask_val
; CHECK-NEXT: movw {{[-0-9]+}}(%r{{[sb]}}p), %si ## 2-byte Reload
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx ## 8-byte Reload
; CHECK-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx ## 8-byte Reload
; CHECK-NEXT: ## kill: def $ax killed $ax killed $rax
; CHECK-NEXT: movzwl %si, %edi
; CHECK-NEXT: movzwl %ax, %esi
; CHECK-NEXT: callq _check_mask16
; CHECK-NEXT: addq $56, %rsp
; CHECK-NEXT: retq
  %d2 = bitcast <2 x i64> %a to <8 x i16>
  %m2 = call i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16> %d2)
  %conv7 = zext i8 %m2 to i16
  %call9 = call i64 @calc_expected_mask_val(ptr %arraydecay, i32 2, i32 8)
  %conv10 = trunc i64 %call9 to i16
  %call12 = call i32 @check_mask16(i16 zeroext %conv7, i16 zeroext %conv10, ptr %fname, ptr %arraydecay)
  %d3 = bitcast <2 x i64> %a to <4 x i32>
  %m3 = call i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32> %d3)
  %conv14 = zext i8 %m3 to i16
  %call16 = call i64 @calc_expected_mask_val(ptr %arraydecay, i32 4, i32 4)
  %conv17 = trunc i64 %call16 to i16
  %call19 = call i32 @check_mask16(i16 zeroext %conv14, i16 zeroext %conv17, ptr %fname, ptr %arraydecay)
  ret void
}
75 ; Function Attrs: nounwind readnone
76 declare i8 @llvm.x86.avx512.cvtw2mask.128(<8 x i16>)
78 ; Function Attrs: nounwind readnone
79 declare i8 @llvm.x86.avx512.cvtd2mask.128(<4 x i32>)