; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+f16c | FileCheck %s --check-prefix=F16C
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+f16c -O0 | FileCheck %s --check-prefix=F16C-O0
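
; Without F16C, half values are promoted to float through the
; __extendhfsf2/__truncsfhf2 libcalls; with +f16c, the vcvtph2ps/vcvtps2ph
; hardware conversions are used instead, as the checks below show.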
define <1 x half> @ir_fadd_v1f16(<1 x half> %arg0, <1 x half> %arg1) nounwind {
; X86-LABEL: ir_fadd_v1f16:
; X86:       # %bb.0:
; X86-NEXT:    subl $28, %esp
; X86-NEXT:    movups %xmm1, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    addss {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    addl $28, %esp
; X86-NEXT:    retl
;
; X64-LABEL: ir_fadd_v1f16:
; X64:       # %bb.0:
; X64-NEXT:    subq $40, %rsp
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps %xmm1, %xmm0
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    addq $40, %rsp
; X64-NEXT:    retq
;
; F16C-LABEL: ir_fadd_v1f16:
; F16C:       # %bb.0:
; F16C-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-NEXT:    vpextrw $0, %xmm1, %ecx
; F16C-NEXT:    movzwl %cx, %ecx
; F16C-NEXT:    vmovd %ecx, %xmm0
; F16C-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-NEXT:    movzwl %ax, %eax
; F16C-NEXT:    vmovd %eax, %xmm1
; F16C-NEXT:    vcvtph2ps %xmm1, %xmm1
; F16C-NEXT:    vaddss %xmm0, %xmm1, %xmm0
; F16C-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-NEXT:    vmovd %xmm0, %eax
; F16C-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; F16C-NEXT:    retq
;
; F16C-O0-LABEL: ir_fadd_v1f16:
; F16C-O0:       # %bb.0:
; F16C-O0-NEXT:    vpextrw $0, %xmm1, %eax
; F16C-O0-NEXT:    # kill: def $ax killed $ax killed $eax
; F16C-O0-NEXT:    movzwl %ax, %eax
; F16C-O0-NEXT:    vmovd %eax, %xmm1
; F16C-O0-NEXT:    vcvtph2ps %xmm1, %xmm1
; F16C-O0-NEXT:    vpextrw $0, %xmm0, %eax
; F16C-O0-NEXT:    # kill: def $ax killed $ax killed $eax
; F16C-O0-NEXT:    movzwl %ax, %eax
; F16C-O0-NEXT:    vmovd %eax, %xmm0
; F16C-O0-NEXT:    vcvtph2ps %xmm0, %xmm0
; F16C-O0-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; F16C-O0-NEXT:    vcvtps2ph $4, %xmm0, %xmm0
; F16C-O0-NEXT:    vmovd %xmm0, %eax
; F16C-O0-NEXT:    movw %ax, %cx
; F16C-O0-NEXT:    # implicit-def: $eax
; F16C-O0-NEXT:    movw %cx, %ax
; F16C-O0-NEXT:    # implicit-def: $xmm0
; F16C-O0-NEXT:    vpinsrw $0, %eax, %xmm0, %xmm0
; F16C-O0-NEXT:    retq
  %retval = fadd <1 x half> %arg0, %arg1
  ret <1 x half> %retval
}
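
; For <2 x half>, the F16C lowering converts the whole vector at once with a
; single vcvtph2ps/vcvtps2ph pair, while the SSE2 paths still scalarize into
; one libcall per element.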
define <2 x half> @ir_fadd_v2f16(<2 x half> %arg0, <2 x half> %arg1) nounwind {
; X86-LABEL: ir_fadd_v2f16:
; X86:       # %bb.0:
; X86-NEXT:    subl $84, %esp
; X86-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    psrld $16, %xmm0
; X86-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    movdqa %xmm1, %xmm0
; X86-NEXT:    psrld $16, %xmm0
; X86-NEXT:    movdqu %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    pextrw $0, %xmm1, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    fstpt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Spill
; X86-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
; X86-NEXT:    pextrw $0, %xmm0, %eax
; X86-NEXT:    movw %ax, (%esp)
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    calll __extendhfsf2
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    addss {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    fldt {{[-0-9]+}}(%e{{[sb]}}p) # 10-byte Folded Reload
; X86-NEXT:    fstps {{[0-9]+}}(%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    movups %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
; X86-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-NEXT:    addss {{[0-9]+}}(%esp), %xmm0
; X86-NEXT:    movss %xmm0, (%esp)
; X86-NEXT:    calll __truncsfhf2
; X86-NEXT:    movdqu {{[-0-9]+}}(%e{{[sb]}}p), %xmm1 # 16-byte Reload
; X86-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; X86-NEXT:    addl $84, %esp
; X86-NEXT:    retl
;
; X64-LABEL: ir_fadd_v2f16:
; X64:       # %bb.0:
; X64-NEXT:    subq $72, %rsp
; X64-NEXT:    movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    psrld $16, %xmm0
; X64-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movdqa %xmm1, %xmm0
; X64-NEXT:    psrld $16, %xmm0
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    movss %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Spill
; X64-NEXT:    movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; X64-NEXT:    callq __extendhfsf2@PLT
; X64-NEXT:    addss {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 4-byte Folded Reload
; X64-NEXT:    callq __truncsfhf2@PLT
; X64-NEXT:    punpcklwd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; X64-NEXT:    # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3]
; X64-NEXT:    addq $72, %rsp
; X64-NEXT:    retq
;
; F16C-LABEL: ir_fadd_v2f16:
; F16C:       # %bb.0:
; F16C-NEXT:    vcvtph2ps %xmm1, %ymm1
; F16C-NEXT:    vcvtph2ps %xmm0, %ymm0
; F16C-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; F16C-NEXT:    vcvtps2ph $4, %ymm0, %xmm0
; F16C-NEXT:    vzeroupper
; F16C-NEXT:    retq
;
; F16C-O0-LABEL: ir_fadd_v2f16:
; F16C-O0:       # %bb.0:
; F16C-O0-NEXT:    vcvtph2ps %xmm1, %ymm1
; F16C-O0-NEXT:    vcvtph2ps %xmm0, %ymm0
; F16C-O0-NEXT:    vaddps %ymm1, %ymm0, %ymm0
; F16C-O0-NEXT:    vcvtps2ph $4, %ymm0, %xmm0
; F16C-O0-NEXT:    vzeroupper
; F16C-O0-NEXT:    retq
  %retval = fadd <2 x half> %arg0, %arg1
  ret <2 x half> %retval
}