; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+mmx,+sse2 | FileCheck %s
; There are no MMX operations here, so we use XMM or i64.

define void @ti8(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to <8 x i8>
  %tmp2 = bitcast double %b to <8 x i8>
  %tmp3 = add <8 x i8> %tmp1, %tmp2
; CHECK: paddb %xmm1, %xmm0
  store <8 x i8> %tmp3, <8 x i8>* null
  ret void
}

define void @ti16(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to <4 x i16>
  %tmp2 = bitcast double %b to <4 x i16>
  %tmp3 = add <4 x i16> %tmp1, %tmp2
; CHECK: paddw %xmm1, %xmm0
  store <4 x i16> %tmp3, <4 x i16>* null
  ret void
}

define void @ti32(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to <2 x i32>
  %tmp2 = bitcast double %b to <2 x i32>
  %tmp3 = add <2 x i32> %tmp1, %tmp2
; CHECK: paddd %xmm1, %xmm0
  store <2 x i32> %tmp3, <2 x i32>* null
  ret void
}

define void @ti64(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to <1 x i64>
  %tmp2 = bitcast double %b to <1 x i64>
  %tmp3 = add <1 x i64> %tmp1, %tmp2
; CHECK: addq %rax, %rcx
  store <1 x i64> %tmp3, <1 x i64>* null
  ret void
}

; MMX intrinsic calls get us MMX instructions.

define void @ti8a(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
  %tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.b(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}

define void @ti16a(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
  %tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.w(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}

define void @ti32a(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
  %tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.d(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}

define void @ti64a(double %a, double %b) nounwind {
  %tmp1 = bitcast double %a to x86_mmx
; CHECK: movdq2q
  %tmp2 = bitcast double %b to x86_mmx
; CHECK: movdq2q
  %tmp3 = tail call x86_mmx @llvm.x86.mmx.padd.q(x86_mmx %tmp1, x86_mmx %tmp2)
  store x86_mmx %tmp3, x86_mmx* null
  ret void
}

declare x86_mmx @llvm.x86.mmx.padd.b(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.w(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.d(x86_mmx, x86_mmx)
declare x86_mmx @llvm.x86.mmx.padd.q(x86_mmx, x86_mmx)