; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.6.6 -mattr=+sse4.1 | FileCheck %s
%0 = type { double }
%union.anon = type { float }
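; The next four tests read the sign bit of a scalar FP value back through a
; union (modeled as an alloca store); the extraction itself should lower to
; movmskpd/movmskps + andl rather than a reload and shift from the stack.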
define i32 @double_signbit(double %d1) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  store double %d1, ptr %__x.addr.i, align 8
  store double %d1, ptr %__u.i, align 8
  %tmp = bitcast double %d1 to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}
define i32 @double_add_signbit(double %d1, double %d2) nounwind uwtable readnone ssp {
; CHECK-LABEL: double_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addsd %xmm1, %xmm0
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movsd %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca double, align 8
  %__u.i = alloca %0, align 8
  %add = fadd double %d1, %d2
  store double %add, ptr %__x.addr.i, align 8
  store double %add, ptr %__u.i, align 8
  %tmp = bitcast double %add to i64
  %tmp1 = lshr i64 %tmp, 63
  %shr.i = trunc i64 %tmp1 to i32
  ret i32 %shr.i
}
define i32 @float_signbit(float %f1) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  store float %f1, ptr %__x.addr.i, align 4
  store float %f1, ptr %__u.i, align 4
  %0 = bitcast float %f1 to i32
  %shr.i = lshr i32 %0, 31
  ret i32 %shr.i
}
define i32 @float_add_signbit(float %f1, float %f2) nounwind uwtable readnone ssp {
; CHECK-LABEL: float_add_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    addss %xmm1, %xmm0
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movss %xmm0, -{{[0-9]+}}(%rsp)
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
entry:
  %__x.addr.i = alloca float, align 4
  %__u.i = alloca %union.anon, align 4
  %add = fadd float %f1, %f2
  store float %add, ptr %__x.addr.i, align 4
  store float %add, ptr %__u.i, align 4
  %0 = bitcast float %add to i32
  %shr.i = lshr i32 %0, 31
  ret i32 %shr.i
}
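; A sign test on a double that only feeds a call argument should still become
; movmskpd + andl, with the call emitted as a tail call.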
define void @float_call_signbit(double %n) {
; CHECK-LABEL: float_call_signbit:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskpd %xmm0, %edi
; CHECK-NEXT:    andl $1, %edi
; CHECK-NEXT:    jmp _float_call_signbit_callee ## TAILCALL
entry:
  %t0 = bitcast double %n to i64
  %tobool.i.i.i.i = icmp slt i64 %t0, 0
  tail call void @float_call_signbit_callee(i1 zeroext %tobool.i.i.i.i)
  ret void
}
declare void @float_call_signbit_callee(i1 zeroext)
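; The high bits of the movmskpd result are known zero (only 2 mask bits are
; produced), so redundant masking of the result folds away.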
define i32 @knownbits_v2f64(<2 x double> %x) {
; CHECK-LABEL: knownbits_v2f64:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movmskpd %xmm0, %eax
  %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %x)
  %2 = and i32 %1, 3
  ret i32 %2
}
; Don't demand any movmsk signbits -> zero: the mask keeps only bit 16, which pmovmskb never sets.
define i32 @demandedbits_v16i8(<16 x i8> %x) {
; CHECK-LABEL: demandedbits_v16i8:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    retq
  %1 = tail call i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8> %x)
  %2 = and i32 %1, 65536
  ret i32 %2
}
; Simplify demanded vector elts: only element 0 is demanded, so the splat shuffle is dropped.
define i32 @demandedelts_v4f32(<4 x float> %x) {
; CHECK-LABEL: demandedelts_v4f32:
; CHECK:       ## %bb.0:
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    andl $1, %eax
; CHECK-NEXT:    retq
  %1 = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> zeroinitializer
  %2 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %1)
  %3 = and i32 %2, 1
  ret i32 %3
}
; movmskp{s|d} only set the low 4/2 bits; the high bits are known zero, so the
; sign-extended result can index the table directly.

define i32 @t1(<4 x float> %x, ptr nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t1:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskps %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = tail call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %x) nounwind
  %idxprom = sext i32 %0 to i64
  %arrayidx = getelementptr inbounds i32, ptr %indexTable, i64 %idxprom
  %1 = load i32, ptr %arrayidx, align 4
  ret i32 %1
}
define i32 @t2(<4 x float> %x, ptr nocapture %indexTable) nounwind uwtable readonly ssp {
; CHECK-LABEL: t2:
; CHECK:       ## %bb.0: ## %entry
; CHECK-NEXT:    movmskpd %xmm0, %eax
; CHECK-NEXT:    movl (%rdi,%rax,4), %eax
; CHECK-NEXT:    retq
entry:
  %0 = bitcast <4 x float> %x to <2 x double>
  %1 = tail call i32 @llvm.x86.sse2.movmsk.pd(<2 x double> %0) nounwind
  %idxprom = sext i32 %1 to i64
  %arrayidx = getelementptr inbounds i32, ptr %indexTable, i64 %idxprom
  %2 = load i32, ptr %arrayidx, align 4
  ret i32 %2
}
declare i32 @llvm.x86.sse2.pmovmskb.128(<16 x i8>) nounwind readnone
declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>) nounwind readnone
declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>) nounwind readnone