1 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=cortex-a9 -simplifycfg-sink-common=false | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-CORTEX
2 ; RUN: llc < %s -mtriple=thumbv7-apple-darwin -mcpu=swift -simplifycfg-sink-common=false | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-SWIFT
3 ; Avoid 16-bit 's' instructions which partially update CPSR (and add a false
4 ; dependency) when they aren't dependent on the last CPSR-defining instruction.
; t1: returns (a*b) * (c*d).  The FileCheck directives verify the backend's
; scheduling of the three multiplies: the flag-setting 16-bit 'muls' must not
; create a false CPSR dependency, and the chosen order differs between
; Cortex-A9 and Swift.
; NOTE(review): this chunk appears truncated — the function's 'ret' and
; closing '}' are not visible here; confirm against the full file.
7 define i32 @t1(i32 %a, i32 %b, i32 %c, i32 %d) nounwind readnone {
10 ; CHECK-CORTEX: muls [[REG:(r[0-9]+)]], r3, r2
11 ; CHECK-CORTEX-NEXT: mul [[REG2:(r[0-9]+)]], r1, r0
12 ; CHECK-SWIFT: muls [[REG2:(r[0-9]+)]], r1, r0
13 ; CHECK-SWIFT-NEXT: mul [[REG:(r[0-9]+)]], r2, r3
14 ; CHECK-NEXT: muls r0, [[REG]], [[REG2]]
15 %0 = mul nsw i32 %a, %b ; %0 = a * b
16 %1 = mul nsw i32 %c, %d ; %1 = c * d
17 %2 = mul nsw i32 %0, %1 ; %2 = (a*b) * (c*d); presumably the return value — not visible here
21 ; Avoid partial CPSR dependency via loop backedge.
; t2: loop body that loads four consecutive i32s from %ptr1, multiplies them
; together, stores the product through %ptr2, then walks %ptr1 forward by 4
; and %ptr2 backward by 1 until %ptr2 reaches null.  The test expects a plain
; 'mul' (not the flag-setting 'muls') inside the loop, so no partial CPSR
; dependency is carried across the loop backedge.
; NOTE(review): truncated chunk — the 'entry:'/'while.body:' labels, the
; '%mul = mul i32 %0, %1' definition feeding %mul5, and the 'while.end'
; epilogue are not visible here; confirm against the full file.
23 define void @t2(i32* nocapture %ptr1, i32* %ptr2, i32 %c) nounwind {
30 ; CHECK: mul r{{[0-9]+}}
32 %ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ] ; current read cursor
33 %ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ] ; current write cursor
34 %0 = load i32, i32* %ptr1.addr.09, align 4 ; ptr1[0]
35 %arrayidx1 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 1
36 %1 = load i32, i32* %arrayidx1, align 4 ; ptr1[1]
37 %arrayidx3 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 2
38 %2 = load i32, i32* %arrayidx3, align 4 ; ptr1[2]
39 %arrayidx4 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 3
40 %3 = load i32, i32* %arrayidx4, align 4 ; ptr1[3]
41 %add.ptr = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 4 ; advance read cursor by 4
43 %mul5 = mul i32 %mul, %2 ; %mul defined on a line not visible in this chunk
44 %mul6 = mul i32 %mul5, %3 ; product of all four loaded values
45 store i32 %mul6, i32* %ptr2.addr.08, align 4
46 %incdec.ptr = getelementptr inbounds i32, i32* %ptr2.addr.08, i32 -1 ; write cursor steps backward
47 %tobool = icmp eq i32* %incdec.ptr, null
48 br i1 %tobool, label %while.end, label %while.body ; loop until write cursor hits null
54 ; Allow partial CPSR dependency when code size is the priority.
; t3: identical loop to t2 but the function is marked 'minsize', so the test
; expects the opposite outcome: the smaller flag-setting 16-bit 'muls' IS
; allowed inside the loop, because code size outranks the partial-CPSR
; dependency concern.
; NOTE(review): truncated chunk — block labels, the '%mul = mul i32 %0, %1'
; definition, and the function epilogue are not visible here; confirm against
; the full file.
56 define void @t3(i32* nocapture %ptr1, i32* %ptr2, i32 %c) nounwind minsize {
63 ; CHECK: muls r{{[0-9]+}}
65 %ptr1.addr.09 = phi i32* [ %add.ptr, %while.body ], [ %ptr1, %entry ] ; current read cursor
66 %ptr2.addr.08 = phi i32* [ %incdec.ptr, %while.body ], [ %ptr2, %entry ] ; current write cursor
67 %0 = load i32, i32* %ptr1.addr.09, align 4 ; ptr1[0]
68 %arrayidx1 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 1
69 %1 = load i32, i32* %arrayidx1, align 4 ; ptr1[1]
70 %arrayidx3 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 2
71 %2 = load i32, i32* %arrayidx3, align 4 ; ptr1[2]
72 %arrayidx4 = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 3
73 %3 = load i32, i32* %arrayidx4, align 4 ; ptr1[3]
74 %add.ptr = getelementptr inbounds i32, i32* %ptr1.addr.09, i32 4 ; advance read cursor by 4
76 %mul5 = mul i32 %mul, %2 ; %mul defined on a line not visible in this chunk
77 %mul6 = mul i32 %mul5, %3 ; product of all four loaded values
78 store i32 %mul6, i32* %ptr2.addr.08, align 4
79 %incdec.ptr = getelementptr inbounds i32, i32* %ptr2.addr.08, i32 -1 ; write cursor steps backward
80 %tobool = icmp eq i32* %incdec.ptr, null
81 br i1 %tobool, label %while.end, label %while.body ; loop until write cursor hits null
87 ; Avoid producing tMOVi8 after a high-latency flag-setting operation.
88 ; <rdar://problem/13468102>
; t4: compares *q against 10.0 and stores a different triple of constants
; through p on each branch.  The test expects 'vmrs APSR_nzcv, fpscr' (the
; FP-compare flag transfer) in the output; per the comment above, the
; immediate stores that follow must not be lowered to the flag-touching
; tMOVi8 right after this high-latency flag-setting operation.
; NOTE(review): truncated chunk — the 'entry:'/'if.then:'/'if.else:' labels
; and everything after the last store (terminators, 'ret', '}') are not
; visible here; confirm against the full file.
89 define void @t4(i32* nocapture %p, double* nocapture %q) {
92 ; CHECK: vmrs APSR_nzcv, fpscr
95 %0 = load double, double* %q, align 4
96 %cmp = fcmp olt double %0, 1.000000e+01 ; *q < 10.0 ?
97 %incdec.ptr1 = getelementptr inbounds i32, i32* %p, i32 1 ; &p[1], shared by both branches
98 br i1 %cmp, label %if.then, label %if.else
101 store i32 7, i32* %p, align 4 ; then-branch: p[0]=7
102 %incdec.ptr2 = getelementptr inbounds i32, i32* %p, i32 2
103 store i32 8, i32* %incdec.ptr1, align 4 ; p[1]=8
104 store i32 9, i32* %incdec.ptr2, align 4 ; p[2]=9
108 store i32 3, i32* %p, align 4 ; else-branch: p[0]=3
109 %incdec.ptr5 = getelementptr inbounds i32, i32* %p, i32 3
110 store i32 5, i32* %incdec.ptr1, align 4 ; p[1]=5
111 store i32 6, i32* %incdec.ptr5, align 4 ; p[3]=6