1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2 ; RUN: llc < %s -mtriple=armv7-apple-ios | FileCheck %s
5 declare void @llvm.trap()
7 @G1 = external global i32
8 @G2 = external global i32
10 define i32 @f1(i32 %cond1, i32 %x1, i32 %x2, i32 %x3) {
12 ; CHECK: @ %bb.0: @ %entry
13 ; CHECK-NEXT: cmp r0, #0
14 ; CHECK-NEXT: moveq r3, r2
15 ; CHECK-NEXT: movne r1, r2
16 ; CHECK-NEXT: add r0, r1, r3
19 %tmp1 = icmp eq i32 %cond1, 0
20 %tmp2 = select i1 %tmp1, i32 %x1, i32 %x2
21 %tmp3 = select i1 %tmp1, i32 %x2, i32 %x3
22 %tmp4 = add i32 %tmp2, %tmp3
26 @foo = external global i32
27 @bar = external global [250 x i8], align 1
29 ; Test CSE of a cmp across a basic-block boundary.
31 define void @f2() nounwind ssp {
33 ; CHECK: @ %bb.0: @ %entry
34 ; CHECK-NEXT: movw r0, :lower16:(L_foo$non_lazy_ptr-(LPC1_0+8))
35 ; CHECK-NEXT: movt r0, :upper16:(L_foo$non_lazy_ptr-(LPC1_0+8))
37 ; CHECK-NEXT: ldr r0, [pc, r0]
38 ; CHECK-NEXT: ldr r2, [r0]
39 ; CHECK-NEXT: cmp r2, #1
41 ; CHECK-NEXT: LBB1_1: @ %for.body.lr.ph
42 ; CHECK-NEXT: push {lr}
43 ; CHECK-NEXT: movw r0, :lower16:(L_bar$non_lazy_ptr-(LPC1_1+8))
44 ; CHECK-NEXT: movle r2, #1
45 ; CHECK-NEXT: movt r0, :upper16:(L_bar$non_lazy_ptr-(LPC1_1+8))
46 ; CHECK-NEXT: mov r1, #0
48 ; CHECK-NEXT: ldr r0, [pc, r0]
49 ; CHECK-NEXT: bl _memset
52 %0 = load i32, ptr @foo, align 4
53 %cmp28 = icmp sgt i32 %0, 0
54 br i1 %cmp28, label %for.body.lr.ph, label %for.cond1.preheader
56 for.body.lr.ph: ; preds = %entry
57 %1 = icmp sgt i32 %0, 1
58 %smax = select i1 %1, i32 %0, i32 1
59 call void @llvm.memset.p0.i32(ptr @bar, i8 0, i32 %smax, i1 false)
60 call void @llvm.trap()
63 for.cond1.preheader: ; preds = %entry
67 declare void @llvm.memset.p0.i32(ptr nocapture, i8, i32, i1) nounwind
70 define ptr @f3(ptr %base, ptr nocapture %offset, i32 %size) nounwind {
72 ; CHECK: @ %bb.0: @ %entry
73 ; CHECK-NEXT: ldr r3, [r1]
74 ; CHECK-NEXT: mov r9, #0
75 ; CHECK-NEXT: cmp r3, r2
76 ; CHECK-NEXT: blt LBB2_2
77 ; CHECK-NEXT: @ %bb.1: @ %if.end
78 ; CHECK-NEXT: sub r3, r3, r2
79 ; CHECK-NEXT: add r9, r0, r3
80 ; CHECK-NEXT: sub r2, r2, r3
81 ; CHECK-NEXT: add r2, r3, r2
82 ; CHECK-NEXT: str r2, [r1]
83 ; CHECK-NEXT: LBB2_2: @ %return
84 ; CHECK-NEXT: mov r0, r9
87 %0 = load i32, ptr %offset, align 4
88 %cmp = icmp slt i32 %0, %size
89 %s = sub nsw i32 %0, %size
90 %size2 = sub nsw i32 %size, 0
91 br i1 %cmp, label %return, label %if.end
94 ; We are checking CSE between %sub here and %s in the entry block.
95 %sub = sub nsw i32 %0, %size2
96 %s2 = sub nsw i32 %s, %size
97 %s3 = sub nsw i32 %sub, %s2
98 store i32 %s3, ptr %offset, align 4
99 %add.ptr = getelementptr inbounds i8, ptr %base, i32 %sub
103 %retval.0 = phi ptr [ %add.ptr, %if.end ], [ null, %entry ]
107 ; The cmp of %val must not be hoisted above the preceding conditional branch.
108 define void @f4(ptr %ptr1, ptr %ptr2, i64 %val) {
110 ; CHECK: @ %bb.0: @ %entry
111 ; CHECK-NEXT: cmp r0, #0
112 ; CHECK-NEXT: movne r9, #0
113 ; CHECK-NEXT: strne r9, [r0]
114 ; CHECK-NEXT: orrs r0, r2, r3
115 ; CHECK-NEXT: beq LBB3_2
116 ; CHECK-NEXT: @ %bb.1: @ %if.end
117 ; CHECK-NEXT: subs r0, r2, #10
118 ; CHECK-NEXT: sbcs r0, r3, #0
119 ; CHECK-NEXT: bxlt lr
120 ; CHECK-NEXT: LBB3_2: @ %if.end3
121 ; CHECK-NEXT: subs r0, r2, #10
122 ; CHECK-NEXT: sbc r3, r3, #0
123 ; CHECK-NEXT: stm r1, {r0, r3}
126 %tobool.not = icmp eq ptr %ptr1, null
127 br i1 %tobool.not, label %if.end, label %if.then
130 store ptr null, ptr %ptr1, align 4
134 %tobool1 = icmp ne i64 %val, 0
135 %cmp = icmp slt i64 %val, 10
136 %or.cond = and i1 %tobool1, %cmp
137 br i1 %or.cond, label %cleanup, label %if.end3
140 %sub = add nsw i64 %val, -10
141 store i64 %sub, ptr %ptr2, align 8