1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
3 ; RUN: llc -mcpu=haswell < %s -O2 2>&1 | FileCheck %s
5 ; 2 invariant loads, 1 for OBJC_SELECTOR_REFERENCES_
6 ; and 1 for objc_msgSend from the GOT
; 2 invariant loads (full multiply, both loads should be hoisted.)
; 2 invariant loads (full divide, both loads should be hoisted.) 1 additional instruction for a zeroing %edx that gets hoisted and then rematerialized.
12 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
13 target triple = "x86_64-apple-macosx10.7.2"
15 @"\01L_OBJC_METH_VAR_NAME_" = internal global [4 x i8] c"foo\00", section "__TEXT,__objc_methname,cstring_literals", align 1
16 @"\01L_OBJC_SELECTOR_REFERENCES_" = internal global i8* getelementptr inbounds ([4 x i8], [4 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i64 0, i64 0), section "__DATA, __objc_selrefs, literal_pointers, no_dead_strip"
17 @"\01L_OBJC_IMAGE_INFO" = internal constant [2 x i32] [i32 0, i32 16], section "__DATA, __objc_imageinfo, regular, no_dead_strip"
18 @llvm.used = appending global [3 x i8*] [i8* getelementptr inbounds ([4 x i8], [4 x i8]* @"\01L_OBJC_METH_VAR_NAME_", i32 0, i32 0), i8* bitcast (i8** @"\01L_OBJC_SELECTOR_REFERENCES_" to i8*), i8* bitcast ([2 x i32]* @"\01L_OBJC_IMAGE_INFO" to i8*)], section "llvm.metadata"
20 define void @test(i8* %x) uwtable ssp {
22 ; CHECK: ## %bb.0: ## %entry
23 ; CHECK-NEXT: pushq %rbp
24 ; CHECK-NEXT: .cfi_def_cfa_offset 16
25 ; CHECK-NEXT: pushq %r15
26 ; CHECK-NEXT: .cfi_def_cfa_offset 24
27 ; CHECK-NEXT: pushq %r14
28 ; CHECK-NEXT: .cfi_def_cfa_offset 32
29 ; CHECK-NEXT: pushq %rbx
30 ; CHECK-NEXT: .cfi_def_cfa_offset 40
31 ; CHECK-NEXT: pushq %rax
32 ; CHECK-NEXT: .cfi_def_cfa_offset 48
33 ; CHECK-NEXT: .cfi_offset %rbx, -40
34 ; CHECK-NEXT: .cfi_offset %r14, -32
35 ; CHECK-NEXT: .cfi_offset %r15, -24
36 ; CHECK-NEXT: .cfi_offset %rbp, -16
37 ; CHECK-NEXT: movq %rdi, %rbx
38 ; CHECK-NEXT: movl $10000, %ebp ## imm = 0x2710
39 ; CHECK-NEXT: movq {{.*}}(%rip), %r14
40 ; CHECK-NEXT: movq _objc_msgSend@{{.*}}(%rip), %r15
41 ; CHECK-NEXT: .p2align 4, 0x90
42 ; CHECK-NEXT: LBB0_1: ## %for.body
43 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
44 ; CHECK-NEXT: movq %rbx, %rdi
45 ; CHECK-NEXT: movq %r14, %rsi
46 ; CHECK-NEXT: callq *%r15
47 ; CHECK-NEXT: decl %ebp
48 ; CHECK-NEXT: jne LBB0_1
49 ; CHECK-NEXT: ## %bb.2: ## %for.end
50 ; CHECK-NEXT: addq $8, %rsp
51 ; CHECK-NEXT: popq %rbx
52 ; CHECK-NEXT: popq %r14
53 ; CHECK-NEXT: popq %r15
54 ; CHECK-NEXT: popq %rbp
59 for.body: ; preds = %for.body, %entry
60 %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
61 %0 = load i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !invariant.load !0
62 %call = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %x, i8* %0)
63 %inc = add i32 %i.01, 1
64 %exitcond = icmp eq i32 %inc, 10000
65 br i1 %exitcond, label %for.end, label %for.body
67 for.end: ; preds = %for.body
71 define void @test_unordered(i8* %x) uwtable ssp {
72 ; CHECK-LABEL: test_unordered:
73 ; CHECK: ## %bb.0: ## %entry
74 ; CHECK-NEXT: pushq %rbp
75 ; CHECK-NEXT: .cfi_def_cfa_offset 16
76 ; CHECK-NEXT: pushq %r15
77 ; CHECK-NEXT: .cfi_def_cfa_offset 24
78 ; CHECK-NEXT: pushq %r14
79 ; CHECK-NEXT: .cfi_def_cfa_offset 32
80 ; CHECK-NEXT: pushq %rbx
81 ; CHECK-NEXT: .cfi_def_cfa_offset 40
82 ; CHECK-NEXT: pushq %rax
83 ; CHECK-NEXT: .cfi_def_cfa_offset 48
84 ; CHECK-NEXT: .cfi_offset %rbx, -40
85 ; CHECK-NEXT: .cfi_offset %r14, -32
86 ; CHECK-NEXT: .cfi_offset %r15, -24
87 ; CHECK-NEXT: .cfi_offset %rbp, -16
88 ; CHECK-NEXT: movq %rdi, %rbx
89 ; CHECK-NEXT: movl $10000, %ebp ## imm = 0x2710
90 ; CHECK-NEXT: movq {{.*}}(%rip), %r14
91 ; CHECK-NEXT: movq _objc_msgSend@{{.*}}(%rip), %r15
92 ; CHECK-NEXT: .p2align 4, 0x90
93 ; CHECK-NEXT: LBB1_1: ## %for.body
94 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
95 ; CHECK-NEXT: movq %rbx, %rdi
96 ; CHECK-NEXT: movq %r14, %rsi
97 ; CHECK-NEXT: callq *%r15
98 ; CHECK-NEXT: decl %ebp
99 ; CHECK-NEXT: jne LBB1_1
100 ; CHECK-NEXT: ## %bb.2: ## %for.end
101 ; CHECK-NEXT: addq $8, %rsp
102 ; CHECK-NEXT: popq %rbx
103 ; CHECK-NEXT: popq %r14
104 ; CHECK-NEXT: popq %r15
105 ; CHECK-NEXT: popq %rbp
110 for.body: ; preds = %for.body, %entry
111 %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
112 %0 = load atomic i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_" unordered, align 8, !invariant.load !0
113 %call = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %x, i8* %0)
114 %inc = add i32 %i.01, 1
115 %exitcond = icmp eq i32 %inc, 10000
116 br i1 %exitcond, label %for.end, label %for.body
118 for.end: ; preds = %for.body
122 define void @test_volatile(i8* %x) uwtable ssp {
123 ; CHECK-LABEL: test_volatile:
124 ; CHECK: ## %bb.0: ## %entry
125 ; CHECK-NEXT: pushq %rbp
126 ; CHECK-NEXT: .cfi_def_cfa_offset 16
127 ; CHECK-NEXT: pushq %r14
128 ; CHECK-NEXT: .cfi_def_cfa_offset 24
129 ; CHECK-NEXT: pushq %rbx
130 ; CHECK-NEXT: .cfi_def_cfa_offset 32
131 ; CHECK-NEXT: .cfi_offset %rbx, -32
132 ; CHECK-NEXT: .cfi_offset %r14, -24
133 ; CHECK-NEXT: .cfi_offset %rbp, -16
134 ; CHECK-NEXT: movq %rdi, %rbx
135 ; CHECK-NEXT: movl $10000, %ebp ## imm = 0x2710
136 ; CHECK-NEXT: movq _objc_msgSend@{{.*}}(%rip), %r14
137 ; CHECK-NEXT: .p2align 4, 0x90
138 ; CHECK-NEXT: LBB2_1: ## %for.body
139 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
140 ; CHECK-NEXT: movq {{.*}}(%rip), %rsi
141 ; CHECK-NEXT: movq %rbx, %rdi
142 ; CHECK-NEXT: callq *%r14
143 ; CHECK-NEXT: decl %ebp
144 ; CHECK-NEXT: jne LBB2_1
145 ; CHECK-NEXT: ## %bb.2: ## %for.end
146 ; CHECK-NEXT: popq %rbx
147 ; CHECK-NEXT: popq %r14
148 ; CHECK-NEXT: popq %rbp
153 for.body: ; preds = %for.body, %entry
154 %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
155 %0 = load volatile i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_", align 8, !invariant.load !0
156 %call = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %x, i8* %0)
157 %inc = add i32 %i.01, 1
158 %exitcond = icmp eq i32 %inc, 10000
159 br i1 %exitcond, label %for.end, label %for.body
161 for.end: ; preds = %for.body
165 define void @test_seq_cst(i8* %x) uwtable ssp {
166 ; CHECK-LABEL: test_seq_cst:
167 ; CHECK: ## %bb.0: ## %entry
168 ; CHECK-NEXT: pushq %rbp
169 ; CHECK-NEXT: .cfi_def_cfa_offset 16
170 ; CHECK-NEXT: pushq %r14
171 ; CHECK-NEXT: .cfi_def_cfa_offset 24
172 ; CHECK-NEXT: pushq %rbx
173 ; CHECK-NEXT: .cfi_def_cfa_offset 32
174 ; CHECK-NEXT: .cfi_offset %rbx, -32
175 ; CHECK-NEXT: .cfi_offset %r14, -24
176 ; CHECK-NEXT: .cfi_offset %rbp, -16
177 ; CHECK-NEXT: movq %rdi, %rbx
178 ; CHECK-NEXT: movl $10000, %ebp ## imm = 0x2710
179 ; CHECK-NEXT: movq _objc_msgSend@{{.*}}(%rip), %r14
180 ; CHECK-NEXT: .p2align 4, 0x90
181 ; CHECK-NEXT: LBB3_1: ## %for.body
182 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
183 ; CHECK-NEXT: movq {{.*}}(%rip), %rsi
184 ; CHECK-NEXT: movq %rbx, %rdi
185 ; CHECK-NEXT: callq *%r14
186 ; CHECK-NEXT: decl %ebp
187 ; CHECK-NEXT: jne LBB3_1
188 ; CHECK-NEXT: ## %bb.2: ## %for.end
189 ; CHECK-NEXT: popq %rbx
190 ; CHECK-NEXT: popq %r14
191 ; CHECK-NEXT: popq %rbp
196 for.body: ; preds = %for.body, %entry
197 %i.01 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
198 %0 = load atomic i8*, i8** @"\01L_OBJC_SELECTOR_REFERENCES_" seq_cst, align 8, !invariant.load !0
199 %call = tail call i8* bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to i8* (i8*, i8*)*)(i8* %x, i8* %0)
200 %inc = add i32 %i.01, 1
201 %exitcond = icmp eq i32 %inc, 10000
202 br i1 %exitcond, label %for.end, label %for.body
204 for.end: ; preds = %for.body
; External Objective-C dispatcher. 'nonlazybind' forces calls to load the
; callee address straight from the GOT rather than through a lazy-binding
; stub — this is why the CHECK lines above expect a hoistable
; `movq _objc_msgSend@{{.*}}(%rip)` load outside the loops.
declare i8* @objc_msgSend(i8*, i8*, ...) nonlazybind
210 define void @test_multi_def(i64* dereferenceable(8) %x1,
211 ; CHECK-LABEL: test_multi_def:
212 ; CHECK: ## %bb.0: ## %entry
213 ; CHECK-NEXT: movq %rdx, %r8
214 ; CHECK-NEXT: xorl %r9d, %r9d
215 ; CHECK-NEXT: movq (%rdi), %rdi
216 ; CHECK-NEXT: movq (%rsi), %rsi
217 ; CHECK-NEXT: .p2align 4, 0x90
218 ; CHECK-NEXT: LBB4_2: ## %for.body
219 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
220 ; CHECK-NEXT: movq %rdi, %rax
221 ; CHECK-NEXT: mulq %rsi
222 ; CHECK-NEXT: addq %rax, (%r8)
223 ; CHECK-NEXT: adcq %rdx, 8(%r8)
224 ; CHECK-NEXT: ## %bb.1: ## %for.check
225 ; CHECK-NEXT: ## in Loop: Header=BB4_2 Depth=1
226 ; CHECK-NEXT: incq %r9
227 ; CHECK-NEXT: addq $16, %r8
228 ; CHECK-NEXT: cmpq %rcx, %r9
229 ; CHECK-NEXT: jl LBB4_2
230 ; CHECK-NEXT: ## %bb.3: ## %exit
232 i64* dereferenceable(8) %x2,
233 i128* %y, i64 %count) nounwind {
238 %inc = add nsw i64 %i, 1
239 %done = icmp sge i64 %inc, %count
240 br i1 %done, label %exit, label %for.body
243 %i = phi i64 [ 0, %entry ], [ %inc, %for.check ]
244 %x1_load = load i64, i64* %x1, align 8, !invariant.load !0
245 %x1_zext = zext i64 %x1_load to i128
246 %x2_load = load i64, i64* %x2, align 8, !invariant.load !0
247 %x2_zext = zext i64 %x2_load to i128
248 %x_prod = mul i128 %x1_zext, %x2_zext
249 %y_elem = getelementptr inbounds i128, i128* %y, i64 %i
250 %y_load = load i128, i128* %y_elem, align 8
251 %y_plus = add i128 %x_prod, %y_load
252 store i128 %y_plus, i128* %y_elem, align 8
259 define void @test_div_def(i32* dereferenceable(8) %x1,
260 ; CHECK-LABEL: test_div_def:
261 ; CHECK: ## %bb.0: ## %entry
262 ; CHECK-NEXT: movq %rdx, %r8
263 ; CHECK-NEXT: xorl %r9d, %r9d
264 ; CHECK-NEXT: movl (%rdi), %edi
265 ; CHECK-NEXT: movl (%rsi), %esi
266 ; CHECK-NEXT: .p2align 4, 0x90
267 ; CHECK-NEXT: LBB5_2: ## %for.body
268 ; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
269 ; CHECK-NEXT: movl %edi, %eax
270 ; CHECK-NEXT: xorl %edx, %edx
271 ; CHECK-NEXT: divl %esi
272 ; CHECK-NEXT: addl %eax, (%r8,%r9,4)
273 ; CHECK-NEXT: ## %bb.1: ## %for.check
274 ; CHECK-NEXT: ## in Loop: Header=BB5_2 Depth=1
275 ; CHECK-NEXT: incq %r9
276 ; CHECK-NEXT: cmpl %ecx, %r9d
277 ; CHECK-NEXT: jl LBB5_2
278 ; CHECK-NEXT: ## %bb.3: ## %exit
280 i32* dereferenceable(8) %x2,
281 i32* %y, i32 %count) nounwind {
286 %inc = add nsw i32 %i, 1
287 %done = icmp sge i32 %inc, %count
288 br i1 %done, label %exit, label %for.body
291 %i = phi i32 [ 0, %entry ], [ %inc, %for.check ]
292 %x1_load = load i32, i32* %x1, align 8, !invariant.load !0
293 %x2_load = load i32, i32* %x2, align 8, !invariant.load !0
294 %x_quot = udiv i32 %x1_load, %x2_load
295 %y_elem = getelementptr inbounds i32, i32* %y, i32 %i
296 %y_load = load i32, i32* %y_elem, align 8
297 %y_plus = add i32 %x_quot, %y_load
298 store i32 %y_plus, i32* %y_elem, align 8