1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s -check-prefix=X64
3 ; RUN: llc < %s -mtriple=i686-unknown | FileCheck %s -check-prefix=X86
5 %struct.SA = type { i32 , i32 , i32 , i32 , i32};
; @foo: straight-line accumulation test. Loads ctx field 0 (offset 0) and
; field 4 (offset 16) of %struct.SA, adds the field-4 value repeatedly, and
; stores results into fields 3 and 4 (offsets 12/16) — exercising how llc
; folds chained adds into LEA forms (see the leal lines in the CHECKs below).
; NOTE(review): this chunk appears subsampled — the embedded original line
; numbers have gaps. In particular %add is used below but its defining line
; is not visible here, the `entry:` label referenced by the CHECK lines is
; missing, and the function's `ret void`/closing `}` are not in view.
; Confirm against the full file before editing; the CHECK lines are
; autogenerated and position-sensitive (X64-NEXT/X86-NEXT chains).
7 define void @foo(ptr nocapture %ctx, i32 %n) local_unnamed_addr #0 {
9 ; X64: # %bb.0: # %entry
10 ; X64-NEXT: movl 16(%rdi), %eax
11 ; X64-NEXT: movl (%rdi), %ecx
12 ; X64-NEXT: addl %eax, %ecx
13 ; X64-NEXT: addl %eax, %ecx
14 ; X64-NEXT: addl %eax, %ecx
15 ; X64-NEXT: leal (%rcx,%rax), %edx
16 ; X64-NEXT: leal 1(%rax,%rcx), %ecx
17 ; X64-NEXT: movl %ecx, 12(%rdi)
18 ; X64-NEXT: leal 1(%rax,%rdx), %eax
19 ; X64-NEXT: movl %eax, 16(%rdi)
23 ; X86: # %bb.0: # %entry
24 ; X86-NEXT: pushl %esi
25 ; X86-NEXT: .cfi_def_cfa_offset 8
26 ; X86-NEXT: .cfi_offset %esi, -8
27 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
28 ; X86-NEXT: movl 16(%eax), %ecx
29 ; X86-NEXT: movl (%eax), %edx
30 ; X86-NEXT: addl %ecx, %edx
31 ; X86-NEXT: addl %ecx, %edx
32 ; X86-NEXT: addl %ecx, %edx
33 ; X86-NEXT: leal 1(%ecx,%edx), %esi
34 ; X86-NEXT: addl %ecx, %edx
35 ; X86-NEXT: movl %esi, 12(%eax)
36 ; X86-NEXT: leal 1(%ecx,%edx), %ecx
37 ; X86-NEXT: movl %ecx, 16(%eax)
39 ; X86-NEXT: .cfi_def_cfa_offset 4
; IR body: %0 = ctx->f0, %1 = ctx->f4; h3/h4 point at fields 3 and 4.
42 %0 = load i32, ptr %ctx, align 8
43 %h3 = getelementptr inbounds %struct.SA, ptr %ctx, i64 0, i32 3
44 %h4 = getelementptr inbounds %struct.SA, ptr %ctx, i64 0, i32 4
45 %1 = load i32, ptr %h4, align 8
; NOTE(review): %add is undefined in this view — presumably
; "%add = add i32 %0, %1" on the elided original line; verify.
47 %add1 = add i32 %add, %1
48 %add2 = add i32 %add1, %1
49 %add3 = add i32 %add2, %1
50 %add4 = add i32 %add3, %1
; ctx->f3 = %add4; ctx->f4 = %add4 + %1
51 store i32 %add4, ptr %h3, align 4
52 %add29 = add i32 %add4, %1
53 store i32 %add29, ptr %h4, align 8
; @foo_loop: same accumulation pattern inside a counted loop. A phi-driven
; counter (%iter, decremented via %iter.ctr) runs the loop %n times; per
; iteration ctx->f3 is stored, and after the loop the fully accumulated
; value is stored to ctx->f4 — the CHECKs verify llc sinks the long add
; chain (%add291..%add29) out of the loop into the exit block.
; NOTE(review): this chunk is subsampled — the `entry:`/`loop:`/`exit:`
; block labels referenced by the phi and the CHECK lines, the definition of
; %add, the loop's decrement/branch CHECK lines, and the final
; `ret void`/closing `}` are not visible here. Confirm against the full
; file; the X64/X86 CHECK chains are autogenerated and position-sensitive.
59 define void @foo_loop(ptr nocapture %ctx, i32 %n) local_unnamed_addr #0 {
60 ; X64-LABEL: foo_loop:
61 ; X64: # %bb.0: # %entry
62 ; X64-NEXT: .p2align 4
63 ; X64-NEXT: .LBB1_1: # %loop
64 ; X64-NEXT: # =>This Inner Loop Header: Depth=1
65 ; X64-NEXT: movl (%rdi), %ecx
66 ; X64-NEXT: movl 16(%rdi), %eax
67 ; X64-NEXT: leal 1(%rcx,%rax), %edx
68 ; X64-NEXT: movl %edx, 12(%rdi)
70 ; X64-NEXT: jne .LBB1_1
71 ; X64-NEXT: # %bb.2: # %exit
72 ; X64-NEXT: addl %eax, %ecx
73 ; X64-NEXT: leal 1(%rax,%rcx), %ecx
74 ; X64-NEXT: leal (%rax,%rax), %edx
75 ; X64-NEXT: addl %eax, %edx
76 ; X64-NEXT: addl %edx, %ecx
77 ; X64-NEXT: addl %edx, %ecx
78 ; X64-NEXT: movl %ecx, 16(%rdi)
81 ; X86-LABEL: foo_loop:
82 ; X86: # %bb.0: # %entry
83 ; X86-NEXT: pushl %edi
84 ; X86-NEXT: .cfi_def_cfa_offset 8
85 ; X86-NEXT: pushl %esi
86 ; X86-NEXT: .cfi_def_cfa_offset 12
87 ; X86-NEXT: .cfi_offset %esi, -12
88 ; X86-NEXT: .cfi_offset %edi, -8
89 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
90 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
91 ; X86-NEXT: .p2align 4
92 ; X86-NEXT: .LBB1_1: # %loop
93 ; X86-NEXT: # =>This Inner Loop Header: Depth=1
94 ; X86-NEXT: movl (%eax), %esi
95 ; X86-NEXT: movl 16(%eax), %ecx
96 ; X86-NEXT: leal 1(%esi,%ecx), %edi
97 ; X86-NEXT: movl %edi, 12(%eax)
99 ; X86-NEXT: jne .LBB1_1
100 ; X86-NEXT: # %bb.2: # %exit
101 ; X86-NEXT: addl %ecx, %esi
102 ; X86-NEXT: leal 1(%ecx,%esi), %edx
103 ; X86-NEXT: leal (%ecx,%ecx), %esi
104 ; X86-NEXT: addl %ecx, %esi
105 ; X86-NEXT: addl %esi, %edx
106 ; X86-NEXT: addl %esi, %edx
107 ; X86-NEXT: movl %edx, 16(%eax)
108 ; X86-NEXT: popl %esi
109 ; X86-NEXT: .cfi_def_cfa_offset 8
110 ; X86-NEXT: popl %edi
111 ; X86-NEXT: .cfi_def_cfa_offset 4
; Loop body: %iter counts down from %n; %0 = ctx->f0, %1 = ctx->f4.
; (The `loop:` label this phi belongs to is elided in this view.)
117 %iter = phi i32 [%n ,%entry ] ,[ %iter.ctr ,%loop]
118 %0 = load i32, ptr %ctx, align 8
119 %h3 = getelementptr inbounds %struct.SA, ptr %ctx, i64 0, i32 3
120 %h4 = getelementptr inbounds %struct.SA, ptr %ctx, i64 0, i32 4
121 %1 = load i32, ptr %h4, align 8
; NOTE(review): %add is undefined in this view — presumably
; "%add = add i32 %0, %1" on the elided original line; verify.
123 %add4 = add i32 %add, %1
124 store i32 %add4, ptr %h3, align 4
; Long add chain: loop-invariant except for the final store, so llc can
; sink it into %exit (this is what the post-loop CHECK lines assert).
125 %add291 = add i32 %add4, %1
126 %add292 = add i32 %add291, %1
127 %add293 = add i32 %add292, %1
128 %add294 = add i32 %add293, %1
129 %add295 = add i32 %add294, %1
130 %add296 = add i32 %add295, %1
131 %add29 = add i32 %add296, %1
; Decrement counter and loop while non-zero.
132 %iter.ctr = sub i32 %iter , 1
133 %res = icmp ne i32 %iter.ctr , 0
134 br i1 %res , label %loop , label %exit
; Exit block (label elided in this view): ctx->f4 = final accumulated value.
137 store i32 %add29, ptr %h4, align 8