; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -global-isel | FileCheck %s

; The fundamental problem: an add separated from other arithmetic by a sign or
; zero extension can't be combined with the later instructions. However, if the
; first add is 'nsw' or 'nuw' respectively, then we can promote the extension
; ahead of that add to allow optimizations.
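;
; For example (an illustrative sketch, not part of the test): because the
; first add is 'nsw', the pair
;   %add = add nsw i32 %i, 5
;   %ext = sext i32 %add to i64
; can legally be rewritten as
;   %ext = sext i32 %i to i64
;   %add = add nsw i64 %ext, 5
; which exposes the now-64-bit add to later combines.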

define i64 @add_nsw_consts(i32 %i) {
; CHECK-LABEL: add_nsw_consts:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addl $5, %edi
; CHECK-NEXT:    movslq %edi, %rax
; CHECK-NEXT:    addq $7, %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = add i64 %ext, 7
  ret i64 %idx
}

; An x86 bonus: If we promote the sext ahead of the 'add nsw',
; we allow LEA formation and eliminate an add instruction.
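; For instance, the promoted form could hypothetically become a single
; 'leaq 5(%rax,%rsi), %rax' after the movslq, replacing both of the adds
; checked below.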

define i64 @add_nsw_sext_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_add:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addl $5, %edi
; CHECK-NEXT:    movslq %edi, %rax
; CHECK-NEXT:    addq %rsi, %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = add i64 %x, %ext
  ret i64 %idx
}

; Throw in a scale (left shift) because an LEA can do that too.
; Use a negative constant (LEA displacement) to verify that's handled correctly.
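; Sketch: (%i - 5) << 3 is 8 * %i - 40, so the promoted form could fold the
; whole computation into a single 'leaq -40(%rsi,%rax,8), %rax'.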

define i64 @add_nsw_sext_lsh_add(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext_lsh_add:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addl $-5, %edi
; CHECK-NEXT:    movslq %edi, %rax
; CHECK-NEXT:    movq $3, %rcx
; CHECK-NEXT:    shlq %cl, %rax
; CHECK-NEXT:    addq %rsi, %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, -5
  %ext = sext i32 %add to i64
  %shl = shl i64 %ext, 3
  %idx = add i64 %x, %shl
  ret i64 %idx
}

; Don't promote the sext if its result feeds no further arithmetic: there is
; nothing to combine with, and the wider add instruction would need an extra
; byte to encode.
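; A sketch of the size difference, assuming standard x86-64 encodings (these
; bytes are illustrative and not checked by this test):
;   addl $5, %edi   # 83 c7 05      -- 3 bytes
;   addq $5, %rax   # 48 83 c0 05   -- 4 bytes (extra REX.W prefix)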

define i64 @add_nsw_sext(i32 %i, i64 %x) {
; CHECK-LABEL: add_nsw_sext:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addl $5, %edi
; CHECK-NEXT:    movslq %edi, %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  ret i64 %ext
}

; The typical use case: a 64-bit system where an 'int' is used as an index into an array.
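; In C terms, something like 'return &x[i + 5]' -- the index add gets 'nsw'
; because signed-integer overflow is undefined behavior in C.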

define i8* @gep8(i32 %i, i8* %x) {
; CHECK-LABEL: gep8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addl $5, %edi
; CHECK-NEXT:    movslq %edi, %rax
; CHECK-NEXT:    leaq (%rsi,%rax), %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = getelementptr i8, i8* %x, i64 %ext
  ret i8* %idx
}

define i16* @gep16(i32 %i, i16* %x) {
; CHECK-LABEL: gep16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq $2, %rax
; CHECK-NEXT:    addl $-5, %edi
; CHECK-NEXT:    movslq %edi, %rcx
; CHECK-NEXT:    imulq %rax, %rcx
; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, -5
  %ext = sext i32 %add to i64
  %idx = getelementptr i16, i16* %x, i64 %ext
  ret i16* %idx
}

define i32* @gep32(i32 %i, i32* %x) {
; CHECK-LABEL: gep32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq $4, %rax
; CHECK-NEXT:    addl $5, %edi
; CHECK-NEXT:    movslq %edi, %rcx
; CHECK-NEXT:    imulq %rax, %rcx
; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = getelementptr i32, i32* %x, i64 %ext
  ret i32* %idx
}

define i64* @gep64(i32 %i, i64* %x) {
; CHECK-LABEL: gep64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq $8, %rax
; CHECK-NEXT:    addl $-5, %edi
; CHECK-NEXT:    movslq %edi, %rcx
; CHECK-NEXT:    imulq %rax, %rcx
; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, -5
  %ext = sext i32 %add to i64
  %idx = getelementptr i64, i64* %x, i64 %ext
  ret i64* %idx
}

; LEA can't scale by 16 (valid LEA scale factors are 1, 2, 4, and 8), but the
; adds can still be combined into an LEA.

define i128* @gep128(i32 %i, i128* %x) {
; CHECK-LABEL: gep128:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq $16, %rax
; CHECK-NEXT:    addl $5, %edi
; CHECK-NEXT:    movslq %edi, %rcx
; CHECK-NEXT:    imulq %rax, %rcx
; CHECK-NEXT:    leaq (%rsi,%rcx), %rax
; CHECK-NEXT:    retq
  %add = add nsw i32 %i, 5
  %ext = sext i32 %add to i64
  %idx = getelementptr i128, i128* %x, i64 %ext
  ret i128* %idx
}

; A bigger win can be achieved when there is more than one use of the
; sign-extended value. In this case, we can eliminate sign-extension
; instructions and use more efficient addressing modes for memory ops.
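; Sketch of the promoted form: with a single '%idx = sext i32 %i to i64', the
; three addresses are %a + 4*%idx + {4, 8, 0}, each expressible as one scaled
; addressing mode such as '4(%rdi,%rax,4)' with no separate add instructions.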

define void @PR20134(i32* %a, i32 %i) {
; CHECK-LABEL: PR20134:
; CHECK:         movq $4, %rax
; CHECK-NEXT:    leal 1(%rsi), %ecx
; CHECK-NEXT:    movslq %ecx, %rcx
; CHECK-NEXT:    imulq %rax, %rcx
; CHECK-NEXT:    leaq (%rdi,%rcx), %rcx
; CHECK-NEXT:    leal 2(%rsi), %edx
; CHECK-NEXT:    movslq %edx, %rdx
; CHECK-NEXT:    imulq %rax, %rdx
; CHECK-NEXT:    leaq (%rdi,%rdx), %rdx
; CHECK-NEXT:    movl (%rdx), %edx
; CHECK-NEXT:    addl (%rcx), %edx
; CHECK-NEXT:    movslq %esi, %rcx
; CHECK-NEXT:    imulq %rax, %rcx
; CHECK-NEXT:    leaq (%rdi,%rcx), %rax
; CHECK-NEXT:    movl %edx, (%rax)
; CHECK-NEXT:    retq
  %add1 = add nsw i32 %i, 1
  %idx1 = sext i32 %add1 to i64
  %gep1 = getelementptr i32, i32* %a, i64 %idx1
  %load1 = load i32, i32* %gep1, align 4

  %add2 = add nsw i32 %i, 2
  %idx2 = sext i32 %add2 to i64
  %gep2 = getelementptr i32, i32* %a, i64 %idx2
  %load2 = load i32, i32* %gep2, align 4

  %add3 = add i32 %load1, %load2
  %idx3 = sext i32 %i to i64
  %gep3 = getelementptr i32, i32* %a, i64 %idx3
  store i32 %add3, i32* %gep3, align 4
  ret void
}

; The same as @PR20134, but the sign extension is replaced with zero extension.
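; The promotion is equally valid with 'nuw': as a sketch (not part of the
; test),
;   %add1 = add nuw i32 %i, 1
;   %idx1 = zext i32 %add1 to i64
; is equivalent to
;   %ext  = zext i32 %i to i64
;   %idx1 = add nuw i64 %ext, 1
; because the 'nuw' flag guarantees the i32 add cannot wrap unsigned.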

define void @PR20134_zext(i32* %a, i32 %i) {
; CHECK-LABEL: PR20134_zext:
; CHECK:         movq $4, %rax
; CHECK-NEXT:    leal 1(%rsi), %ecx
; CHECK-NEXT:    imulq %rax, %rcx
; CHECK-NEXT:    leaq (%rdi,%rcx), %rcx
; CHECK-NEXT:    leal 2(%rsi), %edx
; CHECK-NEXT:    imulq %rax, %rdx
; CHECK-NEXT:    leaq (%rdi,%rdx), %rdx
; CHECK-NEXT:    movl (%rdx), %edx
; CHECK-NEXT:    addl (%rcx), %edx
; CHECK-NEXT:    imulq %rax, %rsi
; CHECK-NEXT:    leaq (%rdi,%rsi), %rax
; CHECK-NEXT:    movl %edx, (%rax)
; CHECK-NEXT:    retq
  %add1 = add nuw i32 %i, 1
  %idx1 = zext i32 %add1 to i64
  %gep1 = getelementptr i32, i32* %a, i64 %idx1
  %load1 = load i32, i32* %gep1, align 4

  %add2 = add nuw i32 %i, 2
  %idx2 = zext i32 %add2 to i64
  %gep2 = getelementptr i32, i32* %a, i64 %idx2
  %load2 = load i32, i32* %gep2, align 4

  %add3 = add i32 %load1, %load2
  %idx3 = zext i32 %i to i64
  %gep3 = getelementptr i32, i32* %a, i64 %idx3
  store i32 %add3, i32* %gep3, align 4
  ret void
}