1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=i686-linux -mcpu=corei7-avx | FileCheck %s
3 ; RUN: opt -instsimplify -disable-output < %s
; Splat %ptr into all four lanes of a <4 x i32*>, then apply two vector
; GEPs with constant per-lane i32 offsets.  The checks show the splat
; lowered to a vbroadcastss and a single vpaddd against a constant-pool
; value.
; NOTE(review): no CHECK-LABEL, ret, or closing brace is visible in this
; view -- the block looks truncated; regenerate the assertions with
; update_llc_test_checks.py against the full file to confirm.
5 define <4 x i32*> @AGEP0(i32* %ptr) nounwind {
8 ; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm0
9 ; CHECK-NEXT: vpaddd {{\.LCPI.*}}, %xmm0, %xmm0
11 %vecinit.i = insertelement <4 x i32*> undef, i32* %ptr, i32 0
12 %vecinit2.i = insertelement <4 x i32*> %vecinit.i, i32* %ptr, i32 1
13 %vecinit4.i = insertelement <4 x i32*> %vecinit2.i, i32* %ptr, i32 2
14 %vecinit6.i = insertelement <4 x i32*> %vecinit4.i, i32* %ptr, i32 3
15 %A2 = getelementptr i32, <4 x i32*> %vecinit6.i, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
16 %A3 = getelementptr i32, <4 x i32*> %A2, <4 x i32> <i32 10, i32 14, i32 19, i32 233>
; Vector GEP with constant per-lane offsets, then extract lane 3 and load
; through it.  The checks show the lane-3 pointer extracted with
; vextractps and the constant offset (4 x i32 = 16 bytes) folded into the
; load's displacement.
; NOTE(review): CHECK-LABEL/ret lines are not visible in this view --
; block looks truncated.
20 define i32 @AGEP1(<4 x i32*> %param) nounwind {
23 ; CHECK-NEXT: vextractps $3, %xmm0, %eax
24 ; CHECK-NEXT: movl 16(%eax), %eax
26 %A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> <i32 1, i32 2, i32 3, i32 4>
27 %k = extractelement <4 x i32*> %A2, i32 3
28 %v = load i32, i32* %k
; Vector GEP with a variable <4 x i32> offset: the i32 indices are scaled
; by the element size 4 (vpslld $2) and added to the pointer lanes, then
; lane 3 is extracted and loaded through with no extra displacement.
; NOTE(review): CHECK-LABEL/ret lines are not visible in this view --
; block looks truncated.
32 define i32 @AGEP2(<4 x i32*> %param, <4 x i32> %off) nounwind {
35 ; CHECK-NEXT: vpslld $2, %xmm1, %xmm1
36 ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
37 ; CHECK-NEXT: vpextrd $3, %xmm0, %eax
38 ; CHECK-NEXT: movl (%eax), %eax
40 %A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> %off
41 %k = extractelement <4 x i32*> %A2, i32 3
42 %v = load i32, i32* %k
; Vector GEP with a variable offset (scale by 4, add), then insert a
; scalar i32* into lane 3 of the result via vpinsrd.
; NOTE(review): the line defining %v (original line 57) is missing from
; this view; the asm (movl %esp, %eax feeding vpinsrd) suggests %v is a
; stack allocation -- confirm against the full file.  The ret and closing
; brace are not visible either.
46 define <4 x i32*> @AGEP3(<4 x i32*> %param, <4 x i32> %off) nounwind {
49 ; CHECK-NEXT: pushl %eax
50 ; CHECK-NEXT: vpslld $2, %xmm1, %xmm1
51 ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
52 ; CHECK-NEXT: movl %esp, %eax
53 ; CHECK-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
54 ; CHECK-NEXT: popl %eax
56 %A2 = getelementptr i32, <4 x i32*> %param, <4 x i32> %off
58 %k = insertelement <4 x i32*> %A2, i32* %v, i32 3
; i16 elements: the offsets are scaled by 2 with an add-to-itself
; (vpaddd %xmm1, %xmm1, %xmm1) rather than a shift, then added to the
; base pointer vector.
; NOTE(review): CHECK-LABEL/ret lines are not visible in this view --
; block looks truncated.
62 define <4 x i16*> @AGEP4(<4 x i16*> %param, <4 x i32> %off) nounwind {
63 ; Multiply offset by two (add it to itself).
64 ; add the base to the offset
67 ; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1
68 ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
70 %A = getelementptr i16, <4 x i16*> %param, <4 x i32> %off
; <4 x i8> offsets must be sign-extended to pointer width before the add:
; vpslld $24 / vpsrad $24 performs the in-lane i8 -> i32 sign extension.
; Element size is 1 byte, so no scaling is needed.
; NOTE(review): CHECK-LABEL/ret lines are not visible in this view --
; block looks truncated.
74 define <4 x i8*> @AGEP5(<4 x i8*> %param, <4 x i8> %off) nounwind {
77 ; CHECK-NEXT: vpslld $24, %xmm1, %xmm1
78 ; CHECK-NEXT: vpsrad $24, %xmm1, %xmm1
79 ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
81 %A = getelementptr i8, <4 x i8*> %param, <4 x i8> %off
86 ; The size of each element is 1 byte. No need to multiply by element size.
; A single vpaddd adds the i32 offsets directly to the pointer lanes.
; NOTE(review): CHECK-LABEL/ret lines are not visible in this view --
; block looks truncated.
87 define <4 x i8*> @AGEP6(<4 x i8*> %param, <4 x i32> %off) nounwind {
90 ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
92 %A = getelementptr i8, <4 x i8*> %param, <4 x i32> %off
; A scalar i32 offset applied to a vector of i8 pointers: the offset is
; broadcast to all four lanes and added; element size 1, so no scaling.
; NOTE(review): CHECK-LABEL/ret lines are not visible in this view --
; block looks truncated.
96 define <4 x i8*> @AGEP7(<4 x i8*> %param, i32 %off) nounwind {
99 ; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm1
100 ; CHECK-NEXT: vpaddd %xmm1, %xmm0, %xmm0
102 %A = getelementptr i8, <4 x i8*> %param, i32 %off
; A scalar i16 base pointer with a vector of i32 offsets: the offsets are
; doubled (i16 element size) and the base, broadcast from the stack arg,
; is added to them.
; NOTE(review): the ret line is not visible in this view -- block looks
; truncated.
106 define <4 x i16*> @AGEP8(i16* %param, <4 x i32> %off) nounwind {
107 ; Multiply offset by two (add it to itself).
108 ; add the base to the offset
109 ; CHECK-LABEL: AGEP8:
111 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
112 ; CHECK-NEXT: vbroadcastss {{[0-9]+}}(%esp), %xmm1
113 ; CHECK-NEXT: vpaddd %xmm0, %xmm1, %xmm0
115 %A = getelementptr i16, i16* %param, <4 x i32> %off
; Stress test: a scalar i16 base with a <64 x i32> offset vector.  The
; <64 x i16*> result does not fit in registers, so it is returned through
; a pointer loaded from 8(%ebp) (the stores through %eax below) and the
; function returns with retl $4 to pop it.  Each 4-lane chunk is
; offset*2 (vpaddd x,x) plus the base broadcast from 12(%ebp) in %xmm5;
; chunks that do not fit in xmm registers are spilled to the 32-byte
; aligned frame set up at entry.
; NOTE(review): the ret instruction and closing brace are not visible in
; this view -- block looks truncated; regenerate the assertions with
; update_llc_test_checks.py against the full file.
119 define <64 x i16*> @AGEP9(i16* %param, <64 x i32> %off) nounwind {
120 ; CHECK-LABEL: AGEP9:
122 ; CHECK-NEXT: pushl %ebp
123 ; CHECK-NEXT: movl %esp, %ebp
124 ; CHECK-NEXT: andl $-32, %esp
125 ; CHECK-NEXT: subl $160, %esp
126 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm3
127 ; CHECK-NEXT: vbroadcastss 12(%ebp), %xmm5
128 ; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
129 ; CHECK-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
130 ; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0
131 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
132 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
133 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
134 ; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm0
135 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
136 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
137 ; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm0
138 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
139 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
140 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
141 ; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm0
142 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
143 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
144 ; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm0
145 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
146 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
147 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
148 ; CHECK-NEXT: vmovdqa 40(%ebp), %xmm0
149 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
150 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
151 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
152 ; CHECK-NEXT: vmovdqa 56(%ebp), %xmm0
153 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
154 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
155 ; CHECK-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%e{{[sb]}}p) # 16-byte Spill
156 ; CHECK-NEXT: vmovdqa 72(%ebp), %xmm3
157 ; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
158 ; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm0
159 ; CHECK-NEXT: vmovdqa %xmm0, (%esp) # 16-byte Spill
160 ; CHECK-NEXT: vmovdqa 88(%ebp), %xmm4
161 ; CHECK-NEXT: vpaddd %xmm4, %xmm4, %xmm4
162 ; CHECK-NEXT: vpaddd %xmm4, %xmm5, %xmm4
163 ; CHECK-NEXT: vmovdqa 104(%ebp), %xmm1
164 ; CHECK-NEXT: vpaddd %xmm1, %xmm1, %xmm1
165 ; CHECK-NEXT: vpaddd %xmm1, %xmm5, %xmm1
166 ; CHECK-NEXT: vmovdqa 120(%ebp), %xmm6
167 ; CHECK-NEXT: vpaddd %xmm6, %xmm6, %xmm6
168 ; CHECK-NEXT: vpaddd %xmm6, %xmm5, %xmm6
169 ; CHECK-NEXT: vmovdqa 136(%ebp), %xmm2
170 ; CHECK-NEXT: vpaddd %xmm2, %xmm2, %xmm2
171 ; CHECK-NEXT: vpaddd %xmm2, %xmm5, %xmm2
172 ; CHECK-NEXT: vmovdqa 152(%ebp), %xmm7
173 ; CHECK-NEXT: vpaddd %xmm7, %xmm7, %xmm7
174 ; CHECK-NEXT: vpaddd %xmm7, %xmm5, %xmm7
175 ; CHECK-NEXT: vmovdqa 168(%ebp), %xmm0
176 ; CHECK-NEXT: vpaddd %xmm0, %xmm0, %xmm0
177 ; CHECK-NEXT: vpaddd %xmm0, %xmm5, %xmm0
178 ; CHECK-NEXT: vmovdqa 184(%ebp), %xmm3
179 ; CHECK-NEXT: vpaddd %xmm3, %xmm3, %xmm3
180 ; CHECK-NEXT: vpaddd %xmm3, %xmm5, %xmm3
; The computed chunks are written to the result buffer whose address is
; the implicit first stack argument at 8(%ebp).
181 ; CHECK-NEXT: movl 8(%ebp), %eax
182 ; CHECK-NEXT: vmovdqa %xmm3, 240(%eax)
183 ; CHECK-NEXT: vmovdqa %xmm0, 224(%eax)
184 ; CHECK-NEXT: vmovdqa %xmm7, 208(%eax)
185 ; CHECK-NEXT: vmovdqa %xmm2, 192(%eax)
186 ; CHECK-NEXT: vmovdqa %xmm6, 176(%eax)
187 ; CHECK-NEXT: vmovdqa %xmm1, 160(%eax)
188 ; CHECK-NEXT: vmovdqa %xmm4, 144(%eax)
189 ; CHECK-NEXT: vmovaps (%esp), %xmm0 # 16-byte Reload
190 ; CHECK-NEXT: vmovaps %xmm0, 128(%eax)
191 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
192 ; CHECK-NEXT: vmovaps %xmm0, 112(%eax)
193 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
194 ; CHECK-NEXT: vmovaps %xmm0, 96(%eax)
195 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
196 ; CHECK-NEXT: vmovaps %xmm0, 80(%eax)
197 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
198 ; CHECK-NEXT: vmovaps %xmm0, 64(%eax)
199 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
200 ; CHECK-NEXT: vmovaps %xmm0, 48(%eax)
201 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
202 ; CHECK-NEXT: vmovaps %xmm0, 32(%eax)
203 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
204 ; CHECK-NEXT: vmovaps %xmm0, 16(%eax)
205 ; CHECK-NEXT: vmovaps {{[-0-9]+}}(%e{{[sb]}}p), %xmm0 # 16-byte Reload
206 ; CHECK-NEXT: vmovaps %xmm0, (%eax)
207 ; CHECK-NEXT: movl %ebp, %esp
208 ; CHECK-NEXT: popl %ebp
209 ; CHECK-NEXT: vzeroupper
210 ; CHECK-NEXT: retl $4
211 %A = getelementptr i16, i16* %param, <64 x i32> %off