; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -gvn -S < %s | FileCheck %s

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:4:5"
target triple = "x86_64-unknown-linux-gnu"

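; The "ni:4:5" suffix in the datalayout marks address spaces 4 and 5 as
; non-integral: pointers in those address spaces have no stable bit
; representation, and the optimizer may not introduce inttoptr/ptrtoint
; conversions for them. The tests below pin down that GVN's load/store
; forwarding never performs such a conversion implicitly.
;
; In @f0, @f1 and @multini, the block names flag the intent: %neverTaken is
; presumed dynamically dead, so an illegal forwarding there could only be
; observed by inspecting the IR, which is exactly what the CHECK lines do.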
define void @f0(i1 %alwaysFalse, i64 %val, i64* %loc) {
; CHECK-LABEL: @f0(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    store i64 [[VAL:%.*]], i64* [[LOC:%.*]], align 8
; CHECK-NEXT:    br i1 [[ALWAYSFALSE:%.*]], label [[NEVERTAKEN:%.*]], label [[ALWAYSTAKEN:%.*]]
; CHECK:       neverTaken:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i64* [[LOC]] to i8 addrspace(4)**
; CHECK-NEXT:    [[PTR:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)** [[LOC_BC]], align 8
; CHECK-NEXT:    store i8 5, i8 addrspace(4)* [[PTR]], align 1
; CHECK-NEXT:    ret void
; CHECK:       alwaysTaken:
; CHECK-NEXT:    ret void
;
entry:
  store i64 %val, i64* %loc
  br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken

neverTaken:
  %loc.bc = bitcast i64* %loc to i8 addrspace(4)**
  %ptr = load i8 addrspace(4)*, i8 addrspace(4)** %loc.bc
  store i8 5, i8 addrspace(4)* %ptr
  ret void

alwaysTaken:
  ret void
}

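; @f0 stores an i64 and reloads the same bits as an addrspace(4) pointer;
; forwarding %val into %ptr would require an implicit inttoptr, so the load
; must survive GVN. @f1 below is the mirror image (pointer stored, integer
; reloaded), which would require a ptrtoint.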
define i64 @f1(i1 %alwaysFalse, i8 addrspace(4)* %val, i8 addrspace(4)** %loc) {
; CHECK-LABEL: @f1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    store i8 addrspace(4)* [[VAL:%.*]], i8 addrspace(4)** [[LOC:%.*]], align 8
; CHECK-NEXT:    br i1 [[ALWAYSFALSE:%.*]], label [[NEVERTAKEN:%.*]], label [[ALWAYSTAKEN:%.*]]
; CHECK:       neverTaken:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)** [[LOC]] to i64*
; CHECK-NEXT:    [[INT:%.*]] = load i64, i64* [[LOC_BC]], align 8
; CHECK-NEXT:    ret i64 [[INT]]
; CHECK:       alwaysTaken:
; CHECK-NEXT:    ret i64 42
;
entry:
  store i8 addrspace(4)* %val, i8 addrspace(4)** %loc
  br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken

neverTaken:
  %loc.bc = bitcast i8 addrspace(4)** %loc to i64*
  %int = load i64, i64* %loc.bc
  ret i64 %int

alwaysTaken:
  ret i64 42
}

;; Note: For terseness, we stop using the %alwaysFalse trick for the
;; tests below and just exercise the bits of forwarding logic directly.

declare void @llvm.memset.p4i8.i64(i8 addrspace(4)* nocapture, i8, i64, i1) nounwind

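; For the memset tests below: the fill byte defines the stored bits as an
; integer pattern. Forwarding a nonzero pattern (i8 7 repeated) to a load of
; an addrspace(4) pointer would amount to an inttoptr; an all-zero pattern is
; the one case with a known pointer interpretation (null), so it can be
; forwarded.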
; Can't forward as the load might be dead. (Pretend we wrote out the alwaysfalse idiom above.)
define i8 addrspace(4)* @neg_forward_memset(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_memset(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 7, i64 8, i1 false)
; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8 7, i64 8, i1 false)
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

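; The same restriction applies when the loaded type is a vector of
; non-integral pointers rather than a scalar pointer.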
define <1 x i8 addrspace(4)*> @neg_forward_memset_vload(<1 x i8 addrspace(4)*> addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_memset_vload(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 7, i64 8, i1 false)
; CHECK-NEXT:    [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]], align 8
; CHECK-NEXT:    ret <1 x i8 addrspace(4)*> [[REF]]
;
entry:
  %loc.bc = bitcast <1 x i8 addrspace(4)*> addrspace(4)* %loc to i8 addrspace(4)*
  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8 7, i64 8, i1 false)
  %ref = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* %loc
  ret <1 x i8 addrspace(4)*> %ref
}

; Can forward since we can do so without breaking types
define i8 addrspace(4)* @forward_memset_zero(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @forward_memset_zero(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 0, i64 8, i1 false)
; CHECK-NEXT:    ret i8 addrspace(4)* null
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8 0, i64 8, i1 false)
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

; Can't forward as the load might be dead. (Pretend we wrote out the alwaysfalse idiom above.)
define i8 addrspace(4)* @neg_forward_store(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_store(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
; CHECK-NEXT:    store i64 5, i64 addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i64 addrspace(4)*
  store i64 5, i64 addrspace(4)* %loc.bc
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

define <1 x i8 addrspace(4)*> @neg_forward_store_vload(<1 x i8 addrspace(4)*> addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_store_vload(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
; CHECK-NEXT:    store i64 5, i64 addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT:    [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]], align 8
; CHECK-NEXT:    ret <1 x i8 addrspace(4)*> [[REF]]
;
entry:
  %loc.bc = bitcast <1 x i8 addrspace(4)*> addrspace(4)* %loc to i64 addrspace(4)*
  store i64 5, i64 addrspace(4)* %loc.bc
  %ref = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* %loc
  ret <1 x i8 addrspace(4)*> %ref
}

; Nulls have known bit patterns, so we can forward
define i8 addrspace(4)* @forward_store_zero(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @forward_store_zero(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
; CHECK-NEXT:    store i64 0, i64 addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT:    ret i8 addrspace(4)* null
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i64 addrspace(4)*
  store i64 0, i64 addrspace(4)* %loc.bc
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

; Nulls have known bit patterns, so we can forward
define i8 addrspace(4)* @forward_store_zero2(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @forward_store_zero2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i32> addrspace(4)*
; CHECK-NEXT:    store <2 x i32> zeroinitializer, <2 x i32> addrspace(4)* [[LOC_BC]], align 8
; CHECK-NEXT:    ret i8 addrspace(4)* null
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to <2 x i32> addrspace(4)*
  store <2 x i32> zeroinitializer, <2 x i32> addrspace(4)* %loc.bc
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

@NonZeroConstant = constant <4 x i64> <i64 3, i64 3, i64 3, i64 3>
@NonZeroConstant2 = constant <4 x i64 addrspace(4)*> <
  i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* null, i32 3),
  i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* null, i32 3),
  i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* null, i32 3),
  i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* null, i32 3)>
@ZeroConstant = constant <4 x i64> zeroinitializer

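; For the memcpy tests, the source is a constant global, so GVN can try to
; forward the copied constant into the load. Whether that is legal depends on
; the constant's type: forwarding the i64 elements of @NonZeroConstant into a
; pointer-typed load (or vice versa) would need an int<->ptr conversion,
; while @NonZeroConstant2 already holds addrspace(4) pointers and
; @ZeroConstant forwards as null.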
; Can't forward as the load might be dead. (Pretend we wrote out the alwaysfalse idiom above.)
define i8 addrspace(4)* @neg_forward_memcopy(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_memcopy(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 8, i1 false)
; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64>* @NonZeroConstant to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

define i64 addrspace(4)* @neg_forward_memcopy2(i64 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_memcopy2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i64 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 8, i1 false)
; CHECK-NEXT:    [[REF:%.*]] = load i64 addrspace(4)*, i64 addrspace(4)* addrspace(4)* [[LOC]], align 8
; CHECK-NEXT:    ret i64 addrspace(4)* [[REF]]
;
entry:
  %loc.bc = bitcast i64 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64>* @NonZeroConstant to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
  %ref = load i64 addrspace(4)*, i64 addrspace(4)* addrspace(4)* %loc
  ret i64 addrspace(4)* %ref
}

define i8 addrspace(4)* @forward_memcopy(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @forward_memcopy(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*), i64 8, i1 false)
; CHECK-NEXT:    ret i8 addrspace(4)* bitcast (i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* null, i32 3) to i8 addrspace(4)*)
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

define i64 addrspace(4)* @forward_memcopy2(i64 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @forward_memcopy2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i64 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*), i64 8, i1 false)
; CHECK-NEXT:    ret i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* null, i32 3)
;
entry:
  %loc.bc = bitcast i64 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
  %ref = load i64 addrspace(4)*, i64 addrspace(4)* addrspace(4)* %loc
  ret i64 addrspace(4)* %ref
}

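; Vector-typed loads from the memcpy'd region follow the same rule:
; forwarding only happens when the elements keep their pointer-ness (see
; @forward_memcpy_vload3 below), never when pointers would be reinterpreted
; as integers or integers as pointers.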
define <1 x i8 addrspace(4)*> @neg_forward_memcpy_vload(<1 x i8 addrspace(4)*> addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_memcpy_vload(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 8, i1 false)
; CHECK-NEXT:    [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]], align 8
; CHECK-NEXT:    ret <1 x i8 addrspace(4)*> [[REF]]
;
entry:
  %loc.bc = bitcast <1 x i8 addrspace(4)*> addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64>* @NonZeroConstant to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
  %ref = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* %loc
  ret <1 x i8 addrspace(4)*> %ref
}

define <4 x i64 addrspace(4)*> @neg_forward_memcpy_vload2(<4 x i64 addrspace(4)*> addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_memcpy_vload2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <4 x i64 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @NonZeroConstant to i8*), i64 32, i1 false)
; CHECK-NEXT:    [[REF:%.*]] = load <4 x i64 addrspace(4)*>, <4 x i64 addrspace(4)*> addrspace(4)* [[LOC]], align 32
; CHECK-NEXT:    ret <4 x i64 addrspace(4)*> [[REF]]
;
entry:
  %loc.bc = bitcast <4 x i64 addrspace(4)*> addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64>* @NonZeroConstant to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 32, i1 false)
  %ref = load <4 x i64 addrspace(4)*>, <4 x i64 addrspace(4)*> addrspace(4)* %loc
  ret <4 x i64 addrspace(4)*> %ref
}

define <4 x i64> @neg_forward_memcpy_vload3(<4 x i64> addrspace(4)* %loc) {
; CHECK-LABEL: @neg_forward_memcpy_vload3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <4 x i64> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*), i64 32, i1 false)
; CHECK-NEXT:    [[REF:%.*]] = load <4 x i64>, <4 x i64> addrspace(4)* [[LOC]], align 32
; CHECK-NEXT:    ret <4 x i64> [[REF]]
;
entry:
  %loc.bc = bitcast <4 x i64> addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 32, i1 false)
  %ref = load <4 x i64>, <4 x i64> addrspace(4)* %loc
  ret <4 x i64> %ref
}

define <1 x i64 addrspace(4)*> @forward_memcpy_vload3(<4 x i64 addrspace(4)*> addrspace(4)* %loc) {
; CHECK-LABEL: @forward_memcpy_vload3(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <4 x i64 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*), i64 32, i1 false)
; CHECK-NEXT:    ret <1 x i64 addrspace(4)*> <i64 addrspace(4)* getelementptr (i64, i64 addrspace(4)* null, i32 3)>
;
entry:
  %loc.bc = bitcast <4 x i64 addrspace(4)*> addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64 addrspace(4)*>* @NonZeroConstant2 to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 32, i1 false)
  %ref = load <4 x i64 addrspace(4)*>, <4 x i64 addrspace(4)*> addrspace(4)* %loc
  %val = extractelement <4 x i64 addrspace(4)*> %ref, i32 0
  %ret = insertelement <1 x i64 addrspace(4)*> undef, i64 addrspace(4)* %val, i32 0
  ret <1 x i64 addrspace(4)*> %ret
}

; Can forward since we can do so without breaking types
define i8 addrspace(4)* @forward_memcpy_zero(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @forward_memcpy_zero(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
; CHECK-NEXT:    call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8* bitcast (<4 x i64>* @ZeroConstant to i8*), i64 8, i1 false)
; CHECK-NEXT:    ret i8 addrspace(4)* null
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to i8 addrspace(4)*
  %src.bc = bitcast <4 x i64>* @ZeroConstant to i8*
  call void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8* %src.bc, i64 8, i1 false)
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc
  ret i8 addrspace(4)* %ref
}

declare void @llvm.memcpy.p4i8.p0i8.i64(i8 addrspace(4)* nocapture, i8* nocapture, i64, i1) nounwind

; Same as the neg_forward_store cases, but for non defs (the earlier access
; only partially lines up with the load, so it is a clobber rather than a def).
; (Pretend we wrote out the alwaysfalse idiom above.)
define i8 addrspace(4)* @neg_store_clobber(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @neg_store_clobber(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i64> addrspace(4)*
; CHECK-NEXT:    store <2 x i64> <i64 4, i64 4>, <2 x i64> addrspace(4)* [[LOC_BC]], align 16
; CHECK-NEXT:    [[LOC_OFF:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], i64 1
; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC_OFF]], align 8
; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to <2 x i64> addrspace(4)*
  store <2 x i64> <i64 4, i64 4>, <2 x i64> addrspace(4)* %loc.bc
  %loc.off = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc, i64 1
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc.off
  ret i8 addrspace(4)* %ref
}

declare void @use(<2 x i64>) inaccessiblememonly

; Same as the neg_forward_store cases, but for non defs (the earlier access
; only partially lines up with the load, so it is a clobber rather than a def).
; (Pretend we wrote out the alwaysfalse idiom above.)
define i8 addrspace(4)* @neg_load_clobber(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @neg_load_clobber(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i64> addrspace(4)*
; CHECK-NEXT:    [[V:%.*]] = load <2 x i64>, <2 x i64> addrspace(4)* [[LOC_BC]], align 16
; CHECK-NEXT:    call void @use(<2 x i64> [[V]])
; CHECK-NEXT:    [[LOC_OFF:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], i64 1
; CHECK-NEXT:    [[REF:%.*]] = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC_OFF]], align 8
; CHECK-NEXT:    ret i8 addrspace(4)* [[REF]]
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to <2 x i64> addrspace(4)*
  %v = load <2 x i64>, <2 x i64> addrspace(4)* %loc.bc
  call void @use(<2 x i64> %v)
  %loc.off = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc, i64 1
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc.off
  ret i8 addrspace(4)* %ref
}

define i8 addrspace(4)* @store_clobber_zero(i8 addrspace(4)* addrspace(4)* %loc) {
; CHECK-LABEL: @store_clobber_zero(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)* addrspace(4)* [[LOC:%.*]] to <2 x i64> addrspace(4)*
; CHECK-NEXT:    store <2 x i64> zeroinitializer, <2 x i64> addrspace(4)* [[LOC_BC]], align 16
; CHECK-NEXT:    [[LOC_OFF:%.*]] = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* [[LOC]], i64 1
; CHECK-NEXT:    ret i8 addrspace(4)* null
;
entry:
  %loc.bc = bitcast i8 addrspace(4)* addrspace(4)* %loc to <2 x i64> addrspace(4)*
  store <2 x i64> zeroinitializer, <2 x i64> addrspace(4)* %loc.bc
  %loc.off = getelementptr i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc, i64 1
  %ref = load i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %loc.off
  ret i8 addrspace(4)* %ref
}

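; The next two tests load a narrower value out of a wider load of
; non-integral pointers. No int<->ptr conversion would be involved, but the
; CHECK lines document that GVN currently leaves both loads in place rather
; than extracting the subvector or element from the wider value.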
define void @smaller_vector(i8* %p) {
; CHECK-LABEL: @smaller_vector(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A:%.*]] = bitcast i8* [[P:%.*]] to <4 x i64 addrspace(4)*>*
; CHECK-NEXT:    [[B:%.*]] = bitcast i8* [[P]] to <2 x i64 addrspace(4)*>*
; CHECK-NEXT:    [[V4:%.*]] = load <4 x i64 addrspace(4)*>, <4 x i64 addrspace(4)*>* [[A]], align 32
; CHECK-NEXT:    [[V2:%.*]] = load <2 x i64 addrspace(4)*>, <2 x i64 addrspace(4)*>* [[B]], align 32
; CHECK-NEXT:    call void @use.v2(<2 x i64 addrspace(4)*> [[V2]])
; CHECK-NEXT:    call void @use.v4(<4 x i64 addrspace(4)*> [[V4]])
; CHECK-NEXT:    ret void
;
entry:
  %a = bitcast i8* %p to <4 x i64 addrspace(4)*>*
  %b = bitcast i8* %p to <2 x i64 addrspace(4)*>*
  %v4 = load <4 x i64 addrspace(4)*>, <4 x i64 addrspace(4)*>* %a, align 32
  %v2 = load <2 x i64 addrspace(4)*>, <2 x i64 addrspace(4)*>* %b, align 32
  call void @use.v2(<2 x i64 addrspace(4)*> %v2)
  call void @use.v4(<4 x i64 addrspace(4)*> %v4)
  ret void
}

define i64 addrspace(4)* @vector_extract(i8* %p) {
; CHECK-LABEL: @vector_extract(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A:%.*]] = bitcast i8* [[P:%.*]] to <4 x i64 addrspace(4)*>*
; CHECK-NEXT:    [[B:%.*]] = bitcast i8* [[P]] to i64 addrspace(4)**
; CHECK-NEXT:    [[V4:%.*]] = load <4 x i64 addrspace(4)*>, <4 x i64 addrspace(4)*>* [[A]], align 32
; CHECK-NEXT:    [[RES:%.*]] = load i64 addrspace(4)*, i64 addrspace(4)** [[B]], align 32
; CHECK-NEXT:    call void @use.v4(<4 x i64 addrspace(4)*> [[V4]])
; CHECK-NEXT:    ret i64 addrspace(4)* [[RES]]
;
entry:
  %a = bitcast i8* %p to <4 x i64 addrspace(4)*>*
  %b = bitcast i8* %p to i64 addrspace(4)**
  %v4 = load <4 x i64 addrspace(4)*>, <4 x i64 addrspace(4)*>* %a, align 32
  %res = load i64 addrspace(4)*, i64 addrspace(4)** %b, align 32
  call void @use.v4(<4 x i64 addrspace(4)*> %v4)
  ret i64 addrspace(4)* %res
}

declare void @use.v2(<2 x i64 addrspace(4)*>)
declare void @use.v4(<4 x i64 addrspace(4)*>)

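; Address spaces 4 and 5 are both non-integral, but they are still distinct:
; a value stored as an addrspace(4) pointer must not be forwarded to a load
; of an addrspace(5) pointer, since no lossless conversion between the two
; representations is known.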
define i8 addrspace(5)* @multini(i1 %alwaysFalse, i8 addrspace(4)* %val, i8 addrspace(4)** %loc) {
; CHECK-LABEL: @multini(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    store i8 addrspace(4)* [[VAL:%.*]], i8 addrspace(4)** [[LOC:%.*]], align 8
; CHECK-NEXT:    br i1 [[ALWAYSFALSE:%.*]], label [[NEVERTAKEN:%.*]], label [[ALWAYSTAKEN:%.*]]
; CHECK:       neverTaken:
; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast i8 addrspace(4)** [[LOC]] to i8 addrspace(5)**
; CHECK-NEXT:    [[DIFFERENTAS:%.*]] = load i8 addrspace(5)*, i8 addrspace(5)** [[LOC_BC]], align 8
; CHECK-NEXT:    ret i8 addrspace(5)* [[DIFFERENTAS]]
; CHECK:       alwaysTaken:
; CHECK-NEXT:    ret i8 addrspace(5)* null
;
entry:
  store i8 addrspace(4)* %val, i8 addrspace(4)** %loc
  br i1 %alwaysFalse, label %neverTaken, label %alwaysTaken

neverTaken:
  %loc.bc = bitcast i8 addrspace(4)** %loc to i8 addrspace(5)**
  %differentas = load i8 addrspace(5)*, i8 addrspace(5)** %loc.bc
  ret i8 addrspace(5)* %differentas

alwaysTaken:
  ret i8 addrspace(5)* null
}