; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -scoped-noalias-aa -slp-vectorizer -mtriple=arm64-apple-darwin -enable-new-pm=false -S %s | FileCheck %s

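; %src and %dst may alias, so vectorizing the two ashr/store lanes would require
; memory runtime checks (versioning). With only two lanes this is presumably not
; profitable, so the checks below expect the scalar code to be kept.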
define void @needs_versioning_not_profitable(i32* %dst, i32* %src) {
; CHECK-LABEL: @needs_versioning_not_profitable(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %src.0 = load i32, i32* %src, align 4
  %r.0 = ashr i32 %src.0, 16
  store i32 %r.0, i32* %dst, align 4
  %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
  %src.1 = load i32, i32* %src.gep.1, align 4
  %r.1 = ashr i32 %src.1, 16
  %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
  store i32 %r.1, i32* %dst.gep.1, align 4
  ret void
}

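; Same access pattern with four lanes, where versioning would presumably pay off.
; The checks still expect scalar code, as no runtime checks are emitted here yet.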
define void @needs_versioning_profitable(i32* %dst, i32* %src) {
; CHECK-LABEL: @needs_versioning_profitable(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
; CHECK-NEXT:    [[SRC_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 2
; CHECK-NEXT:    [[SRC_2:%.*]] = load i32, i32* [[SRC_GEP_2]], align 4
; CHECK-NEXT:    [[R_2:%.*]] = ashr i32 [[SRC_2]], 16
; CHECK-NEXT:    [[DST_GEP_2:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 2
; CHECK-NEXT:    store i32 [[R_2]], i32* [[DST_GEP_2]], align 4
; CHECK-NEXT:    [[SRC_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 3
; CHECK-NEXT:    [[SRC_3:%.*]] = load i32, i32* [[SRC_GEP_3]], align 4
; CHECK-NEXT:    [[R_3:%.*]] = ashr i32 [[SRC_3]], 16
; CHECK-NEXT:    [[DST_GEP_3:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 3
; CHECK-NEXT:    store i32 [[R_3]], i32* [[DST_GEP_3]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %src.0 = load i32, i32* %src, align 4
  %r.0 = ashr i32 %src.0, 16
  store i32 %r.0, i32* %dst, align 4
  %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
  %src.1 = load i32, i32* %src.gep.1, align 4
  %r.1 = ashr i32 %src.1, 16
  %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
  store i32 %r.1, i32* %dst.gep.1, align 4
  %src.gep.2 = getelementptr inbounds i32, i32* %src, i64 2
  %src.2 = load i32, i32* %src.gep.2, align 4
  %r.2 = ashr i32 %src.2, 16
  %dst.gep.2 = getelementptr inbounds i32, i32* %dst, i64 2
  store i32 %r.2, i32* %dst.gep.2, align 4
  %src.gep.3 = getelementptr inbounds i32, i32* %src, i64 3
  %src.3 = load i32, i32* %src.gep.3, align 4
  %r.3 = ashr i32 %src.3, 16
  %dst.gep.3 = getelementptr inbounds i32, i32* %dst, i64 3
  store i32 %r.3, i32* %dst.gep.3, align 4
  ret void
}

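; Both loads are executed before both stores, so the accesses can be vectorized to
; <2 x i32> without any runtime checks.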
define void @no_version(i32* nocapture %dst, i32* nocapture readonly %src) {
; CHECK-LABEL: @no_version(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC:%.*]], i64 1
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[SRC]] to <2 x i32>*
; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, <2 x i32>* [[TMP0]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = ashr <2 x i32> [[TMP1]], <i32 16, i32 16>
; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i32* [[DST]] to <2 x i32>*
; CHECK-NEXT:    store <2 x i32> [[TMP2]], <2 x i32>* [[TMP3]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %src.0 = load i32, i32* %src, align 4
  %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
  %src.1 = load i32, i32* %src.gep.1, align 4
  %r.0 = ashr i32 %src.0, 16
  %r.1 = ashr i32 %src.1, 16
  %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
  store i32 %r.0, i32* %dst, align 4
  store i32 %r.1, i32* %dst.gep.1, align 4
  ret void
}

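; Each element of %out_block is xor'ed with the corresponding element of %counter
; and stored back, so runtime checks would be required for the pointer pair; the
; checks below expect the scalar form to be kept.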
define void @version_multiple(i32* nocapture %out_block, i32* nocapture readonly %counter) {
; CHECK-LABEL: @version_multiple(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[COUNTER:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[OUT_BLOCK:%.*]], align 4
; CHECK-NEXT:    [[XOR:%.*]] = xor i32 [[TMP1]], [[TMP0]]
; CHECK-NEXT:    store i32 [[XOR]], i32* [[OUT_BLOCK]], align 4
; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 1
; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[ARRAYIDX_1]], align 4
; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = load i32, i32* [[ARRAYIDX2_1]], align 4
; CHECK-NEXT:    [[XOR_1:%.*]] = xor i32 [[TMP3]], [[TMP2]]
; CHECK-NEXT:    store i32 [[XOR_1]], i32* [[ARRAYIDX2_1]], align 4
; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[ARRAYIDX_2]], align 4
; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 2
; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[ARRAYIDX2_2]], align 4
; CHECK-NEXT:    [[XOR_2:%.*]] = xor i32 [[TMP5]], [[TMP4]]
; CHECK-NEXT:    store i32 [[XOR_2]], i32* [[ARRAYIDX2_2]], align 4
; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i32, i32* [[COUNTER]], i64 3
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, i32* [[ARRAYIDX_3]], align 4
; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i32, i32* [[OUT_BLOCK]], i64 3
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[ARRAYIDX2_3]], align 4
; CHECK-NEXT:    [[XOR_3:%.*]] = xor i32 [[TMP7]], [[TMP6]]
; CHECK-NEXT:    store i32 [[XOR_3]], i32* [[ARRAYIDX2_3]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i32, i32* %counter, align 4
  %1 = load i32, i32* %out_block, align 4
  %xor = xor i32 %1, %0
  store i32 %xor, i32* %out_block, align 4
  %arrayidx.1 = getelementptr inbounds i32, i32* %counter, i64 1
  %2 = load i32, i32* %arrayidx.1, align 4
  %arrayidx2.1 = getelementptr inbounds i32, i32* %out_block, i64 1
  %3 = load i32, i32* %arrayidx2.1, align 4
  %xor.1 = xor i32 %3, %2
  store i32 %xor.1, i32* %arrayidx2.1, align 4
  %arrayidx.2 = getelementptr inbounds i32, i32* %counter, i64 2
  %4 = load i32, i32* %arrayidx.2, align 4
  %arrayidx2.2 = getelementptr inbounds i32, i32* %out_block, i64 2
  %5 = load i32, i32* %arrayidx2.2, align 4
  %xor.2 = xor i32 %5, %4
  store i32 %xor.2, i32* %arrayidx2.2, align 4
  %arrayidx.3 = getelementptr inbounds i32, i32* %counter, i64 3
  %6 = load i32, i32* %arrayidx.3, align 4
  %arrayidx2.3 = getelementptr inbounds i32, i32* %out_block, i64 3
  %7 = load i32, i32* %arrayidx2.3, align 4
  %xor.3 = xor i32 %7, %6
  store i32 %xor.3, i32* %arrayidx2.3, align 4
  ret void
}

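; %r.0 is also used by the return in the exit block, i.e. outside the block that
; would have to be versioned; the scalar code is expected to be kept.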
define i32 @use_outside_version_bb(i32* %dst, i32* %src, i1 %c.1) {
; CHECK-LABEL: @use_outside_version_bb(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
; CHECK-NEXT:    br label [[EXIT:%.*]]
; CHECK:       exit:
; CHECK-NEXT:    ret i32 [[R_0]]
;
entry:
  %src.0 = load i32, i32* %src, align 4
  %r.0 = ashr i32 %src.0, 16
  store i32 %r.0, i32* %dst, align 4
  %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
  %src.1 = load i32, i32* %src.gep.1, align 4
  %r.1 = ashr i32 %src.1, 16
  %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
  store i32 %r.1, i32* %dst.gep.1, align 4
  br label %exit

exit:
  ret i32 %r.0
}

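; The returned value %add is defined in the same block as the memory accesses;
; the checks expect the scalar code to be kept.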
define i32 @value_used_in_return(i32* %dst, i32* %src, i32 %x) {
; CHECK-LABEL: @value_used_in_return(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
; CHECK-NEXT:    [[ADD:%.*]] = add i32 [[X:%.*]], 20
; CHECK-NEXT:    ret i32 [[ADD]]
;
entry:
  %src.0 = load i32, i32* %src, align 4
  %r.0 = ashr i32 %src.0, 16
  store i32 %r.0, i32* %dst, align 4
  %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
  %src.1 = load i32, i32* %src.gep.1, align 4
  %r.1 = ashr i32 %src.1, 16
  %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
  store i32 %r.1, i32* %dst.gep.1, align 4
  %add = add i32 %x, 20
  ret i32 %add
}

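; The potentially aliasing accesses are in a conditionally executed block; the
; checks expect them to stay scalar.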
define i32 @needs_versioning2_cond_br(i32* %dst, i32* %src, i1 %c.1) {
; CHECK-LABEL: @needs_versioning2_cond_br(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[C_1:%.*]], label [[THEN:%.*]], label [[ELSE:%.*]]
; CHECK:       then:
; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC:%.*]], align 4
; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
; CHECK-NEXT:    ret i32 10
; CHECK:       else:
; CHECK-NEXT:    ret i32 0
;
entry:
  br i1 %c.1, label %then, label %else

then:
  %src.0 = load i32, i32* %src, align 4
  %r.0 = ashr i32 %src.0, 16
  store i32 %r.0, i32* %dst, align 4
  %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
  %src.1 = load i32, i32* %src.gep.1, align 4
  %r.1 = ashr i32 %src.1, 16
  %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
  store i32 %r.1, i32* %dst.gep.1, align 4
  ret i32 10

else:
  ret i32 0
}

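; The source pointer is itself loaded in the same block as the accesses it feeds;
; the checks expect the scalar code to be kept.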
define void @pointer_defined_in_bb(i32* %dst, i32** %src.p) {
; CHECK-LABEL: @pointer_defined_in_bb(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SRC:%.*]] = load i32*, i32** [[SRC_P:%.*]], align 8
; CHECK-NEXT:    [[SRC_0:%.*]] = load i32, i32* [[SRC]], align 4
; CHECK-NEXT:    [[R_0:%.*]] = ashr i32 [[SRC_0]], 16
; CHECK-NEXT:    store i32 [[R_0]], i32* [[DST:%.*]], align 4
; CHECK-NEXT:    [[SRC_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[SRC]], i64 1
; CHECK-NEXT:    [[SRC_1:%.*]] = load i32, i32* [[SRC_GEP_1]], align 4
; CHECK-NEXT:    [[R_1:%.*]] = ashr i32 [[SRC_1]], 16
; CHECK-NEXT:    [[DST_GEP_1:%.*]] = getelementptr inbounds i32, i32* [[DST]], i64 1
; CHECK-NEXT:    store i32 [[R_1]], i32* [[DST_GEP_1]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %src = load i32*, i32** %src.p
  %src.0 = load i32, i32* %src, align 4
  %r.0 = ashr i32 %src.0, 16
  store i32 %r.0, i32* %dst, align 4
  %src.gep.1 = getelementptr inbounds i32, i32* %src, i64 1
  %src.1 = load i32, i32* %src.gep.1, align 4
  %r.1 = ashr i32 %src.1, 16
  %dst.gep.1 = getelementptr inbounds i32, i32* %dst, i64 1
  store i32 %r.1, i32* %dst.gep.1, align 4
  ret void
}

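; The store and the later load/store use the same underlying object (%this), with a
; call to @clobber in between, so no versioning should be attempted.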
define void @clobber_same_underlying_object(i32* %this) {
; CHECK-LABEL: @clobber_same_underlying_object(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[P_3:%.*]] = getelementptr inbounds i32, i32* [[THIS:%.*]], i32 3
; CHECK-NEXT:    store i32 10, i32* [[P_3]], align 8
; CHECK-NEXT:    tail call void @clobber()
; CHECK-NEXT:    [[P_4:%.*]] = getelementptr inbounds i32, i32* [[THIS]], i32 4
; CHECK-NEXT:    [[L2:%.*]] = load i32, i32* [[P_4]], align 8
; CHECK-NEXT:    store i32 20, i32* [[P_4]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %p.3 = getelementptr inbounds i32, i32* %this, i32 3
  store i32 10, i32* %p.3, align 8
  tail call void @clobber()
  %p.4 = getelementptr inbounds i32, i32* %this, i32 4
  %l2 = load i32, i32* %p.4, align 8
  store i32 20, i32* %p.4, align 8
  ret void
}

declare void @clobber()

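; Only a tiny tree (a constant store and a single load/store) could be vectorized
; here, which is presumably not beneficial; the scalar code is expected to be kept.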
define void @slp_not_beneficial(i32* %A, i32* %B) {
; CHECK-LABEL: @slp_not_beneficial(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i32 4
; CHECK-NEXT:    store i32 0, i32* [[TMP]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds i32, i32* [[A]], i32 5
; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i32 4
; CHECK-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 8
; CHECK-NEXT:    store i32 [[TMP5]], i32* [[TMP3]], align 8
; CHECK-NEXT:    ret void
;
bb:
  %tmp = getelementptr inbounds i32, i32* %A, i32 4
  store i32 0, i32* %tmp, align 8
  %tmp3 = getelementptr inbounds i32, i32* %A, i32 5
  %tmp4 = getelementptr inbounds i32, i32* %B, i32 4
  %tmp5 = load i32, i32* %tmp4, align 8
  store i32 %tmp5, i32* %tmp3, align 8
  ret void
}

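; Reduced reproducer-style test: fmul/fadd chains loading through %ptr, %ptr.2 and a
; null pointer; the checks expect the accesses to remain scalar.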
define void @widget(double* %ptr, double* %ptr.2) {
; CHECK-LABEL: @widget(
; CHECK-NEXT:  bb14:
; CHECK-NEXT:    [[TMP3:%.*]] = load double, double* null, align 8
; CHECK-NEXT:    [[TMP4:%.*]] = fmul double undef, [[TMP3]]
; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds double, double* [[PTR:%.*]], i32 0
; CHECK-NEXT:    [[TMP6:%.*]] = load double, double* [[TMP5]], align 8
; CHECK-NEXT:    [[TMP7:%.*]] = fadd double [[TMP6]], [[TMP4]]
; CHECK-NEXT:    store double [[TMP7]], double* [[TMP5]], align 8
; CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds double, double* [[PTR_2:%.*]], i64 0
; CHECK-NEXT:    [[TMP9:%.*]] = load double, double* [[TMP8]], align 8
; CHECK-NEXT:    [[TMP10:%.*]] = fmul double undef, [[TMP9]]
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds double, double* [[PTR]], i32 1
; CHECK-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP11]], align 8
; CHECK-NEXT:    [[TMP13:%.*]] = fadd double [[TMP12]], [[TMP10]]
; CHECK-NEXT:    store double [[TMP13]], double* [[TMP11]], align 8
; CHECK-NEXT:    br label [[BB15:%.*]]
; CHECK:       bb15:
; CHECK-NEXT:    br label [[BB15]]
;
bb14:
  %tmp3 = load double, double* null, align 8
  %tmp4 = fmul double undef, %tmp3
  %tmp5 = getelementptr inbounds double, double* %ptr, i32 0
  %tmp6 = load double, double* %tmp5, align 8
  %tmp7 = fadd double %tmp6, %tmp4
  store double %tmp7, double* %tmp5, align 8
  %tmp8 = getelementptr inbounds double, double* %ptr.2, i64 0
  %tmp9 = load double, double* %tmp8, align 8
  %tmp10 = fmul double undef, %tmp9
  %tmp11 = getelementptr inbounds double, double* %ptr, i32 1
  %tmp12 = load double, double* %tmp11, align 8
  %tmp13 = fadd double %tmp12, %tmp10
  store double %tmp13, double* %tmp11, align 8
  br label %bb15

bb15:                                             ; preds = %bb15, %bb14
  br label %bb15
}

%struct = type { i32, i32, float, float }

; Some of the bounds collected as runtime-check candidates are removed again before
; the checks are generated. Make sure versioning is skipped in that case.
define void @test_bounds_removed_before_runtime_checks(%struct* %A, i32** %B, i1 %c) {
; CHECK-LABEL: @test_bounds_removed_before_runtime_checks(
; CHECK-NEXT:  bb:
; CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT:%.*]], %struct* [[A:%.*]], i64 0, i32 0
; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT]], %struct* [[A]], i64 0, i32 1
; CHECK-NEXT:    [[TMP0:%.*]] = bitcast i32* [[TMP11]] to <2 x i32>*
; CHECK-NEXT:    store <2 x i32> <i32 10, i32 300>, <2 x i32>* [[TMP0]], align 8
; CHECK-NEXT:    [[TMP13:%.*]] = load i32*, i32** [[B:%.*]], align 8
; CHECK-NEXT:    br i1 [[C:%.*]], label [[BB23:%.*]], label [[BB14:%.*]]
; CHECK:       bb14:
; CHECK-NEXT:    [[TMP15:%.*]] = sext i32 10 to i64
; CHECK-NEXT:    [[TMP16:%.*]] = add nsw i64 2, [[TMP15]]
; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr inbounds i32, i32* [[TMP13]], i64 [[TMP16]]
; CHECK-NEXT:    [[TMP18:%.*]] = bitcast i32* [[TMP17]] to i8*
; CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds i8, i8* [[TMP18]], i64 3
; CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT]], %struct* [[A]], i64 0, i32 2
; CHECK-NEXT:    store float 0.000000e+00, float* [[TMP20]], align 8
; CHECK-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP19]], align 1
; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT]], %struct* [[A]], i64 0, i32 3
; CHECK-NEXT:    store float 0.000000e+00, float* [[TMP22]], align 4
; CHECK-NEXT:    br label [[BB23]]
; CHECK:       bb23:
; CHECK-NEXT:    ret void
;
bb:
  %tmp1 = fmul float 10.0, 20.0
  %tmp2 = fptosi float %tmp1 to i32
  %tmp3 = fmul float 30.0, 20.0
  %tmp4 = fptosi float %tmp3 to i32
  %tmp5 = icmp sgt i32 100, %tmp2
  %tmp6 = select i1 %tmp5, i32 %tmp2, i32 10
  %tmp7 = select i1 false, i32 0, i32 %tmp6
  %tmp8 = icmp sgt i32 200, %tmp4
  %tmp9 = select i1 %tmp8, i32 %tmp4, i32 300
  %tmp10 = select i1 false, i32 0, i32 %tmp9
  %tmp11 = getelementptr inbounds %struct, %struct* %A, i64 0, i32 0
  store i32 %tmp7, i32* %tmp11, align 8
  %tmp12 = getelementptr inbounds %struct, %struct* %A, i64 0, i32 1
  store i32 %tmp10, i32* %tmp12, align 4
  %tmp13 = load i32*, i32** %B, align 8
  br i1 %c, label %bb23, label %bb14

bb14:
  %tmp15 = sext i32 %tmp7 to i64
  %tmp16 = add nsw i64 2, %tmp15
  %tmp17 = getelementptr inbounds i32, i32* %tmp13, i64 %tmp16
  %tmp18 = bitcast i32* %tmp17 to i8*
  %tmp19 = getelementptr inbounds i8, i8* %tmp18, i64 3
  %tmp20 = getelementptr inbounds %struct, %struct* %A, i64 0, i32 2
  store float 0.0, float* %tmp20, align 8
  %tmp21 = load i8, i8* %tmp19, align 1
  %tmp22 = getelementptr inbounds %struct, %struct* %A, i64 0, i32 3
  store float 0.0, float* %tmp22, align 4
  br label %bb23

bb23:
  ret void
}

; In this test there is only a single bound; make sure no runtime checks are generated.
define void @single_membound(double* %arg, double* %arg1, double %x) {
; CHECK-LABEL: @single_membound(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP:%.*]] = fsub double [[X:%.*]], 9.900000e+01
; CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds double, double* [[ARG:%.*]], i64 1
; CHECK-NEXT:    store double [[TMP]], double* [[TMP9]], align 8
; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds double, double* [[ARG1:%.*]], i64 0
; CHECK-NEXT:    [[TMP12:%.*]] = load double, double* [[TMP10]], align 8
; CHECK-NEXT:    [[TMP13:%.*]] = fsub double 1.000000e+00, [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds double, double* [[ARG]], i64 2
; CHECK-NEXT:    br label [[BB15:%.*]]
; CHECK:       bb15:
; CHECK-NEXT:    [[TMP16:%.*]] = fmul double [[TMP]], 2.000000e+01
; CHECK-NEXT:    store double [[TMP16]], double* [[TMP9]], align 8
; CHECK-NEXT:    [[TMP17:%.*]] = fmul double [[TMP13]], 3.000000e+01
; CHECK-NEXT:    store double [[TMP17]], double* [[TMP14]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %tmp = fsub double %x, 99.0
  %tmp9 = getelementptr inbounds double, double* %arg, i64 1
  store double %tmp, double* %tmp9, align 8
  %tmp10 = getelementptr inbounds double, double* %arg1, i64 0
  %tmp12 = load double, double* %tmp10, align 8
  %tmp13 = fsub double 1.0, %tmp12
  %tmp14 = getelementptr inbounds double, double* %arg, i64 2
  br label %bb15

bb15:
  %tmp16 = fmul double %tmp, 20.0
  store double %tmp16, double* %tmp9, align 8
  %tmp17 = fmul double %tmp13, 30.0
  store double %tmp17, double* %tmp14, align 8
  ret void
}