; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer -S -mtriple=x86_64-apple-macosx10.9.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

@a = common global ptr null, align 8
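
; The two ptrtoint results are stored to adjacent i64 slots (offsets 11 and 12),
; so the expected output below forms a <2 x ptr> GEP, a <2 x i64> ptrtoint, and a
; single <2 x i64> store, while %add.ptr stays scalar as the store address.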
; Function Attrs: nounwind ssp uwtable
define i32 @fn1() {
; CHECK-LABEL: @fn1(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr @a, align 8
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x ptr> [[TMP1]], <2 x ptr> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i64, <2 x ptr> [[TMP2]], <2 x i64> <i64 11, i64 56>
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 11
; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint <2 x ptr> [[TMP3]] to <2 x i64>
; CHECK-NEXT:    store <2 x i64> [[TMP4]], ptr [[ADD_PTR]], align 8
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load ptr, ptr @a, align 8
  %add.ptr = getelementptr inbounds i64, ptr %0, i64 11
  %1 = ptrtoint ptr %add.ptr to i64
  store i64 %1, ptr %add.ptr, align 8
  %add.ptr1 = getelementptr inbounds i64, ptr %0, i64 56
  %2 = ptrtoint ptr %add.ptr1 to i64
  %arrayidx2 = getelementptr inbounds i64, ptr %0, i64 12
  store i64 %2, ptr %arrayidx2, align 8
  ret i32 undef
}

declare float @llvm.powi.f32.i32(float, i32)
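
; All four powi calls use the same scalar exponent (%add1), so the expected
; output below is a single call to @llvm.powi.v4f32.i32 on a <4 x float> with
; that one i32 operand.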
define void @fn2(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: @fn2(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr [[A:%.*]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[B:%.*]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = add <4 x i32> [[TMP0]], [[TMP1]]
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x i32> [[TMP2]], i32 0
; CHECK-NEXT:    [[TMP4:%.*]] = sitofp <4 x i32> [[TMP2]] to <4 x float>
; CHECK-NEXT:    [[TMP5:%.*]] = call <4 x float> @llvm.powi.v4f32.i32(<4 x float> [[TMP4]], i32 [[TMP3]])
; CHECK-NEXT:    store <4 x float> [[TMP5]], ptr [[C:%.*]], align 4
; CHECK-NEXT:    ret void
;
entry:
  %i0 = load i32, ptr %a, align 4
  %i1 = load i32, ptr %b, align 4
  %add1 = add i32 %i0, %i1
  %fp1 = sitofp i32 %add1 to float
  %call1 = tail call float @llvm.powi.f32.i32(float %fp1, i32 %add1) nounwind readnone

  %arrayidx2 = getelementptr inbounds i32, ptr %a, i32 1
  %i2 = load i32, ptr %arrayidx2, align 4
  %arrayidx3 = getelementptr inbounds i32, ptr %b, i32 1
  %i3 = load i32, ptr %arrayidx3, align 4
  %add2 = add i32 %i2, %i3
  %fp2 = sitofp i32 %add2 to float
  %call2 = tail call float @llvm.powi.f32.i32(float %fp2, i32 %add1) nounwind readnone

  %arrayidx4 = getelementptr inbounds i32, ptr %a, i32 2
  %i4 = load i32, ptr %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i32, ptr %b, i32 2
  %i5 = load i32, ptr %arrayidx5, align 4
  %add3 = add i32 %i4, %i5
  %fp3 = sitofp i32 %add3 to float
  %call3 = tail call float @llvm.powi.f32.i32(float %fp3, i32 %add1) nounwind readnone

  %arrayidx6 = getelementptr inbounds i32, ptr %a, i32 3
  %i6 = load i32, ptr %arrayidx6, align 4
  %arrayidx7 = getelementptr inbounds i32, ptr %b, i32 3
  %i7 = load i32, ptr %arrayidx7, align 4
  %add4 = add i32 %i6, %i7
  %fp4 = sitofp i32 %add4 to float
  %call4 = tail call float @llvm.powi.f32.i32(float %fp4, i32 %add1) nounwind readnone

  store float %call1, ptr %c, align 4
  %arrayidx8 = getelementptr inbounds float, ptr %c, i32 1
  store float %call2, ptr %arrayidx8, align 4
  %arrayidx9 = getelementptr inbounds float, ptr %c, i32 2
  store float %call3, ptr %arrayidx9, align 4
  %arrayidx10 = getelementptr inbounds float, ptr %c, i32 3
  store float %call4, ptr %arrayidx10, align 4
  ret void
}

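; Here %add.ptr and %arrayidx2 are also used directly as scalar load/store
; addresses; the expected output still vectorizes the ptrtoint/add/store chain
; into <2 x i64> operations while keeping %add.ptr as the scalar address.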
define void @externally_used_ptrs() {
; CHECK-LABEL: @externally_used_ptrs(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr @a, align 8
; CHECK-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i64, ptr [[TMP0]], i64 11
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x ptr> [[TMP1]], <2 x ptr> poison, <2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i64, <2 x ptr> [[TMP2]], <2 x i64> <i64 56, i64 11>
; CHECK-NEXT:    [[TMP4:%.*]] = ptrtoint <2 x ptr> [[TMP3]] to <2 x i64>
; CHECK-NEXT:    [[TMP5:%.*]] = load <2 x i64>, ptr [[ADD_PTR]], align 8
; CHECK-NEXT:    [[TMP6:%.*]] = add <2 x i64> [[TMP4]], [[TMP5]]
; CHECK-NEXT:    store <2 x i64> [[TMP6]], ptr [[ADD_PTR]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %0 = load ptr, ptr @a, align 8
  %add.ptr = getelementptr inbounds i64, ptr %0, i64 11
  %1 = ptrtoint ptr %add.ptr to i64
  %add.ptr1 = getelementptr inbounds i64, ptr %0, i64 56
  %2 = ptrtoint ptr %add.ptr1 to i64
  %arrayidx2 = getelementptr inbounds i64, ptr %0, i64 12
  %3 = load i64, ptr %arrayidx2, align 8
  %4 = load i64, ptr %add.ptr, align 8
  %5 = add i64 %1, %3
  %6 = add i64 %2, %4
  store i64 %6, ptr %add.ptr, align 8
  store i64 %5, ptr %arrayidx2, align 8
  ret void
}