; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -basicaa -slp-vectorizer -dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"

; Simple 3-pair chain with loads and stores.
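; For reference, the IR below corresponds roughly to this C (a hypothetical
; source sketch, not necessarily what the test was generated from):
;
;   void test1(double *a, double *b, double *c) {
;     c[0] = a[0] * b[0];
;     c[1] = a[1] * b[1];
;   }
;
; SLP should pack the two scalar load/fmul/store chains into a single
; <2 x double> load, fmul, and store, as the CHECK lines below verify.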
define void @test1(double* %a, double* %b, double* %c) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C:%.*]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %mul5, double* %arrayidx5, align 8
  ret void
}

; Simple 3-pair chain with loads and stores, obfuscated with bitcasts.
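; Same computation as @test1, but the destination pointer arrives as an i8*
; and is bitcast back to double*; the vectorizer should look through the
; casts. Hypothetical C sketch:
;
;   void test2(double *a, double *b, char *e) {
;     double *c = (double *)e;
;     c[0] = a[0] * b[0];
;     c[1] = a[1] * b[1];
;   }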
define void @test2(double* %a, double* %b, i8* %e) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double* [[A:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, <2 x double>* [[TMP1]], align 8
; CHECK-NEXT:    [[TMP3:%.*]] = bitcast double* [[B:%.*]] to <2 x double>*
; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, <2 x double>* [[TMP3]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[C:%.*]] = bitcast i8* [[E:%.*]] to double*
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast double* [[C]] to <2 x double>*
; CHECK-NEXT:    store <2 x double> [[TMP5]], <2 x double>* [[TMP6]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  %c = bitcast i8* %e to double*
  store double %mul, double* %c, align 8
  %carrayidx5 = getelementptr inbounds i8, i8* %e, i64 8
  %arrayidx5 = bitcast i8* %carrayidx5 to double*
  store double %mul5, double* %arrayidx5, align 8
  ret void
}

; Don't vectorize volatile loads.
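; Only the two index-0 loads are volatile, but that is enough to keep the
; whole function scalar: volatile accesses may not be widened or merged.
; Hypothetical C sketch:
;
;   void test_volatile_load(double *a, double *b, double *c) {
;     c[0] = *(volatile double *)a * *(volatile double *)b;
;     c[1] = a[1] * b[1];
;   }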
define void @test_volatile_load(double* %a, double* %b, double* %c) {
; CHECK-LABEL: @test_volatile_load(
; CHECK-NEXT:    [[I0:%.*]] = load volatile double, double* [[A:%.*]], align 8
; CHECK-NEXT:    [[I1:%.*]] = load volatile double, double* [[B:%.*]], align 8
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[I0]], [[I1]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
; CHECK-NEXT:    [[I3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
; CHECK-NEXT:    [[I4:%.*]] = load double, double* [[ARRAYIDX4]], align 8
; CHECK-NEXT:    [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
; CHECK-NEXT:    store double [[MUL]], double* [[C:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C]], i64 1
; CHECK-NEXT:    store double [[MUL5]], double* [[ARRAYIDX5]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load volatile double, double* %a, align 8
  %i1 = load volatile double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store double %mul5, double* %arrayidx5, align 8
  ret void
}

; Don't vectorize volatile stores.
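; Both stores are volatile, so they may not be merged into one <2 x double>
; store; everything stays scalar. Hypothetical C sketch:
;
;   void test_volatile_store(double *a, double *b, double *c) {
;     *(volatile double *)&c[0] = a[0] * b[0];
;     *(volatile double *)&c[1] = a[1] * b[1];
;   }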
define void @test_volatile_store(double* %a, double* %b, double* %c) {
; CHECK-LABEL: @test_volatile_store(
; CHECK-NEXT:    [[I0:%.*]] = load double, double* [[A:%.*]], align 8
; CHECK-NEXT:    [[I1:%.*]] = load double, double* [[B:%.*]], align 8
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[I0]], [[I1]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[A]], i64 1
; CHECK-NEXT:    [[I3:%.*]] = load double, double* [[ARRAYIDX3]], align 8
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, double* [[B]], i64 1
; CHECK-NEXT:    [[I4:%.*]] = load double, double* [[ARRAYIDX4]], align 8
; CHECK-NEXT:    [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
; CHECK-NEXT:    store volatile double [[MUL]], double* [[C:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, double* [[C]], i64 1
; CHECK-NEXT:    store volatile double [[MUL5]], double* [[ARRAYIDX5]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load double, double* %a, align 8
  %i1 = load double, double* %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, double* %a, i64 1
  %i3 = load double, double* %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, double* %b, i64 1
  %i4 = load double, double* %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store volatile double %mul, double* %c, align 8
  %arrayidx5 = getelementptr inbounds double, double* %c, i64 1
  store volatile double %mul5, double* %arrayidx5, align 8
  ret void
}