; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=slp-vectorizer,dce -S -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx | FileCheck %s

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
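
; The RUN line pipes this file through opt with the SLP vectorizer followed by
; DCE (which sweeps up the dead scalar instructions the vectorizer leaves
; behind), then FileCheck verifies the autogenerated assertions below.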

; Simple 3-pair chain with loads and stores.
define void @test1(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    store <2 x double> [[TMP5]], ptr [[C:%.*]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load double, ptr %a, align 8
  %i1 = load double, ptr %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, ptr %a, i64 1
  %i3 = load double, ptr %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, ptr %b, i64 1
  %i4 = load double, ptr %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, ptr %c, align 8
  %arrayidx5 = getelementptr inbounds double, ptr %c, i64 1
  store double %mul5, ptr %arrayidx5, align 8
  ret void
}
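
; For reference, a rough C analogue of the scalar chain in @test1 (an
; illustrative sketch only; the names are invented, not part of the test):
;
;   void test1(double *a, double *b, double *c) {
;     c[0] = a[0] * b[0];
;     c[1] = a[1] * b[1];
;   }
;
; The SLP vectorizer merges the two independent scalar chains into a single
; <2 x double> load/fmul/store sequence, as the CHECK lines above assert.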

; Simple 3-pair chain with loads and stores, with the second store address
; obfuscated as a byte-offset (i8) GEP.
define void @test2(ptr %a, ptr %b, ptr %e) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = load <2 x double>, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    store <2 x double> [[TMP5]], ptr [[E:%.*]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load double, ptr %a, align 8
  %i1 = load double, ptr %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, ptr %a, i64 1
  %i3 = load double, ptr %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, ptr %b, i64 1
  %i4 = load double, ptr %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, ptr %e, align 8
  %carrayidx5 = getelementptr inbounds i8, ptr %e, i64 8
  store double %mul5, ptr %carrayidx5, align 8
  ret void
}
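
; The i8 GEP above addresses in bytes: 8 bytes past %e is the same address as
; the double-typed `getelementptr inbounds double, ptr %e, i64 1` in @test1.
; A rough C analogue (illustrative only, names invented):
;
;   double *second = (double *)((char *)e + 8);  /* identical to &e[1] */
;
; Because the two stores are still recognized as consecutive, the vectorizer
; emits the same single <2 x double> store as in @test1.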

; Don't vectorize volatile loads.
define void @test_volatile_load(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: @test_volatile_load(
; CHECK-NEXT:    [[I0:%.*]] = load volatile double, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[I1:%.*]] = load volatile double, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[A]], i64 1
; CHECK-NEXT:    [[I3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, ptr [[B]], i64 1
; CHECK-NEXT:    [[I4:%.*]] = load double, ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> poison, double [[I0]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[I3]], i32 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x double> poison, double [[I1]], i32 0
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> [[TMP3]], double [[I4]], i32 1
; CHECK-NEXT:    [[TMP5:%.*]] = fmul <2 x double> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    store <2 x double> [[TMP5]], ptr [[C:%.*]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load volatile double, ptr %a, align 8
  %i1 = load volatile double, ptr %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, ptr %a, i64 1
  %i3 = load double, ptr %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, ptr %b, i64 1
  %i4 = load double, ptr %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store double %mul, ptr %c, align 8
  %arrayidx5 = getelementptr inbounds double, ptr %c, i64 1
  store double %mul5, ptr %arrayidx5, align 8
  ret void
}
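
; Note the nuance the CHECK lines assert: the lane-0 loads are volatile, so no
; <2 x double> load may be formed and all four loads stay scalar. Their
; results are then gathered with insertelement, so the fmul and the store
; still vectorize. A rough C analogue (illustrative only, names invented):
;
;   double x0 = *(volatile double *)a;  /* must remain its own access */
;   double x1 = a[1];                   /* ordinary load, but cannot be
;                                          merged with the volatile one */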

; Don't vectorize volatile stores.
define void @test_volatile_store(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: @test_volatile_store(
; CHECK-NEXT:    [[I0:%.*]] = load double, ptr [[A:%.*]], align 8
; CHECK-NEXT:    [[I1:%.*]] = load double, ptr [[B:%.*]], align 8
; CHECK-NEXT:    [[MUL:%.*]] = fmul double [[I0]], [[I1]]
; CHECK-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds double, ptr [[A]], i64 1
; CHECK-NEXT:    [[I3:%.*]] = load double, ptr [[ARRAYIDX3]], align 8
; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds double, ptr [[B]], i64 1
; CHECK-NEXT:    [[I4:%.*]] = load double, ptr [[ARRAYIDX4]], align 8
; CHECK-NEXT:    [[MUL5:%.*]] = fmul double [[I3]], [[I4]]
; CHECK-NEXT:    store volatile double [[MUL]], ptr [[C:%.*]], align 8
; CHECK-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds double, ptr [[C]], i64 1
; CHECK-NEXT:    store volatile double [[MUL5]], ptr [[ARRAYIDX5]], align 8
; CHECK-NEXT:    ret void
;
  %i0 = load double, ptr %a, align 8
  %i1 = load double, ptr %b, align 8
  %mul = fmul double %i0, %i1
  %arrayidx3 = getelementptr inbounds double, ptr %a, i64 1
  %i3 = load double, ptr %arrayidx3, align 8
  %arrayidx4 = getelementptr inbounds double, ptr %b, i64 1
  %i4 = load double, ptr %arrayidx4, align 8
  %mul5 = fmul double %i3, %i4
  store volatile double %mul, ptr %c, align 8
  %arrayidx5 = getelementptr inbounds double, ptr %c, i64 1
  store volatile double %mul5, ptr %arrayidx5, align 8
  ret void
}
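
; Unlike @test_volatile_load, nothing vectorizes here: the store pair is the
; seed of SLP vectorization, and volatile stores may not be widened or
; reordered, so the whole chain stays scalar and the CHECK lines assert the
; IR is unchanged. A rough C analogue (illustrative only, names invented):
;
;   *(volatile double *)c = x0 * y0;        /* each volatile store must */
;   *((volatile double *)c + 1) = x1 * y1;  /* stay a separate access   */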