1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE
3 ; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+sse4.2 | FileCheck %s --check-prefixes=CHECK,SSE
4 ; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX
5 ; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX
6 ; RUN: opt < %s -slp-vectorizer -instcombine -S -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX
; Scalar pattern: x = min((x * %1) >> 15, 255) applied to four consecutive
; i32 lanes; the SLP vectorizer should merge them into a single <4 x i32> op.
; Restored lines lost in transit: the per-lane `mul`/`lshr` instructions, the
; `ret void` CHECK, and the function terminator/close brace.
define void @store_i32(i32* nocapture %0, i32 %1, i32 %2) {
; CHECK-LABEL: @store_i32(
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP0:%.*]] to <4 x i32>*
; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i32>, <4 x i32>* [[TMP4]], align 4, !tbaa [[TBAA0:![0-9]+]]
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1:%.*]], i32 0
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <4 x i32> [[TMP6]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = mul <4 x i32> [[TMP5]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = lshr <4 x i32> [[TMP8]], <i32 15, i32 15, i32 15, i32 15>
; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult <4 x i32> [[TMP9]], <i32 255, i32 255, i32 255, i32 255>
; CHECK-NEXT:    [[TMP11:%.*]] = select <4 x i1> [[TMP10]], <4 x i32> [[TMP9]], <4 x i32> <i32 255, i32 255, i32 255, i32 255>
; CHECK-NEXT:    [[TMP12:%.*]] = bitcast i32* [[TMP0]] to <4 x i32>*
; CHECK-NEXT:    store <4 x i32> [[TMP11]], <4 x i32>* [[TMP12]], align 4, !tbaa [[TBAA0]]
; CHECK-NEXT:    ret void
;
  %4 = load i32, i32* %0, align 4, !tbaa !2
  %5 = mul i32 %4, %1
  %6 = lshr i32 %5, 15
  %7 = icmp ult i32 %6, 255
  %8 = select i1 %7, i32 %6, i32 255
  store i32 %8, i32* %0, align 4, !tbaa !2
  %9 = getelementptr inbounds i32, i32* %0, i64 1
  %10 = load i32, i32* %9, align 4, !tbaa !2
  %11 = mul i32 %10, %1
  %12 = lshr i32 %11, 15
  %13 = icmp ult i32 %12, 255
  %14 = select i1 %13, i32 %12, i32 255
  store i32 %14, i32* %9, align 4, !tbaa !2
  %15 = getelementptr inbounds i32, i32* %0, i64 2
  %16 = load i32, i32* %15, align 4, !tbaa !2
  %17 = mul i32 %16, %1
  %18 = lshr i32 %17, 15
  %19 = icmp ult i32 %18, 255
  %20 = select i1 %19, i32 %18, i32 255
  store i32 %20, i32* %15, align 4, !tbaa !2
  %21 = getelementptr inbounds i32, i32* %0, i64 3
  %22 = load i32, i32* %21, align 4, !tbaa !2
  %23 = mul i32 %22, %1
  %24 = lshr i32 %23, 15
  %25 = icmp ult i32 %24, 255
  %26 = select i1 %25, i32 %24, i32 255
  store i32 %26, i32* %21, align 4, !tbaa !2
  ret void
}
; Same saturating-multiply pattern on four i8 lanes, widened to i32 for the
; arithmetic (zext ... mul ... lshr ... umin-style select ... trunc).
; Restored lines lost in transit: the per-lane `mul i32 %1, %N` instructions,
; the `ret void`, and the close brace. Operand order in the `mul` (scalar
; operand first) matches the vector CHECK `mul <4 x i32> [[TMP8]], [[TMP6]]`.
define void @store_i8(i8* nocapture %0, i32 %1, i32 %2) {
; CHECK-LABEL: @store_i8(
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast i8* [[TMP0:%.*]] to <4 x i8>*
; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x i8>, <4 x i8>* [[TMP4]], align 1, !tbaa [[TBAA4:![0-9]+]]
; CHECK-NEXT:    [[TMP6:%.*]] = zext <4 x i8> [[TMP5]] to <4 x i32>
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1:%.*]], i32 0
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP9:%.*]] = mul <4 x i32> [[TMP8]], [[TMP6]]
; CHECK-NEXT:    [[TMP10:%.*]] = lshr <4 x i32> [[TMP9]], <i32 15, i32 15, i32 15, i32 15>
; CHECK-NEXT:    [[TMP11:%.*]] = icmp ult <4 x i32> [[TMP10]], <i32 255, i32 255, i32 255, i32 255>
; CHECK-NEXT:    [[TMP12:%.*]] = select <4 x i1> [[TMP11]], <4 x i32> [[TMP10]], <4 x i32> <i32 255, i32 255, i32 255, i32 255>
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <4 x i32> [[TMP12]] to <4 x i8>
; CHECK-NEXT:    [[TMP14:%.*]] = bitcast i8* [[TMP0]] to <4 x i8>*
; CHECK-NEXT:    store <4 x i8> [[TMP13]], <4 x i8>* [[TMP14]], align 1, !tbaa [[TBAA4]]
; CHECK-NEXT:    ret void
;
  %4 = load i8, i8* %0, align 1, !tbaa !6
  %5 = zext i8 %4 to i32
  %6 = mul i32 %1, %5
  %7 = lshr i32 %6, 15
  %8 = icmp ult i32 %7, 255
  %9 = select i1 %8, i32 %7, i32 255
  %10 = trunc i32 %9 to i8
  store i8 %10, i8* %0, align 1, !tbaa !6
  %11 = getelementptr inbounds i8, i8* %0, i64 1
  %12 = load i8, i8* %11, align 1, !tbaa !6
  %13 = zext i8 %12 to i32
  %14 = mul i32 %1, %13
  %15 = lshr i32 %14, 15
  %16 = icmp ult i32 %15, 255
  %17 = select i1 %16, i32 %15, i32 255
  %18 = trunc i32 %17 to i8
  store i8 %18, i8* %11, align 1, !tbaa !6
  %19 = getelementptr inbounds i8, i8* %0, i64 2
  %20 = load i8, i8* %19, align 1, !tbaa !6
  %21 = zext i8 %20 to i32
  %22 = mul i32 %1, %21
  %23 = lshr i32 %22, 15
  %24 = icmp ult i32 %23, 255
  %25 = select i1 %24, i32 %23, i32 255
  %26 = trunc i32 %25 to i8
  store i8 %26, i8* %19, align 1, !tbaa !6
  %27 = getelementptr inbounds i8, i8* %0, i64 3
  %28 = load i8, i8* %27, align 1, !tbaa !6
  %29 = zext i8 %28 to i32
  %30 = mul i32 %1, %29
  %31 = lshr i32 %30, 15
  %32 = icmp ult i32 %31, 255
  %33 = select i1 %32, i32 %31, i32 255
  %34 = trunc i32 %33 to i8
  store i8 %34, i8* %27, align 1, !tbaa !6
  ret void
}
; i64 variant of the saturating-multiply pattern. Run lines split coverage:
; SSE targets keep the four lanes scalar (no profitable <4 x i64> on 128-bit
; vectors), AVX targets vectorize to <4 x i64>.
; Restored lines lost in transit: the `ret void` CHECKs closing the SSE and
; AVX blocks, the scalar `%6 = mul`/`%7 = lshr` pair (reconstructed from the
; identical lanes below), and the function terminator/close brace.
define void @store_i64(i64* nocapture %0, i32 %1, i32 %2) {
; SSE-LABEL: @store_i64(
; SSE-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP1:%.*]] to i64
; SSE-NEXT:    [[TMP5:%.*]] = load i64, i64* [[TMP0:%.*]], align 8, !tbaa [[TBAA5:![0-9]+]]
; SSE-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], [[TMP4]]
; SSE-NEXT:    [[TMP7:%.*]] = lshr i64 [[TMP6]], 15
; SSE-NEXT:    [[TMP8:%.*]] = trunc i64 [[TMP7]] to i32
; SSE-NEXT:    [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 255
; SSE-NEXT:    [[TMP10:%.*]] = and i64 [[TMP7]], 4294967295
; SSE-NEXT:    [[TMP11:%.*]] = select i1 [[TMP9]], i64 [[TMP10]], i64 255
; SSE-NEXT:    store i64 [[TMP11]], i64* [[TMP0]], align 8, !tbaa [[TBAA5]]
; SSE-NEXT:    [[TMP12:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 1
; SSE-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8, !tbaa [[TBAA5]]
; SSE-NEXT:    [[TMP14:%.*]] = mul i64 [[TMP13]], [[TMP4]]
; SSE-NEXT:    [[TMP15:%.*]] = lshr i64 [[TMP14]], 15
; SSE-NEXT:    [[TMP16:%.*]] = trunc i64 [[TMP15]] to i32
; SSE-NEXT:    [[TMP17:%.*]] = icmp ult i32 [[TMP16]], 255
; SSE-NEXT:    [[TMP18:%.*]] = and i64 [[TMP15]], 4294967295
; SSE-NEXT:    [[TMP19:%.*]] = select i1 [[TMP17]], i64 [[TMP18]], i64 255
; SSE-NEXT:    store i64 [[TMP19]], i64* [[TMP12]], align 8, !tbaa [[TBAA5]]
; SSE-NEXT:    [[TMP20:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 2
; SSE-NEXT:    [[TMP21:%.*]] = load i64, i64* [[TMP20]], align 8, !tbaa [[TBAA5]]
; SSE-NEXT:    [[TMP22:%.*]] = mul i64 [[TMP21]], [[TMP4]]
; SSE-NEXT:    [[TMP23:%.*]] = lshr i64 [[TMP22]], 15
; SSE-NEXT:    [[TMP24:%.*]] = trunc i64 [[TMP23]] to i32
; SSE-NEXT:    [[TMP25:%.*]] = icmp ult i32 [[TMP24]], 255
; SSE-NEXT:    [[TMP26:%.*]] = and i64 [[TMP23]], 4294967295
; SSE-NEXT:    [[TMP27:%.*]] = select i1 [[TMP25]], i64 [[TMP26]], i64 255
; SSE-NEXT:    store i64 [[TMP27]], i64* [[TMP20]], align 8, !tbaa [[TBAA5]]
; SSE-NEXT:    [[TMP28:%.*]] = getelementptr inbounds i64, i64* [[TMP0]], i64 3
; SSE-NEXT:    [[TMP29:%.*]] = load i64, i64* [[TMP28]], align 8, !tbaa [[TBAA5]]
; SSE-NEXT:    [[TMP30:%.*]] = mul i64 [[TMP29]], [[TMP4]]
; SSE-NEXT:    [[TMP31:%.*]] = lshr i64 [[TMP30]], 15
; SSE-NEXT:    [[TMP32:%.*]] = trunc i64 [[TMP31]] to i32
; SSE-NEXT:    [[TMP33:%.*]] = icmp ult i32 [[TMP32]], 255
; SSE-NEXT:    [[TMP34:%.*]] = and i64 [[TMP31]], 4294967295
; SSE-NEXT:    [[TMP35:%.*]] = select i1 [[TMP33]], i64 [[TMP34]], i64 255
; SSE-NEXT:    store i64 [[TMP35]], i64* [[TMP28]], align 8, !tbaa [[TBAA5]]
; SSE-NEXT:    ret void
;
; AVX-LABEL: @store_i64(
; AVX-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP1:%.*]] to i64
; AVX-NEXT:    [[TMP5:%.*]] = bitcast i64* [[TMP0:%.*]] to <4 x i64>*
; AVX-NEXT:    [[TMP6:%.*]] = load <4 x i64>, <4 x i64>* [[TMP5]], align 8, !tbaa [[TBAA5:![0-9]+]]
; AVX-NEXT:    [[TMP7:%.*]] = insertelement <4 x i64> poison, i64 [[TMP4]], i32 0
; AVX-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i64> [[TMP7]], <4 x i64> poison, <4 x i32> zeroinitializer
; AVX-NEXT:    [[TMP9:%.*]] = mul <4 x i64> [[TMP6]], [[TMP8]]
; AVX-NEXT:    [[TMP10:%.*]] = lshr <4 x i64> [[TMP9]], <i64 15, i64 15, i64 15, i64 15>
; AVX-NEXT:    [[TMP11:%.*]] = trunc <4 x i64> [[TMP10]] to <4 x i32>
; AVX-NEXT:    [[TMP12:%.*]] = icmp ult <4 x i32> [[TMP11]], <i32 255, i32 255, i32 255, i32 255>
; AVX-NEXT:    [[TMP13:%.*]] = and <4 x i64> [[TMP10]], <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
; AVX-NEXT:    [[TMP14:%.*]] = select <4 x i1> [[TMP12]], <4 x i64> [[TMP13]], <4 x i64> <i64 255, i64 255, i64 255, i64 255>
; AVX-NEXT:    [[TMP15:%.*]] = bitcast i64* [[TMP0]] to <4 x i64>*
; AVX-NEXT:    store <4 x i64> [[TMP14]], <4 x i64>* [[TMP15]], align 8, !tbaa [[TBAA5]]
; AVX-NEXT:    ret void
;
  %4 = zext i32 %1 to i64
  %5 = load i64, i64* %0, align 8, !tbaa !7
  %6 = mul i64 %5, %4
  %7 = lshr i64 %6, 15
  %8 = trunc i64 %7 to i32
  %9 = icmp ult i32 %8, 255
  %10 = and i64 %7, 4294967295
  %11 = select i1 %9, i64 %10, i64 255
  store i64 %11, i64* %0, align 8, !tbaa !7
  %12 = getelementptr inbounds i64, i64* %0, i64 1
  %13 = load i64, i64* %12, align 8, !tbaa !7
  %14 = mul i64 %13, %4
  %15 = lshr i64 %14, 15
  %16 = trunc i64 %15 to i32
  %17 = icmp ult i32 %16, 255
  %18 = and i64 %15, 4294967295
  %19 = select i1 %17, i64 %18, i64 255
  store i64 %19, i64* %12, align 8, !tbaa !7
  %20 = getelementptr inbounds i64, i64* %0, i64 2
  %21 = load i64, i64* %20, align 8, !tbaa !7
  %22 = mul i64 %21, %4
  %23 = lshr i64 %22, 15
  %24 = trunc i64 %23 to i32
  %25 = icmp ult i32 %24, 255
  %26 = and i64 %23, 4294967295
  %27 = select i1 %25, i64 %26, i64 255
  store i64 %27, i64* %20, align 8, !tbaa !7
  %28 = getelementptr inbounds i64, i64* %0, i64 3
  %29 = load i64, i64* %28, align 8, !tbaa !7
  %30 = mul i64 %29, %4
  %31 = lshr i64 %30, 15
  %32 = trunc i64 %31 to i32
  %33 = icmp ult i32 %32, 255
  %34 = and i64 %31, 4294967295
  %35 = select i1 %33, i64 %34, i64 255
  store i64 %35, i64* %28, align 8, !tbaa !7
  ret void
}
; TBAA access tags referenced by the !tbaa annotations above:
;   !2 -> "int" accesses (used by @store_i32)
;   !6 -> "omnipotent char" accesses (used by @store_i8)
;   !7 -> "long" accesses (used by @store_i64)
; All are rooted at the "Simple C++ TBAA" node (!5).
!2 = !{!3, !3, i64 0}
!3 = !{!"int", !4, i64 0}
!4 = !{!"omnipotent char", !5, i64 0}
!5 = !{!"Simple C++ TBAA"}
!6 = !{!4, !4, i64 0}
!7 = !{!8, !8, i64 0}
!8 = !{!"long", !4, i64 0}