; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-apple-macosx -mcpu=haswell | FileCheck %s --check-prefixes=CHECK,AVX2
; RUN: opt -passes=slp-vectorizer -slp-vectorize-hor -slp-vectorize-hor-store -S < %s -mtriple=x86_64-apple-macosx -mcpu=x86-64-v4 | FileCheck %s --check-prefixes=CHECK,AVX512

;unsigned load_le32(unsigned char *data) {
;  unsigned le32 = (data[0]<<0) | (data[1]<<8) | (data[2]<<16) | (data[3]<<24);
;  return le32;
;}

define i32 @_Z9load_le32Ph(ptr nocapture readonly %data) {
; CHECK-LABEL: @_Z9load_le32Ph(
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[DATA:%.*]], align 1
; CHECK-NEXT: [[CONV:%.*]] = zext i8 [[TMP0]] to i32
; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr [[DATA]], i64 1
; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX1]], align 1
; CHECK-NEXT: [[CONV2:%.*]] = zext i8 [[TMP1]] to i32
; CHECK-NEXT: [[SHL3:%.*]] = shl nuw nsw i32 [[CONV2]], 8
; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL3]], [[CONV]]
; CHECK-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i8, ptr [[DATA]], i64 2
; CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ARRAYIDX4]], align 1
; CHECK-NEXT: [[CONV5:%.*]] = zext i8 [[TMP2]] to i32
; CHECK-NEXT: [[SHL6:%.*]] = shl nuw nsw i32 [[CONV5]], 16
; CHECK-NEXT: [[OR7:%.*]] = or i32 [[OR]], [[SHL6]]
; CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i8, ptr [[DATA]], i64 3
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[ARRAYIDX8]], align 1
; CHECK-NEXT: [[CONV9:%.*]] = zext i8 [[TMP3]] to i32
; CHECK-NEXT: [[SHL10:%.*]] = shl nuw i32 [[CONV9]], 24
; CHECK-NEXT: [[OR11:%.*]] = or i32 [[OR7]], [[SHL10]]
; CHECK-NEXT: ret i32 [[OR11]]
  %0 = load i8, ptr %data, align 1
  %conv = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8, ptr %data, i64 1
  %1 = load i8, ptr %arrayidx1, align 1
  %conv2 = zext i8 %1 to i32
  %shl3 = shl nuw nsw i32 %conv2, 8
  %or = or i32 %shl3, %conv
  %arrayidx4 = getelementptr inbounds i8, ptr %data, i64 2
  %2 = load i8, ptr %arrayidx4, align 1
  %conv5 = zext i8 %2 to i32
  %shl6 = shl nuw nsw i32 %conv5, 16
  %or7 = or i32 %or, %shl6
  %arrayidx8 = getelementptr inbounds i8, ptr %data, i64 3
  %3 = load i8, ptr %arrayidx8, align 1
  %conv9 = zext i8 %3 to i32
  %shl10 = shl nuw i32 %conv9, 24
  %or11 = or i32 %or7, %shl10
  ret i32 %or11
}

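; PR16739_byref builds a <4 x float> from scalar loads of x[0], x[1], and x[2],
; duplicating x[2] into lane 3. A rough C sketch of the intent (illustration
; only, not the original source; "v4sf" is a hypothetical vector typedef):
;
;   typedef float v4sf __attribute__((vector_size(16)));
;   v4sf PR16739_byref(const float *x) {
;     return (v4sf){ x[0], x[1], x[2], x[2] };
;   }
;
; The checks below expect the consecutive lanes to be loaded as a <2 x float>
; and combined into the result with shuffles/inserts.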
define <4 x float> @PR16739_byref(ptr nocapture readonly dereferenceable(16) %x) {
; AVX2-LABEL: @PR16739_byref(
; AVX2-NEXT: [[GEP2:%.*]] = getelementptr inbounds <4 x float>, ptr [[X:%.*]], i64 0, i64 2
; AVX2-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[X]], align 4
; AVX2-NEXT: [[X2:%.*]] = load float, ptr [[GEP2]], align 4
; AVX2-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; AVX2-NEXT: [[I2:%.*]] = insertelement <4 x float> [[TMP2]], float [[X2]], i32 2
; AVX2-NEXT: [[I3:%.*]] = insertelement <4 x float> [[I2]], float [[X2]], i32 3
; AVX2-NEXT: ret <4 x float> [[I3]]
;
; AVX512-LABEL: @PR16739_byref(
; AVX512-NEXT: [[GEP1:%.*]] = getelementptr inbounds <4 x float>, ptr [[X:%.*]], i64 0, i64 1
; AVX512-NEXT: [[X0:%.*]] = load float, ptr [[X]], align 4
; AVX512-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[GEP1]], align 4
; AVX512-NEXT: [[I0:%.*]] = insertelement <4 x float> poison, float [[X0]], i32 0
; AVX512-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
; AVX512-NEXT: [[I21:%.*]] = shufflevector <4 x float> [[I0]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 4, i32 5, i32 poison>
; AVX512-NEXT: [[TMP3:%.*]] = shufflevector <4 x float> [[I21]], <4 x float> [[TMP2]], <4 x i32> <i32 0, i32 1, i32 2, i32 5>
; AVX512-NEXT: ret <4 x float> [[TMP3]]
  %gep1 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 1
  %gep2 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 2
  %x0 = load float, ptr %x
  %x1 = load float, ptr %gep1
  %x2 = load float, ptr %gep2
  %i0 = insertelement <4 x float> poison, float %x0, i32 0
  %i1 = insertelement <4 x float> %i0, float %x1, i32 1
  %i2 = insertelement <4 x float> %i1, float %x2, i32 2
  %i3 = insertelement <4 x float> %i2, float %x2, i32 3
  ret <4 x float> %i3
}

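; PR16739_byref_alt is the variant where the result lanes are
; <x[0], x[0], x[1], x[1]>, i.e. roughly (reusing the hypothetical v4sf
; typedef from the sketch above; illustration only):
;
;   v4sf PR16739_byref_alt(const float *x) {
;     return (v4sf){ x[0], x[0], x[1], x[1] };
;   }
;
; Here both checked targets expect a single <2 x float> load followed by one
; shufflevector.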
define <4 x float> @PR16739_byref_alt(ptr nocapture readonly dereferenceable(16) %x) {
; CHECK-LABEL: @PR16739_byref_alt(
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x float>, ptr [[X:%.*]], align 4
; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <2 x float> [[TMP1]], <2 x float> poison, <4 x i32> <i32 0, i32 0, i32 1, i32 1>
; CHECK-NEXT: ret <4 x float> [[TMP2]]
  %gep1 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 1
  %x0 = load float, ptr %x
  %x1 = load float, ptr %gep1
  %i0 = insertelement <4 x float> poison, float %x0, i32 0
  %i1 = insertelement <4 x float> %i0, float %x0, i32 1
  %i2 = insertelement <4 x float> %i1, float %x1, i32 2
  %i3 = insertelement <4 x float> %i2, float %x1, i32 3
  ret <4 x float> %i3
}

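; PR16739_byval produces the same <x0, x1, x2, x2> result as PR16739_byref,
; but the source has already been lowered to two i64 loads and the float lanes
; are reassembled via lshr/trunc/bitcast (presumably mimicking the by-value
; argument form after ABI lowering). The checks below expect this pattern to
; be left as-is, with no vector load formed.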
define <4 x float> @PR16739_byval(ptr nocapture readonly dereferenceable(16) %x) {
; CHECK-LABEL: @PR16739_byval(
; CHECK-NEXT: [[T1:%.*]] = load i64, ptr [[X:%.*]], align 16
; CHECK-NEXT: [[T2:%.*]] = getelementptr inbounds <4 x float>, ptr [[X]], i64 0, i64 2
; CHECK-NEXT: [[T4:%.*]] = load i64, ptr [[T2]], align 8
; CHECK-NEXT: [[T5:%.*]] = trunc i64 [[T1]] to i32
; CHECK-NEXT: [[T6:%.*]] = bitcast i32 [[T5]] to float
; CHECK-NEXT: [[T7:%.*]] = insertelement <4 x float> poison, float [[T6]], i32 0
; CHECK-NEXT: [[T8:%.*]] = lshr i64 [[T1]], 32
; CHECK-NEXT: [[T9:%.*]] = trunc i64 [[T8]] to i32
; CHECK-NEXT: [[T10:%.*]] = bitcast i32 [[T9]] to float
; CHECK-NEXT: [[T11:%.*]] = insertelement <4 x float> [[T7]], float [[T10]], i32 1
; CHECK-NEXT: [[T12:%.*]] = trunc i64 [[T4]] to i32
; CHECK-NEXT: [[T13:%.*]] = bitcast i32 [[T12]] to float
; CHECK-NEXT: [[T14:%.*]] = insertelement <4 x float> [[T11]], float [[T13]], i32 2
; CHECK-NEXT: [[T15:%.*]] = insertelement <4 x float> [[T14]], float [[T13]], i32 3
; CHECK-NEXT: ret <4 x float> [[T15]]
  %t1 = load i64, ptr %x, align 16
  %t2 = getelementptr inbounds <4 x float>, ptr %x, i64 0, i64 2
  %t4 = load i64, ptr %t2, align 8
  %t5 = trunc i64 %t1 to i32
  %t6 = bitcast i32 %t5 to float
  %t7 = insertelement <4 x float> poison, float %t6, i32 0
  %t8 = lshr i64 %t1, 32
  %t9 = trunc i64 %t8 to i32
  %t10 = bitcast i32 %t9 to float
  %t11 = insertelement <4 x float> %t7, float %t10, i32 1
  %t12 = trunc i64 %t4 to i32
  %t13 = bitcast i32 %t12 to float
  %t14 = insertelement <4 x float> %t11, float %t13, i32 2
  %t15 = insertelement <4 x float> %t14, float %t13, i32 3
  ret <4 x float> %t15
}

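; PR43578_prefer128 carries the "prefer-vector-width"="128" function attribute
; (attribute group #0 at the end of the file). The four i64 subtractions that
; feed the GEP indices are expected to be vectorized as two <2 x i64>
; operations rather than a single <4 x i64> operation, as the checks below
; show.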
define void @PR43578_prefer128(ptr %r, ptr %p, ptr %q) #0 {
; CHECK-LABEL: @PR43578_prefer128(
; CHECK-NEXT: [[P2:%.*]] = getelementptr inbounds i64, ptr [[P:%.*]], i64 2
; CHECK-NEXT: [[Q2:%.*]] = getelementptr inbounds i64, ptr [[Q:%.*]], i64 2
; CHECK-NEXT: [[TMP1:%.*]] = load <2 x i64>, ptr [[P]], align 2
; CHECK-NEXT: [[TMP2:%.*]] = load <2 x i64>, ptr [[Q]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = sub nsw <2 x i64> [[TMP1]], [[TMP2]]
; CHECK-NEXT: [[TMP4:%.*]] = load <2 x i64>, ptr [[P2]], align 2
; CHECK-NEXT: [[TMP5:%.*]] = load <2 x i64>, ptr [[Q2]], align 2
; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <2 x i64> [[TMP4]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = extractelement <2 x i64> [[TMP3]], i32 0
; CHECK-NEXT: [[G0:%.*]] = getelementptr inbounds i32, ptr [[R:%.*]], i64 [[TMP7]]
; CHECK-NEXT: [[TMP8:%.*]] = extractelement <2 x i64> [[TMP3]], i32 1
; CHECK-NEXT: [[G1:%.*]] = getelementptr inbounds i32, ptr [[R]], i64 [[TMP8]]
; CHECK-NEXT: [[TMP9:%.*]] = extractelement <2 x i64> [[TMP6]], i32 0
; CHECK-NEXT: [[G2:%.*]] = getelementptr inbounds i32, ptr [[R]], i64 [[TMP9]]
; CHECK-NEXT: [[TMP10:%.*]] = extractelement <2 x i64> [[TMP6]], i32 1
; CHECK-NEXT: [[G3:%.*]] = getelementptr inbounds i32, ptr [[R]], i64 [[TMP10]]
; CHECK-NEXT: ret void
  %p1 = getelementptr inbounds i64, ptr %p, i64 1
  %p2 = getelementptr inbounds i64, ptr %p, i64 2
  %p3 = getelementptr inbounds i64, ptr %p, i64 3
  %q1 = getelementptr inbounds i64, ptr %q, i64 1
  %q2 = getelementptr inbounds i64, ptr %q, i64 2
  %q3 = getelementptr inbounds i64, ptr %q, i64 3
  %x0 = load i64, ptr %p, align 2
  %x1 = load i64, ptr %p1, align 2
  %x2 = load i64, ptr %p2, align 2
  %x3 = load i64, ptr %p3, align 2
  %y0 = load i64, ptr %q, align 2
  %y1 = load i64, ptr %q1, align 2
  %y2 = load i64, ptr %q2, align 2
  %y3 = load i64, ptr %q3, align 2
  %sub0 = sub nsw i64 %x0, %y0
  %sub1 = sub nsw i64 %x1, %y1
  %sub2 = sub nsw i64 %x2, %y2
  %sub3 = sub nsw i64 %x3, %y3
  %g0 = getelementptr inbounds i32, ptr %r, i64 %sub0
  %g1 = getelementptr inbounds i32, ptr %r, i64 %sub1
  %g2 = getelementptr inbounds i32, ptr %r, i64 %sub2
  %g3 = getelementptr inbounds i32, ptr %r, i64 %sub3
  ret void
}

attributes #0 = { "prefer-vector-width"="128" }