; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=slp-vectorizer -mtriple=aarch64--linux-gnu -mcpu=generic < %s | FileCheck %s
target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64"

; Pair of i32 weights: { scale, offset } — field 1 is addressed via GEP below.
%struct.weight_t = type { i32, i32 }
; With 'noalias' on all pointers the SLP vectorizer can prove src/dst do not
; overlap, so the four scalar lanes are vectorized into <4 x i8>/<4 x i32> ops.
define void @f_noalias(ptr noalias nocapture %dst, ptr noalias nocapture readonly %src, ptr noalias nocapture readonly %w) {
; CHECK-LABEL: @f_noalias(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[W:%.*]], align 16
; CHECK-NEXT:    [[OFFSET:%.*]] = getelementptr inbounds [[STRUCT_WEIGHT_T:%.*]], ptr [[W]], i64 0, i32 1
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[OFFSET]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = load <4 x i8>, ptr [[SRC:%.*]], align 1
; CHECK-NEXT:    [[TMP3:%.*]] = zext <4 x i8> [[TMP2]] to <4 x i32>
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x i32> poison, i32 [[TMP0]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x i32> [[TMP4]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = mul nsw <4 x i32> [[TMP5]], [[TMP3]]
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <4 x i32> poison, i32 [[TMP1]], i32 0
; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x i32> [[TMP7]], <4 x i32> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP9:%.*]] = add nsw <4 x i32> [[TMP6]], [[TMP8]]
; CHECK-NEXT:    [[TMP10:%.*]] = icmp ult <4 x i32> [[TMP9]], splat (i32 256)
; CHECK-NEXT:    [[TMP11:%.*]] = icmp sgt <4 x i32> [[TMP9]], zeroinitializer
; CHECK-NEXT:    [[TMP12:%.*]] = sext <4 x i1> [[TMP11]] to <4 x i32>
; CHECK-NEXT:    [[TMP13:%.*]] = select <4 x i1> [[TMP10]], <4 x i32> [[TMP9]], <4 x i32> [[TMP12]]
; CHECK-NEXT:    [[TMP14:%.*]] = trunc <4 x i32> [[TMP13]] to <4 x i8>
; CHECK-NEXT:    store <4 x i8> [[TMP14]], ptr [[DST:%.*]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i32, ptr %w, align 16
  %offset = getelementptr inbounds %struct.weight_t, ptr %w, i64 0, i32 1
  %1 = load i32, ptr %offset, align 4
  %2 = load i8, ptr %src, align 1
  %conv = zext i8 %2 to i32
  %mul = mul nsw i32 %0, %conv
  %add = add nsw i32 %mul, %1
  %tobool.not.i = icmp ult i32 %add, 256
  %3 = icmp sgt i32 %add, 0
  %shr.i = sext i1 %3 to i32
  %cond.i = select i1 %tobool.not.i, i32 %add, i32 %shr.i
  %conv.i = trunc i32 %cond.i to i8
  store i8 %conv.i, ptr %dst, align 1
  %arrayidx.1 = getelementptr inbounds i8, ptr %src, i64 1
  %4 = load i8, ptr %arrayidx.1, align 1
  %conv.1 = zext i8 %4 to i32
  %mul.1 = mul nsw i32 %0, %conv.1
  %add.1 = add nsw i32 %mul.1, %1
  %tobool.not.i.1 = icmp ult i32 %add.1, 256
  %5 = icmp sgt i32 %add.1, 0
  %shr.i.1 = sext i1 %5 to i32
  %cond.i.1 = select i1 %tobool.not.i.1, i32 %add.1, i32 %shr.i.1
  %conv.i.1 = trunc i32 %cond.i.1 to i8
  %arrayidx2.1 = getelementptr inbounds i8, ptr %dst, i64 1
  store i8 %conv.i.1, ptr %arrayidx2.1, align 1
  %arrayidx.2 = getelementptr inbounds i8, ptr %src, i64 2
  %6 = load i8, ptr %arrayidx.2, align 1
  %conv.2 = zext i8 %6 to i32
  %mul.2 = mul nsw i32 %0, %conv.2
  %add.2 = add nsw i32 %mul.2, %1
  %tobool.not.i.2 = icmp ult i32 %add.2, 256
  %7 = icmp sgt i32 %add.2, 0
  %shr.i.2 = sext i1 %7 to i32
  %cond.i.2 = select i1 %tobool.not.i.2, i32 %add.2, i32 %shr.i.2
  %conv.i.2 = trunc i32 %cond.i.2 to i8
  %arrayidx2.2 = getelementptr inbounds i8, ptr %dst, i64 2
  store i8 %conv.i.2, ptr %arrayidx2.2, align 1
  %arrayidx.3 = getelementptr inbounds i8, ptr %src, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %conv.3 = zext i8 %8 to i32
  %mul.3 = mul nsw i32 %0, %conv.3
  %add.3 = add nsw i32 %mul.3, %1
  %tobool.not.i.3 = icmp ult i32 %add.3, 256
  %9 = icmp sgt i32 %add.3, 0
  %shr.i.3 = sext i1 %9 to i32
  %cond.i.3 = select i1 %tobool.not.i.3, i32 %add.3, i32 %shr.i.3
  %conv.i.3 = trunc i32 %cond.i.3 to i8
  %arrayidx2.3 = getelementptr inbounds i8, ptr %dst, i64 3
  store i8 %conv.i.3, ptr %arrayidx2.3, align 1
  ret void
}
; This is the same test as above, except that the pointers don't have 'noalias'.
; This currently prevents SLP vectorization, but the SLP vectorizer should
; be taught to emit runtime checks enabling vectorization.
; Without 'noalias', dst may overlap src/w, so the stores could clobber later
; loads; the checks below pin the current (unvectorized) scalar output.
define void @f_alias(ptr nocapture %dst, ptr nocapture readonly %src, ptr nocapture readonly %w) {
; CHECK-LABEL: @f_alias(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr [[W:%.*]], align 16
; CHECK-NEXT:    [[OFFSET:%.*]] = getelementptr inbounds [[STRUCT_WEIGHT_T:%.*]], ptr [[W]], i64 0, i32 1
; CHECK-NEXT:    [[TMP1:%.*]] = load i32, ptr [[OFFSET]], align 4
; CHECK-NEXT:    [[TMP2:%.*]] = load i8, ptr [[SRC:%.*]], align 1
; CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[TMP2]] to i32
; CHECK-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP0]], [[CONV]]
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[MUL]], [[TMP1]]
; CHECK-NEXT:    [[TOBOOL_NOT_I:%.*]] = icmp ult i32 [[ADD]], 256
; CHECK-NEXT:    [[TMP3:%.*]] = icmp sgt i32 [[ADD]], 0
; CHECK-NEXT:    [[SHR_I:%.*]] = sext i1 [[TMP3]] to i32
; CHECK-NEXT:    [[COND_I:%.*]] = select i1 [[TOBOOL_NOT_I]], i32 [[ADD]], i32 [[SHR_I]]
; CHECK-NEXT:    [[CONV_I:%.*]] = trunc i32 [[COND_I]] to i8
; CHECK-NEXT:    store i8 [[CONV_I]], ptr [[DST:%.*]], align 1
; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 1
; CHECK-NEXT:    [[TMP4:%.*]] = load i8, ptr [[ARRAYIDX_1]], align 1
; CHECK-NEXT:    [[CONV_1:%.*]] = zext i8 [[TMP4]] to i32
; CHECK-NEXT:    [[MUL_1:%.*]] = mul nsw i32 [[TMP0]], [[CONV_1]]
; CHECK-NEXT:    [[ADD_1:%.*]] = add nsw i32 [[MUL_1]], [[TMP1]]
; CHECK-NEXT:    [[TOBOOL_NOT_I_1:%.*]] = icmp ult i32 [[ADD_1]], 256
; CHECK-NEXT:    [[TMP5:%.*]] = icmp sgt i32 [[ADD_1]], 0
; CHECK-NEXT:    [[SHR_I_1:%.*]] = sext i1 [[TMP5]] to i32
; CHECK-NEXT:    [[COND_I_1:%.*]] = select i1 [[TOBOOL_NOT_I_1]], i32 [[ADD_1]], i32 [[SHR_I_1]]
; CHECK-NEXT:    [[CONV_I_1:%.*]] = trunc i32 [[COND_I_1]] to i8
; CHECK-NEXT:    [[ARRAYIDX2_1:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 1
; CHECK-NEXT:    store i8 [[CONV_I_1]], ptr [[ARRAYIDX2_1]], align 1
; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 2
; CHECK-NEXT:    [[TMP6:%.*]] = load i8, ptr [[ARRAYIDX_2]], align 1
; CHECK-NEXT:    [[CONV_2:%.*]] = zext i8 [[TMP6]] to i32
; CHECK-NEXT:    [[MUL_2:%.*]] = mul nsw i32 [[TMP0]], [[CONV_2]]
; CHECK-NEXT:    [[ADD_2:%.*]] = add nsw i32 [[MUL_2]], [[TMP1]]
; CHECK-NEXT:    [[TOBOOL_NOT_I_2:%.*]] = icmp ult i32 [[ADD_2]], 256
; CHECK-NEXT:    [[TMP7:%.*]] = icmp sgt i32 [[ADD_2]], 0
; CHECK-NEXT:    [[SHR_I_2:%.*]] = sext i1 [[TMP7]] to i32
; CHECK-NEXT:    [[COND_I_2:%.*]] = select i1 [[TOBOOL_NOT_I_2]], i32 [[ADD_2]], i32 [[SHR_I_2]]
; CHECK-NEXT:    [[CONV_I_2:%.*]] = trunc i32 [[COND_I_2]] to i8
; CHECK-NEXT:    [[ARRAYIDX2_2:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 2
; CHECK-NEXT:    store i8 [[CONV_I_2]], ptr [[ARRAYIDX2_2]], align 1
; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds i8, ptr [[SRC]], i64 3
; CHECK-NEXT:    [[TMP8:%.*]] = load i8, ptr [[ARRAYIDX_3]], align 1
; CHECK-NEXT:    [[CONV_3:%.*]] = zext i8 [[TMP8]] to i32
; CHECK-NEXT:    [[MUL_3:%.*]] = mul nsw i32 [[TMP0]], [[CONV_3]]
; CHECK-NEXT:    [[ADD_3:%.*]] = add nsw i32 [[MUL_3]], [[TMP1]]
; CHECK-NEXT:    [[TOBOOL_NOT_I_3:%.*]] = icmp ult i32 [[ADD_3]], 256
; CHECK-NEXT:    [[TMP9:%.*]] = icmp sgt i32 [[ADD_3]], 0
; CHECK-NEXT:    [[SHR_I_3:%.*]] = sext i1 [[TMP9]] to i32
; CHECK-NEXT:    [[COND_I_3:%.*]] = select i1 [[TOBOOL_NOT_I_3]], i32 [[ADD_3]], i32 [[SHR_I_3]]
; CHECK-NEXT:    [[CONV_I_3:%.*]] = trunc i32 [[COND_I_3]] to i8
; CHECK-NEXT:    [[ARRAYIDX2_3:%.*]] = getelementptr inbounds i8, ptr [[DST]], i64 3
; CHECK-NEXT:    store i8 [[CONV_I_3]], ptr [[ARRAYIDX2_3]], align 1
; CHECK-NEXT:    ret void
;
entry:
  %0 = load i32, ptr %w, align 16
  %offset = getelementptr inbounds %struct.weight_t, ptr %w, i64 0, i32 1
  %1 = load i32, ptr %offset, align 4
  %2 = load i8, ptr %src, align 1
  %conv = zext i8 %2 to i32
  %mul = mul nsw i32 %0, %conv
  %add = add nsw i32 %mul, %1
  %tobool.not.i = icmp ult i32 %add, 256
  %3 = icmp sgt i32 %add, 0
  %shr.i = sext i1 %3 to i32
  %cond.i = select i1 %tobool.not.i, i32 %add, i32 %shr.i
  %conv.i = trunc i32 %cond.i to i8
  store i8 %conv.i, ptr %dst, align 1
  %arrayidx.1 = getelementptr inbounds i8, ptr %src, i64 1
  %4 = load i8, ptr %arrayidx.1, align 1
  %conv.1 = zext i8 %4 to i32
  %mul.1 = mul nsw i32 %0, %conv.1
  %add.1 = add nsw i32 %mul.1, %1
  %tobool.not.i.1 = icmp ult i32 %add.1, 256
  %5 = icmp sgt i32 %add.1, 0
  %shr.i.1 = sext i1 %5 to i32
  %cond.i.1 = select i1 %tobool.not.i.1, i32 %add.1, i32 %shr.i.1
  %conv.i.1 = trunc i32 %cond.i.1 to i8
  %arrayidx2.1 = getelementptr inbounds i8, ptr %dst, i64 1
  store i8 %conv.i.1, ptr %arrayidx2.1, align 1
  %arrayidx.2 = getelementptr inbounds i8, ptr %src, i64 2
  %6 = load i8, ptr %arrayidx.2, align 1
  %conv.2 = zext i8 %6 to i32
  %mul.2 = mul nsw i32 %0, %conv.2
  %add.2 = add nsw i32 %mul.2, %1
  %tobool.not.i.2 = icmp ult i32 %add.2, 256
  %7 = icmp sgt i32 %add.2, 0
  %shr.i.2 = sext i1 %7 to i32
  %cond.i.2 = select i1 %tobool.not.i.2, i32 %add.2, i32 %shr.i.2
  %conv.i.2 = trunc i32 %cond.i.2 to i8
  %arrayidx2.2 = getelementptr inbounds i8, ptr %dst, i64 2
  store i8 %conv.i.2, ptr %arrayidx2.2, align 1
  %arrayidx.3 = getelementptr inbounds i8, ptr %src, i64 3
  %8 = load i8, ptr %arrayidx.3, align 1
  %conv.3 = zext i8 %8 to i32
  %mul.3 = mul nsw i32 %0, %conv.3
  %add.3 = add nsw i32 %mul.3, %1
  %tobool.not.i.3 = icmp ult i32 %add.3, 256
  %9 = icmp sgt i32 %add.3, 0
  %shr.i.3 = sext i1 %9 to i32
  %cond.i.3 = select i1 %tobool.not.i.3, i32 %add.3, i32 %shr.i.3
  %conv.i.3 = trunc i32 %cond.i.3 to i8
  %arrayidx2.3 = getelementptr inbounds i8, ptr %dst, i64 3
  store i8 %conv.i.3, ptr %arrayidx2.3, align 1
  ret void
}