1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
2 ; RUN: opt < %s -instcombine -S | FileCheck %s
; (bswap(x) >> 24) --> (x & 255): bswap places x's low byte in the
; most-significant position, so shifting it back down selects x's original
; low byte (the CHECK lines confirm the result is `and i32 %a, 255`).
; NOTE(review): the CHECK-LABEL and the function's ret/closing brace appear
; to be missing from this chunk — verify against the full file.
6 define i32 @test4(i32 %a) nounwind {
8 ; CHECK-NEXT: [[T2:%.*]] = and i32 [[A:%.*]], 255
9 ; CHECK-NEXT: ret i32 [[T2]]
11 %t2 = tail call i32 @llvm.bswap.i32( i32 %a )
12 %t4 = lshr i32 %t2, 24
; Dual of test4: (bswap(x) & 255) --> (x >> 24), i.e. masking the low byte
; of the swapped value extracts x's original high byte (CHECK: `lshr ..., 24`).
17 define i32 @test6(i32 %a) nounwind {
18 ; CHECK-LABEL: @test6(
19 ; CHECK-NEXT: [[T2:%.*]] = lshr i32 [[A:%.*]], 24
20 ; CHECK-NEXT: ret i32 [[T2]]
22 %t2 = tail call i32 @llvm.bswap.i32( i32 %a )
23 %t4 = and i32 %t2, 255
; bswap16(trunc(bswap32(x))) --> trunc(x >> 16): the two byte swaps cancel
; on the bytes that survive the truncation, leaving a plain high-half extract
; (CHECK: `lshr i32 %A, 16` followed by `trunc ... to i16`).
28 define i16 @test7(i32 %A) {
29 ; CHECK-LABEL: @test7(
30 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[A:%.*]], 16
31 ; CHECK-NEXT: [[D:%.*]] = trunc i32 [[TMP1]] to i16
32 ; CHECK-NEXT: ret i16 [[D]]
34 %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
35 %C = trunc i32 %B to i16
36 %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
; Vector form of test7: the same bswap/trunc/bswap cancellation applies
; per-element, folding to a splat `lshr <2 x i32> ..., 16` plus trunc.
40 define <2 x i16> @test7_vector(<2 x i32> %A) {
41 ; CHECK-LABEL: @test7_vector(
42 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 16, i32 16>
43 ; CHECK-NEXT: [[D:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i16>
44 ; CHECK-NEXT: ret <2 x i16> [[D]]
46 %B = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %A) nounwind
47 %C = trunc <2 x i32> %B to <2 x i16>
48 %D = tail call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %C) nounwind
; Same cancellation as test7 but through i64: bswap16(trunc(bswap64(x)))
; --> trunc(x >> 48), extracting the top 16 bits of x (CHECK: `lshr ..., 48`).
52 define i16 @test8(i64 %A) {
53 ; CHECK-LABEL: @test8(
54 ; CHECK-NEXT: [[TMP1:%.*]] = lshr i64 [[A:%.*]], 48
55 ; CHECK-NEXT: [[D:%.*]] = trunc i64 [[TMP1]] to i16
56 ; CHECK-NEXT: ret i16 [[D]]
58 %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
59 %C = trunc i64 %B to i16
60 %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
; Vector form of test8: per-element fold to a splat `lshr <2 x i64> ..., 48`
; followed by a trunc to <2 x i16>.
64 define <2 x i16> @test8_vector(<2 x i64> %A) {
65 ; CHECK-LABEL: @test8_vector(
66 ; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i64> [[A:%.*]], <i64 48, i64 48>
67 ; CHECK-NEXT: [[D:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i16>
68 ; CHECK-NEXT: ret <2 x i16> [[D]]
70 %B = tail call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %A) nounwind
71 %C = trunc <2 x i64> %B to <2 x i16>
72 %D = tail call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %C) nounwind
76 ; Misc: Fold bswap(undef) to undef.
; NOTE(review): the enclosing `define` line and the ret/closing brace for this
; test appear to be missing from this chunk — verify against the full file.
79 ; CHECK-NEXT: ret i64 undef
81 %a = call i64 @llvm.bswap.i64(i64 undef)
86 ; Fold: OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
87 ; Fold: OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
; Constant operand: and(bswap16(a), 10001) --> bswap16(and(a, bswap16(10001))).
; 10001 = 0x2711, whose byte swap is 0x1127 = 4391 (matches the CHECK line).
88 define i16 @bs_and16i(i16 %a, i16 %b) #0 {
89 ; CHECK-LABEL: @bs_and16i(
90 ; CHECK-NEXT: [[TMP1:%.*]] = and i16 [[A:%.*]], 4391
91 ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
92 ; CHECK-NEXT: ret i16 [[TMP2]]
94 %1 = tail call i16 @llvm.bswap.i16(i16 %a)
95 %2 = and i16 %1, 10001
; Two swapped operands: and(bswap16(a), bswap16(b)) --> bswap16(and(a, b)),
; replacing two bswaps with one.
99 define i16 @bs_and16(i16 %a, i16 %b) #0 {
100 ; CHECK-LABEL: @bs_and16(
101 ; CHECK-NEXT: [[TMP1:%.*]] = and i16 [[A:%.*]], [[B:%.*]]
102 ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
103 ; CHECK-NEXT: ret i16 [[TMP2]]
105 %t1 = tail call i16 @llvm.bswap.i16(i16 %a)
106 %t2 = tail call i16 @llvm.bswap.i16(i16 %b)
107 %t3 = and i16 %t1, %t2
; Same fold with `or`.
111 define i16 @bs_or16(i16 %a, i16 %b) #0 {
112 ; CHECK-LABEL: @bs_or16(
113 ; CHECK-NEXT: [[TMP1:%.*]] = or i16 [[A:%.*]], [[B:%.*]]
114 ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
115 ; CHECK-NEXT: ret i16 [[TMP2]]
117 %t1 = tail call i16 @llvm.bswap.i16(i16 %a)
118 %t2 = tail call i16 @llvm.bswap.i16(i16 %b)
119 %t3 = or i16 %t1, %t2
; Same fold with `xor`.
123 define i16 @bs_xor16(i16 %a, i16 %b) #0 {
124 ; CHECK-LABEL: @bs_xor16(
125 ; CHECK-NEXT: [[TMP1:%.*]] = xor i16 [[A:%.*]], [[B:%.*]]
126 ; CHECK-NEXT: [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
127 ; CHECK-NEXT: ret i16 [[TMP2]]
129 %t1 = tail call i16 @llvm.bswap.i16(i16 %a)
130 %t2 = tail call i16 @llvm.bswap.i16(i16 %b)
131 %t3 = xor i16 %t1, %t2
; i32 constant operand: the mask 100001 is byte-swapped at compile time and
; the `and` is hoisted above the bswap (CHECK shows the swapped constant).
135 define i32 @bs_and32i(i32 %a, i32 %b) #0 {
136 ; CHECK-LABEL: @bs_and32i(
137 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], -1585053440
138 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
139 ; CHECK-NEXT: ret i32 [[TMP2]]
141 %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
142 %t2 = and i32 %t1, 100001
; i32: and(bswap(a), bswap(b)) --> bswap(and(a, b)).
146 define i32 @bs_and32(i32 %a, i32 %b) #0 {
147 ; CHECK-LABEL: @bs_and32(
148 ; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
149 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
150 ; CHECK-NEXT: ret i32 [[TMP2]]
152 %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
153 %t2 = tail call i32 @llvm.bswap.i32(i32 %b)
154 %t3 = and i32 %t1, %t2
; Same fold with `or`.
158 define i32 @bs_or32(i32 %a, i32 %b) #0 {
159 ; CHECK-LABEL: @bs_or32(
160 ; CHECK-NEXT: [[TMP1:%.*]] = or i32 [[A:%.*]], [[B:%.*]]
161 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
162 ; CHECK-NEXT: ret i32 [[TMP2]]
164 %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
165 %t2 = tail call i32 @llvm.bswap.i32(i32 %b)
166 %t3 = or i32 %t1, %t2
; Same fold with `xor`.
170 define i32 @bs_xor32(i32 %a, i32 %b) #0 {
171 ; CHECK-LABEL: @bs_xor32(
172 ; CHECK-NEXT: [[TMP1:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
173 ; CHECK-NEXT: [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
174 ; CHECK-NEXT: ret i32 [[TMP2]]
176 %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
177 %t2 = tail call i32 @llvm.bswap.i32(i32 %b)
178 %t3 = xor i32 %t1, %t2
; i64 constant operand: 1000000001 is byte-swapped at compile time and the
; `and` is hoisted above the bswap (CHECK shows the swapped constant).
182 define i64 @bs_and64i(i64 %a, i64 %b) #0 {
183 ; CHECK-LABEL: @bs_and64i(
184 ; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[A:%.*]], 129085117527228416
185 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
186 ; CHECK-NEXT: ret i64 [[TMP2]]
188 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
189 %t2 = and i64 %t1, 1000000001
; i64: and(bswap(a), bswap(b)) --> bswap(and(a, b)).
193 define i64 @bs_and64(i64 %a, i64 %b) #0 {
194 ; CHECK-LABEL: @bs_and64(
195 ; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[A:%.*]], [[B:%.*]]
196 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
197 ; CHECK-NEXT: ret i64 [[TMP2]]
199 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
200 %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
201 %t3 = and i64 %t1, %t2
; Same fold with `or`.
205 define i64 @bs_or64(i64 %a, i64 %b) #0 {
206 ; CHECK-LABEL: @bs_or64(
207 ; CHECK-NEXT: [[TMP1:%.*]] = or i64 [[A:%.*]], [[B:%.*]]
208 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
209 ; CHECK-NEXT: ret i64 [[TMP2]]
211 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
212 %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
213 %t3 = or i64 %t1, %t2
; Same fold with `xor`.
217 define i64 @bs_xor64(i64 %a, i64 %b) #0 {
218 ; CHECK-LABEL: @bs_xor64(
219 ; CHECK-NEXT: [[TMP1:%.*]] = xor i64 [[A:%.*]], [[B:%.*]]
220 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
221 ; CHECK-NEXT: ret i64 [[TMP2]]
223 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
224 %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
225 %t3 = xor i64 %t1, %t2
; Vector forms: OP(bswap(a), bswap(b)) --> bswap(OP(a, b)) also applies
; element-wise to <2 x i32> operands.
229 define <2 x i32> @bs_and32vec(<2 x i32> %a, <2 x i32> %b) #0 {
230 ; CHECK-LABEL: @bs_and32vec(
231 ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], [[B:%.*]]
232 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
233 ; CHECK-NEXT: ret <2 x i32> [[TMP2]]
235 %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
236 %t2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
237 %t3 = and <2 x i32> %t1, %t2
; Same fold with `or`.
241 define <2 x i32> @bs_or32vec(<2 x i32> %a, <2 x i32> %b) #0 {
242 ; CHECK-LABEL: @bs_or32vec(
243 ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], [[B:%.*]]
244 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
245 ; CHECK-NEXT: ret <2 x i32> [[TMP2]]
247 %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
248 %t2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
249 %t3 = or <2 x i32> %t1, %t2
; Same fold with `xor`.
253 define <2 x i32> @bs_xor32vec(<2 x i32> %a, <2 x i32> %b) #0 {
254 ; CHECK-LABEL: @bs_xor32vec(
255 ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], [[B:%.*]]
256 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
257 ; CHECK-NEXT: ret <2 x i32> [[TMP2]]
259 %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
260 %t2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
261 %t3 = xor <2 x i32> %t1, %t2
; Vector + splat-constant forms: the splat <100001, 100001> is byte-swapped
; per element at compile time and the bitwise op is hoisted above the bswap.
265 define <2 x i32> @bs_and32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
266 ; CHECK-LABEL: @bs_and32ivec(
267 ; CHECK-NEXT: [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
268 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
269 ; CHECK-NEXT: ret <2 x i32> [[TMP2]]
271 %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
272 %t2 = and <2 x i32> %t1, <i32 100001, i32 100001>
; Same fold with `or`.
276 define <2 x i32> @bs_or32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
277 ; CHECK-LABEL: @bs_or32ivec(
278 ; CHECK-NEXT: [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
279 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
280 ; CHECK-NEXT: ret <2 x i32> [[TMP2]]
282 %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
283 %t2 = or <2 x i32> %t1, <i32 100001, i32 100001>
; Same fold with `xor`.
287 define <2 x i32> @bs_xor32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
288 ; CHECK-LABEL: @bs_xor32ivec(
289 ; CHECK-NEXT: [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
290 ; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
291 ; CHECK-NEXT: ret <2 x i32> [[TMP2]]
293 %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
294 %t2 = xor <2 x i32> %t1, <i32 100001, i32 100001>
; Multi-use tests: the fold is profitable only when it removes bswaps.
; Here BOTH bswap results have extra uses (the muls), so the CHECK lines
; show no fold — the original bswaps and the `and` are all retained.
298 define i64 @bs_and64_multiuse1(i64 %a, i64 %b) #0 {
299 ; CHECK-LABEL: @bs_and64_multiuse1(
300 ; CHECK-NEXT: [[T1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
301 ; CHECK-NEXT: [[T2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
302 ; CHECK-NEXT: [[T3:%.*]] = and i64 [[T1]], [[T2]]
303 ; CHECK-NEXT: [[T4:%.*]] = mul i64 [[T3]], [[T1]]
304 ; CHECK-NEXT: [[T5:%.*]] = mul i64 [[T4]], [[T2]]
305 ; CHECK-NEXT: ret i64 [[T5]]
307 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
308 %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
309 %t3 = and i64 %t1, %t2
310 %t4 = mul i64 %t3, %t1 ; to increase use count of the bswaps
311 %t5 = mul i64 %t4, %t2 ; to increase use count of the bswaps
; Only %t1 has an extra use: the CHECK lines show the fold still fires
; (and of %a, %b followed by one bswap), with bswap(%a) kept for the mul.
315 define i64 @bs_and64_multiuse2(i64 %a, i64 %b) #0 {
316 ; CHECK-LABEL: @bs_and64_multiuse2(
317 ; CHECK-NEXT: [[T1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
318 ; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[A]], [[B:%.*]]
319 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
320 ; CHECK-NEXT: [[T4:%.*]] = mul i64 [[TMP2]], [[T1]]
321 ; CHECK-NEXT: ret i64 [[T4]]
323 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
324 %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
325 %t3 = and i64 %t1, %t2
326 %t4 = mul i64 %t3, %t1 ; to increase use count of the bswaps
; Mirror of multiuse2: only %t2 has an extra use, and the fold still fires,
; keeping bswap(%b) for the mul.
330 define i64 @bs_and64_multiuse3(i64 %a, i64 %b) #0 {
331 ; CHECK-LABEL: @bs_and64_multiuse3(
332 ; CHECK-NEXT: [[T2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
333 ; CHECK-NEXT: [[TMP1:%.*]] = and i64 [[A:%.*]], [[B]]
334 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
335 ; CHECK-NEXT: [[T4:%.*]] = mul i64 [[TMP2]], [[T2]]
336 ; CHECK-NEXT: ret i64 [[T4]]
338 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
339 %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
340 %t3 = and i64 %t1, %t2
341 %t4 = mul i64 %t3, %t2 ; to increase use count of the bswaps
; Constant-mask variant: the lone bswap has an extra use (the mul), so the
; CHECK lines show no fold — the mask stays after the bswap, unswapped.
345 define i64 @bs_and64i_multiuse(i64 %a, i64 %b) #0 {
346 ; CHECK-LABEL: @bs_and64i_multiuse(
347 ; CHECK-NEXT: [[T1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
348 ; CHECK-NEXT: [[T2:%.*]] = and i64 [[T1]], 1000000001
349 ; CHECK-NEXT: [[T3:%.*]] = mul i64 [[T2]], [[T1]]
350 ; CHECK-NEXT: ret i64 [[T3]]
352 %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
353 %t2 = and i64 %t1, 1000000001
354 %t3 = mul i64 %t2, %t1 ; to increase use count of the bswap
358 declare i16 @llvm.bswap.i16(i16)
359 declare i32 @llvm.bswap.i32(i32)
360 declare i64 @llvm.bswap.i64(i64)
361 declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
362 declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
363 declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)