; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -passes=instcombine -S | FileCheck %s

; If we have some pattern that leaves only some low bits set, and then performs
; left-shift of those bits, we can combine those two shifts into a shift+mask.

; There are many variants to this pattern:
;   c)  (trunc ((x & (-1 >> maskNbits)))) << shiftNbits
; simplify to:
;   ((trunc(x)) << shiftNbits) & (-1 >> ((-(maskNbits+shiftNbits))+32))
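;
; A concrete, illustrative instance (not itself a test): with %nbits = 40 in
; @t0_basic below, %t1 = -1 u>> 40 keeps the low 24 bits of %x, the trunc
; keeps the low 32, and the shl by 40 - 33 = 7 moves those 24 bits into
; positions 7..30. Bit 31 of the result is therefore always zero, so the
; chain is equivalent to shifting the plain trunc and clearing just bit 31:
;   %t4 = trunc i64 %x to i32
;   %t5 = shl i32 %t4, 7
;   %r  = and i32 %t5, 2147483647 ; 0x7FFFFFFF: only bit 31 needs clearing
; which matches the folded form the CHECK lines below expect.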

declare void @use32(i32)
declare void @use64(i64)

define i32 @t0_basic(i64 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT:    call void @use64(i64 [[T0]])
; CHECK-NEXT:    call void @use64(i64 [[T1]])
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X:%.*]] to i32
; CHECK-NEXT:    [[TMP2:%.*]] = shl i32 [[TMP1]], [[T2]]
; CHECK-NEXT:    [[T5:%.*]] = and i32 [[TMP2]], 2147483647
; CHECK-NEXT:    ret i32 [[T5]]
;
  %t0 = zext i32 %nbits to i64
  %t1 = lshr i64 -1, %t0
  %t2 = add i32 %nbits, -33

  call void @use64(i64 %t0)
  call void @use64(i64 %t1)
  call void @use32(i32 %t2)

  %t3 = and i64 %t1, %x
  %t4 = trunc i64 %t3 to i32
  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
  ret i32 %t5
}

declare void @use8xi32(<8 x i32>)
declare void @use8xi64(<8 x i64>)

define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t1_vec_splat(
; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> splat (i64 -1), [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], splat (i32 -33)
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], splat (i32 2147483647)
; CHECK-NEXT:    ret <8 x i32> [[T5]]
;
  %t0 = zext <8 x i32> %nbits to <8 x i64>
  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t0
  %t2 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33>

  call void @use8xi64(<8 x i64> %t0)
  call void @use8xi64(<8 x i64> %t1)
  call void @use8xi32(<8 x i32> %t2)

  %t3 = and <8 x i64> %t1, %x
  %t4 = trunc <8 x i64> %t3 to <8 x i32>
  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
  ret <8 x i32> %t5
}
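
; Same fold as @t1_vec_splat, but the splat constants carry poison lanes;
; the poison elements in the CHECK constants show they are expected to
; propagate through the transform.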

define <8 x i32> @t2_vec_splat_poison(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat_poison(
; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 poison, i64 -1>, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 poison, i32 -33>
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647, i32 poison, i32 2147483647>
; CHECK-NEXT:    ret <8 x i32> [[T5]]
;
  %t0 = zext <8 x i32> %nbits to <8 x i64>
  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 poison, i64 -1>, %t0
  %t2 = add <8 x i32> %nbits, <i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 -33, i32 poison, i32 -33>

  call void @use8xi64(<8 x i64> %t0)
  call void @use8xi64(<8 x i64> %t1)
  call void @use8xi32(<8 x i32> %t2)

  %t3 = and <8 x i64> %t1, %x
  %t4 = trunc <8 x i64> %t3 to <8 x i32>
  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
  ret <8 x i32> %t5
}
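
; Non-splat shift amounts: each lane folds to its own residual mask, as the
; per-lane constants in the CHECK lines below show.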

define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t3_vec_nonsplat(
; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT:    [[T1:%.*]] = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 poison, i64 -1>, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add <8 x i32> [[NBITS]], <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 poison, i32 65>
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T2]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X:%.*]] to <8 x i32>
; CHECK-NEXT:    [[TMP2:%.*]] = shl <8 x i32> [[TMP1]], [[T2]]
; CHECK-NEXT:    [[T5:%.*]] = and <8 x i32> [[TMP2]], <i32 poison, i32 1, i32 2147483647, i32 -1, i32 -1, i32 -1, i32 poison, i32 poison>
; CHECK-NEXT:    ret <8 x i32> [[T5]]
;
  %t0 = zext <8 x i32> %nbits to <8 x i64>
  %t1 = lshr <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 poison, i64 -1>, %t0
  %t2 = add <8 x i32> %nbits, <i32 -64, i32 -63, i32 -33, i32 -32, i32 63, i32 64, i32 poison, i32 65>

  call void @use8xi64(<8 x i64> %t0)
  call void @use8xi64(<8 x i64> %t1)
  call void @use8xi32(<8 x i32> %t2)

  %t3 = and <8 x i64> %t1, %x
  %t4 = trunc <8 x i64> %t3 to <8 x i32>
  %t5 = shl <8 x i32> %t4, %t2 ; shift is smaller than mask
  ret <8 x i32> %t5
}
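
; Negative tests: an extra use of the 'and' result (@n4_extrause0), of the
; truncated value (@n5_extrause1), or of both (@n6_extrause2) keeps those
; instructions alive, so the fold does not fire and the CHECK lines expect
; the original sequence to remain.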

define i32 @n4_extrause0(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n4_extrause0(
; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT:    call void @use64(i64 [[T0]])
; CHECK-NEXT:    call void @use64(i64 [[T1]])
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
; CHECK-NEXT:    ret i32 [[T5]]
;
  %t0 = zext i32 %nbits to i64
  %t1 = lshr i64 -1, %t0
  %t2 = add i32 %nbits, -33

  call void @use64(i64 %t0)
  call void @use64(i64 %t1)
  call void @use32(i32 %t2)

  %t3 = and i64 %t1, %x
  call void @use64(i64 %t3)
  %t4 = trunc i64 %t3 to i32
  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
  ret i32 %t5
}

define i32 @n5_extrause1(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n5_extrause1(
; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT:    call void @use64(i64 [[T0]])
; CHECK-NEXT:    call void @use64(i64 [[T1]])
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T4]])
; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
; CHECK-NEXT:    ret i32 [[T5]]
;
  %t0 = zext i32 %nbits to i64
  %t1 = lshr i64 -1, %t0
  %t2 = add i32 %nbits, -33

  call void @use64(i64 %t0)
  call void @use64(i64 %t1)
  call void @use32(i32 %t2)

  %t3 = and i64 %t1, %x
  %t4 = trunc i64 %t3 to i32
  call void @use32(i32 %t4)
  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
  ret i32 %t5
}

define i32 @n6_extrause2(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n6_extrause2(
; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT:    [[T1:%.*]] = lshr i64 -1, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = add i32 [[NBITS]], -33
; CHECK-NEXT:    call void @use64(i64 [[T0]])
; CHECK-NEXT:    call void @use64(i64 [[T1]])
; CHECK-NEXT:    call void @use32(i32 [[T2]])
; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[X:%.*]]
; CHECK-NEXT:    call void @use64(i64 [[T3]])
; CHECK-NEXT:    [[T4:%.*]] = trunc i64 [[T3]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T4]])
; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[T4]], [[T2]]
; CHECK-NEXT:    ret i32 [[T5]]
;
  %t0 = zext i32 %nbits to i64
  %t1 = lshr i64 -1, %t0
  %t2 = add i32 %nbits, -33

  call void @use64(i64 %t0)
  call void @use64(i64 %t1)
  call void @use32(i32 %t2)

  %t3 = and i64 %t1, %x
  call void @use64(i64 %t3)
  %t4 = trunc i64 %t3 to i32
  call void @use32(i32 %t4)
  %t5 = shl i32 %t4, %t2 ; shift is smaller than mask
  ret i32 %t5
}