; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s
; If we have some pattern that leaves only some low bits set, and then performs
; left-shift of those bits, we can combine those two shifts into a shift+mask.

; There are many variants to this pattern:
;   d)  (trunc ((x & ((-1 << maskNbits) >> maskNbits)))) << shiftNbits
; simplify to:
;   (trunc(x)) << shiftNbits
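;
; Illustrative walk-through (not itself one of the autogenerated tests): in the
; tests below maskNbits is %nbits and shiftNbits is %nbits - 32. With
; %nbits = 40, the mask (-1 << 40) >> 40 keeps only the low 24 bits of the
; i64 %x; after the trunc to i32 and the shl by 40 - 32 = 8, only the low
; 24 bits of the truncated value can reach the result anyway, so the masking
; is redundant and can be dropped.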

declare void @use32(i32)
declare void @use64(i64)
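
; Basic scalar positive test: the mask is fully redundant here, so only the
; trunc + shl remain in the output.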
define i32 @t0_basic(i64 %x, i32 %nbits) {
; CHECK-LABEL: @t0_basic(
; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -32
; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
; CHECK-NEXT:    call void @use64(i64 [[T0]])
; CHECK-NEXT:    call void @use64(i64 [[T1]])
; CHECK-NEXT:    call void @use64(i64 [[T2]])
; CHECK-NEXT:    call void @use32(i32 [[T3]])
; CHECK-NEXT:    call void @use64(i64 [[T4]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[X]] to i32
; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[TMP1]], [[T3]]
; CHECK-NEXT:    ret i32 [[T6]]
;
  %t0 = zext i32 %nbits to i64
  %t1 = shl i64 -1, %t0
  %t2 = lshr i64 %t1, %t0
  %t3 = add i32 %nbits, -32
  %t4 = and i64 %t2, %x
  call void @use64(i64 %t0)
  call void @use64(i64 %t1)
  call void @use64(i64 %t2)
  call void @use32(i32 %t3)
  call void @use64(i64 %t4)
  %t5 = trunc i64 %t4 to i32
  %t6 = shl i32 %t5, %t3
  ret i32 %t6
}

declare void @use8xi32(<8 x i32>)
declare void @use8xi64(<8 x i64>)
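
; Splat-vector version of the basic test; the same trunc + shl fold applies
; elementwise.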
define <8 x i32> @t1_vec_splat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t1_vec_splat(
; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
; CHECK-NEXT:    ret <8 x i32> [[T6]]
;
  %t0 = zext <8 x i32> %nbits to <8 x i64>
  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>, %t0
  %t2 = lshr <8 x i64> %t1, %t0
  %t3 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32>
  %t4 = and <8 x i64> %t2, %x
  call void @use8xi64(<8 x i64> %t0)
  call void @use8xi64(<8 x i64> %t1)
  call void @use8xi64(<8 x i64> %t2)
  call void @use8xi32(<8 x i32> %t3)
  call void @use8xi64(<8 x i64> %t4)
  %t5 = trunc <8 x i64> %t4 to <8 x i32>
  %t6 = shl <8 x i32> %t5, %t3
  ret <8 x i32> %t6
}
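
; Splat vector with undef lanes in the mask and final-shift-amount constants;
; the fold is still performed.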
define <8 x i32> @t2_vec_splat_undef(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t2_vec_splat_undef(
; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
; CHECK-NEXT:    ret <8 x i32> [[T6]]
;
  %t0 = zext <8 x i32> %nbits to <8 x i64>
  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
  %t2 = lshr <8 x i64> %t1, %t0
  %t3 = add <8 x i32> %nbits, <i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 -32, i32 undef, i32 -32>
  %t4 = and <8 x i64> %t2, %x
  call void @use8xi64(<8 x i64> %t0)
  call void @use8xi64(<8 x i64> %t1)
  call void @use8xi64(<8 x i64> %t2)
  call void @use8xi32(<8 x i32> %t3)
  call void @use8xi64(<8 x i64> %t4)
  %t5 = trunc <8 x i64> %t4 to <8 x i32>
  %t6 = shl <8 x i32> %t5, %t3
  ret <8 x i32> %t6
}
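
; Non-splat constants in the final shift amount (the add); the fold is still
; performed.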
define <8 x i32> @t3_vec_nonsplat(<8 x i64> %x, <8 x i32> %nbits) {
; CHECK-LABEL: @t3_vec_nonsplat(
; CHECK-NEXT:    [[T0:%.*]] = zext <8 x i32> [[NBITS:%.*]] to <8 x i64>
; CHECK-NEXT:    [[T1:%.*]] = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = lshr <8 x i64> [[T1]], [[T0]]
; CHECK-NEXT:    [[T3:%.*]] = add <8 x i32> [[NBITS]], <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
; CHECK-NEXT:    [[T4:%.*]] = and <8 x i64> [[T2]], [[X:%.*]]
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T0]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T1]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T2]])
; CHECK-NEXT:    call void @use8xi32(<8 x i32> [[T3]])
; CHECK-NEXT:    call void @use8xi64(<8 x i64> [[T4]])
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <8 x i64> [[X]] to <8 x i32>
; CHECK-NEXT:    [[T6:%.*]] = shl <8 x i32> [[TMP1]], [[T3]]
; CHECK-NEXT:    ret <8 x i32> [[T6]]
;
  %t0 = zext <8 x i32> %nbits to <8 x i64>
  %t1 = shl <8 x i64> <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 undef, i64 -1>, %t0
  %t2 = lshr <8 x i64> %t1, %t0
  %t3 = add <8 x i32> %nbits, <i32 -32, i32 -1, i32 0, i32 1, i32 31, i32 32, i32 undef, i32 64>
  %t4 = and <8 x i64> %t2, %x
  call void @use8xi64(<8 x i64> %t0)
  call void @use8xi64(<8 x i64> %t1)
  call void @use8xi64(<8 x i64> %t2)
  call void @use8xi32(<8 x i32> %t3)
  call void @use8xi64(<8 x i64> %t4)
  %t5 = trunc <8 x i64> %t4 to <8 x i32>
  %t6 = shl <8 x i32> %t5, %t3
  ret <8 x i32> %t6
}
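
; Negative test: %t5, the trunc of the masked value, has an extra use, so the
; fold is not performed and the mask + trunc are kept.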
define i32 @n4_extrause(i64 %x, i32 %nbits) {
; CHECK-LABEL: @n4_extrause(
; CHECK-NEXT:    [[T0:%.*]] = zext i32 [[NBITS:%.*]] to i64
; CHECK-NEXT:    [[T1:%.*]] = shl i64 -1, [[T0]]
; CHECK-NEXT:    [[T2:%.*]] = lshr i64 [[T1]], [[T0]]
; CHECK-NEXT:    [[T3:%.*]] = add i32 [[NBITS]], -32
; CHECK-NEXT:    [[T4:%.*]] = and i64 [[T2]], [[X:%.*]]
; CHECK-NEXT:    call void @use64(i64 [[T0]])
; CHECK-NEXT:    call void @use64(i64 [[T1]])
; CHECK-NEXT:    call void @use64(i64 [[T2]])
; CHECK-NEXT:    call void @use32(i32 [[T3]])
; CHECK-NEXT:    call void @use64(i64 [[T4]])
; CHECK-NEXT:    [[T5:%.*]] = trunc i64 [[T4]] to i32
; CHECK-NEXT:    call void @use32(i32 [[T5]])
; CHECK-NEXT:    [[T6:%.*]] = shl i32 [[T5]], [[T3]]
; CHECK-NEXT:    ret i32 [[T6]]
;
  %t0 = zext i32 %nbits to i64
  %t1 = shl i64 -1, %t0
  %t2 = lshr i64 %t1, %t0
  %t3 = add i32 %nbits, -32
  %t4 = and i64 %t2, %x
  call void @use64(i64 %t0)
  call void @use64(i64 %t1)
  call void @use64(i64 %t2)
  call void @use32(i32 %t3)
  call void @use64(i64 %t4)
  %t5 = trunc i64 %t4 to i32
  call void @use32(i32 %t5)
  %t6 = shl i32 %t5, %t3
  ret i32 %t6
}