; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"

; Canonicalize or(shl,lshr) by constant to funnel shift intrinsics.
; This should help cost modeling for vectorization, inlining, etc.
; If a target does not have a fshl instruction, the expansion will
; be exactly these same 3 basic ops (shl/lshr/or).
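; For reference (LLVM LangRef semantics), for a shift amount c in [1, bw-1]:
;   fshl(x, y, c) == (x << c) | (y >> (bw - c))
;   fshr(x, y, c) == (x << (bw - c)) | (y >> c)
; so a pair of constant shift amounts that sums to the bitwidth matches exactly.
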
define i32 @fshl_i32_constant(i32 %x, i32 %y) {
; CHECK-LABEL: @fshl_i32_constant(
; CHECK-NEXT:    [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[Y:%.*]], i32 11)
; CHECK-NEXT:    ret i32 [[R]]
;
  %shl = shl i32 %x, 11
  %shr = lshr i32 %y, 21
  %r = or i32 %shr, %shl
  ret i32 %r
}

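; The fshr form below is canonicalized to fshl with the complementary shift
; amount (42 - 31 == 11); fshl is the canonical form when the amount is constant.
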
define i42 @fshr_i42_constant(i42 %x, i42 %y) {
; CHECK-LABEL: @fshr_i42_constant(
; CHECK-NEXT:    [[R:%.*]] = call i42 @llvm.fshl.i42(i42 [[Y:%.*]], i42 [[X:%.*]], i42 11)
; CHECK-NEXT:    ret i42 [[R]]
;
  %shr = lshr i42 %x, 31
  %shl = shl i42 %y, 11
  %r = or i42 %shr, %shl
  ret i42 %r
}

; Vector types are allowed.

define <2 x i16> @fshl_v2i16_constant_splat(<2 x i16> %x, <2 x i16> %y) {
; CHECK-LABEL: @fshl_v2i16_constant_splat(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[Y:%.*]], <2 x i16> <i16 1, i16 1>)
; CHECK-NEXT:    ret <2 x i16> [[R]]
;
  %shl = shl <2 x i16> %x, <i16 1, i16 1>
  %shr = lshr <2 x i16> %y, <i16 15, i16 15>
  %r = or <2 x i16> %shl, %shr
  ret <2 x i16> %r
}

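; In the next two tests, one element of a shift-amount constant is undef; the
; undef lane is treated as if it were the splat value, so the canonical splat
; amount is still used.
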
define <2 x i16> @fshl_v2i16_constant_splat_undef0(<2 x i16> %x, <2 x i16> %y) {
; CHECK-LABEL: @fshl_v2i16_constant_splat_undef0(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[Y:%.*]], <2 x i16> <i16 1, i16 1>)
; CHECK-NEXT:    ret <2 x i16> [[R]]
;
  %shl = shl <2 x i16> %x, <i16 undef, i16 1>
  %shr = lshr <2 x i16> %y, <i16 15, i16 15>
  %r = or <2 x i16> %shl, %shr
  ret <2 x i16> %r
}

define <2 x i16> @fshl_v2i16_constant_splat_undef1(<2 x i16> %x, <2 x i16> %y) {
; CHECK-LABEL: @fshl_v2i16_constant_splat_undef1(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[Y:%.*]], <2 x i16> <i16 1, i16 1>)
; CHECK-NEXT:    ret <2 x i16> [[R]]
;
  %shl = shl <2 x i16> %x, <i16 1, i16 1>
  %shr = lshr <2 x i16> %y, <i16 15, i16 undef>
  %r = or <2 x i16> %shl, %shr
  ret <2 x i16> %r
}

; Non-power-of-2 vector types are allowed.

define <2 x i17> @fshr_v2i17_constant_splat(<2 x i17> %x, <2 x i17> %y) {
; CHECK-LABEL: @fshr_v2i17_constant_splat(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i17> @llvm.fshl.v2i17(<2 x i17> [[Y:%.*]], <2 x i17> [[X:%.*]], <2 x i17> <i17 5, i17 5>)
; CHECK-NEXT:    ret <2 x i17> [[R]]
;
  %shr = lshr <2 x i17> %x, <i17 12, i17 12>
  %shl = shl <2 x i17> %y, <i17 5, i17 5>
  %r = or <2 x i17> %shr, %shl
  ret <2 x i17> %r
}

define <2 x i17> @fshr_v2i17_constant_splat_undef0(<2 x i17> %x, <2 x i17> %y) {
; CHECK-LABEL: @fshr_v2i17_constant_splat_undef0(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i17> @llvm.fshl.v2i17(<2 x i17> [[Y:%.*]], <2 x i17> [[X:%.*]], <2 x i17> <i17 5, i17 5>)
; CHECK-NEXT:    ret <2 x i17> [[R]]
;
  %shr = lshr <2 x i17> %x, <i17 12, i17 undef>
  %shl = shl <2 x i17> %y, <i17 undef, i17 5>
  %r = or <2 x i17> %shr, %shl
  ret <2 x i17> %r
}

define <2 x i17> @fshr_v2i17_constant_splat_undef1(<2 x i17> %x, <2 x i17> %y) {
; CHECK-LABEL: @fshr_v2i17_constant_splat_undef1(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i17> @llvm.fshl.v2i17(<2 x i17> [[Y:%.*]], <2 x i17> [[X:%.*]], <2 x i17> <i17 5, i17 5>)
; CHECK-NEXT:    ret <2 x i17> [[R]]
;
  %shr = lshr <2 x i17> %x, <i17 12, i17 undef>
  %shl = shl <2 x i17> %y, <i17 5, i17 undef>
  %r = or <2 x i17> %shr, %shl
  ret <2 x i17> %r
}

; Allow arbitrary shift constants.
; Support undef elements.
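; A lane whose shift amount is undef can be chosen to be >= the bitwidth, which
; makes that lane of the 'or' poison, so any replacement amount is valid;
; instcombine uses 0 for such lanes (an fshl by 0 returns its first operand).
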
define <2 x i32> @fshr_v2i32_constant_nonsplat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @fshr_v2i32_constant_nonsplat(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> [[X:%.*]], <2 x i32> <i32 15, i32 13>)
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %shr = lshr <2 x i32> %x, <i32 17, i32 19>
  %shl = shl <2 x i32> %y, <i32 15, i32 13>
  %r = or <2 x i32> %shl, %shr
  ret <2 x i32> %r
}

define <2 x i32> @fshr_v2i32_constant_nonsplat_undef0(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @fshr_v2i32_constant_nonsplat_undef0(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> [[X:%.*]], <2 x i32> <i32 0, i32 13>)
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %shr = lshr <2 x i32> %x, <i32 undef, i32 19>
  %shl = shl <2 x i32> %y, <i32 15, i32 13>
  %r = or <2 x i32> %shl, %shr
  ret <2 x i32> %r
}

define <2 x i32> @fshr_v2i32_constant_nonsplat_undef1(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @fshr_v2i32_constant_nonsplat_undef1(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i32> @llvm.fshl.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> [[X:%.*]], <2 x i32> <i32 15, i32 0>)
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %shr = lshr <2 x i32> %x, <i32 17, i32 19>
  %shl = shl <2 x i32> %y, <i32 15, i32 undef>
  %r = or <2 x i32> %shl, %shr
  ret <2 x i32> %r
}

define <2 x i36> @fshl_v2i36_constant_nonsplat(<2 x i36> %x, <2 x i36> %y) {
; CHECK-LABEL: @fshl_v2i36_constant_nonsplat(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i36> @llvm.fshl.v2i36(<2 x i36> [[X:%.*]], <2 x i36> [[Y:%.*]], <2 x i36> <i36 21, i36 11>)
; CHECK-NEXT:    ret <2 x i36> [[R]]
;
  %shl = shl <2 x i36> %x, <i36 21, i36 11>
  %shr = lshr <2 x i36> %y, <i36 15, i36 25>
  %r = or <2 x i36> %shl, %shr
  ret <2 x i36> %r
}

define <3 x i36> @fshl_v3i36_constant_nonsplat_undef0(<3 x i36> %x, <3 x i36> %y) {
; CHECK-LABEL: @fshl_v3i36_constant_nonsplat_undef0(
; CHECK-NEXT:    [[R:%.*]] = call <3 x i36> @llvm.fshl.v3i36(<3 x i36> [[X:%.*]], <3 x i36> [[Y:%.*]], <3 x i36> <i36 21, i36 11, i36 0>)
; CHECK-NEXT:    ret <3 x i36> [[R]]
;
  %shl = shl <3 x i36> %x, <i36 21, i36 11, i36 undef>
  %shr = lshr <3 x i36> %y, <i36 15, i36 25, i36 undef>
  %r = or <3 x i36> %shl, %shr
  ret <3 x i36> %r
}

; Fold or(shl(x,a),lshr(y,bw-a)) -> fshl(x,y,a) iff a < bw
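; The 'and' with 63 proves the amount is in [0, 63]. When the masked amount is
; 0, the lshr by 64 is poison, so replacing the 'or' with fshl(x, y, 0) == x is
; a valid refinement.
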
define i64 @fshl_sub_mask(i64 %x, i64 %y, i64 %a) {
; CHECK-LABEL: @fshl_sub_mask(
; CHECK-NEXT:    [[R:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[Y:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    ret i64 [[R]]
;
  %mask = and i64 %a, 63
  %shl = shl i64 %x, %mask
  %sub = sub nuw nsw i64 64, %mask
  %shr = lshr i64 %y, %sub
  %r = or i64 %shl, %shr
  ret i64 %r
}

; Fold or(lshr(x,a),shl(y,bw-a)) -> fshr(y,x,a) iff a < bw

define i64 @fshr_sub_mask(i64 %x, i64 %y, i64 %a) {
; CHECK-LABEL: @fshr_sub_mask(
; CHECK-NEXT:    [[R:%.*]] = call i64 @llvm.fshr.i64(i64 [[Y:%.*]], i64 [[X:%.*]], i64 [[A:%.*]])
; CHECK-NEXT:    ret i64 [[R]]
;
  %mask = and i64 %a, 63
  %shr = lshr i64 %x, %mask
  %sub = sub nuw nsw i64 64, %mask
  %shl = shl i64 %y, %sub
  %r = or i64 %shl, %shr
  ret i64 %r
}

define <2 x i64> @fshr_sub_mask_vector(<2 x i64> %x, <2 x i64> %y, <2 x i64> %a) {
; CHECK-LABEL: @fshr_sub_mask_vector(
; CHECK-NEXT:    [[R:%.*]] = call <2 x i64> @llvm.fshr.v2i64(<2 x i64> [[Y:%.*]], <2 x i64> [[X:%.*]], <2 x i64> [[A:%.*]])
; CHECK-NEXT:    ret <2 x i64> [[R]]
;
  %mask = and <2 x i64> %a, <i64 63, i64 63>
  %shr = lshr <2 x i64> %x, %mask
  %sub = sub nuw nsw <2 x i64> <i64 64, i64 64>, %mask
  %shl = shl <2 x i64> %y, %sub
  %r = or <2 x i64> %shl, %shr
  ret <2 x i64> %r
}

; PR35155 - these are optionally UB-free funnel shift left/right patterns that are narrowed to a smaller bitwidth.
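; The shifts are performed in i32 after zext (mirroring C integer promotion),
; and the i16 result is recovered with a trunc; the whole pattern narrows to an
; i16 funnel shift.
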
define i16 @fshl_16bit(i16 %x, i16 %y, i32 %shift) {
; CHECK-LABEL: @fshl_16bit(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHIFT:%.*]] to i16
; CHECK-NEXT:    [[CONV2:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[Y:%.*]], i16 [[TMP1]])
; CHECK-NEXT:    ret i16 [[CONV2]]
;
  %and = and i32 %shift, 15
  %convx = zext i16 %x to i32
  %shl = shl i32 %convx, %and
  %sub = sub i32 16, %and
  %convy = zext i16 %y to i32
  %shr = lshr i32 %convy, %sub
  %or = or i32 %shr, %shl
  %conv2 = trunc i32 %or to i16
  ret i16 %conv2
}

; Commute the 'or' operands and try a vector type.

define <2 x i16> @fshl_commute_16bit_vec(<2 x i16> %x, <2 x i16> %y, <2 x i32> %shift) {
; CHECK-LABEL: @fshl_commute_16bit_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc <2 x i32> [[SHIFT:%.*]] to <2 x i16>
; CHECK-NEXT:    [[CONV2:%.*]] = call <2 x i16> @llvm.fshl.v2i16(<2 x i16> [[X:%.*]], <2 x i16> [[Y:%.*]], <2 x i16> [[TMP1]])
; CHECK-NEXT:    ret <2 x i16> [[CONV2]]
;
  %and = and <2 x i32> %shift, <i32 15, i32 15>
  %convx = zext <2 x i16> %x to <2 x i32>
  %shl = shl <2 x i32> %convx, %and
  %sub = sub <2 x i32> <i32 16, i32 16>, %and
  %convy = zext <2 x i16> %y to <2 x i32>
  %shr = lshr <2 x i32> %convy, %sub
  %or = or <2 x i32> %shl, %shr
  %conv2 = trunc <2 x i32> %or to <2 x i16>
  ret <2 x i16> %conv2
}

; Change the size, shift direction (the subtract is on the left-shift), and mask op.
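; Because the i3 shift amount can be at most 7 (< 8), no explicit mask is
; needed; the zext of the amount becomes the i8 funnel-shift amount directly.
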
define i8 @fshr_8bit(i8 %x, i8 %y, i3 %shift) {
; CHECK-LABEL: @fshr_8bit(
; CHECK-NEXT:    [[TMP1:%.*]] = zext i3 [[SHIFT:%.*]] to i8
; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[Y:%.*]], i8 [[X:%.*]], i8 [[TMP1]])
; CHECK-NEXT:    ret i8 [[CONV2]]
;
  %and = zext i3 %shift to i32
  %convx = zext i8 %x to i32
  %shr = lshr i32 %convx, %and
  %sub = sub i32 8, %and
  %convy = zext i8 %y to i32
  %shl = shl i32 %convy, %sub
  %or = or i32 %shl, %shr
  %conv2 = trunc i32 %or to i8
  ret i8 %conv2
}

; The right-shifted value does not need to be a zexted value; here it is masked.
; The shift mask could be less than the bitwidth, but this is still ok.

define i8 @fshr_commute_8bit(i32 %x, i32 %y, i32 %shift) {
; CHECK-LABEL: @fshr_commute_8bit(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHIFT:%.*]] to i8
; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], 3
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[Y:%.*]] to i8
; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP3]], i8 [[TMP4]], i8 [[TMP2]])
; CHECK-NEXT:    ret i8 [[CONV2]]
;
  %and = and i32 %shift, 3
  %convx = and i32 %x, 255
  %shr = lshr i32 %convx, %and
  %sub = sub i32 8, %and
  %convy = and i32 %y, 255
  %shl = shl i32 %convy, %sub
  %or = or i32 %shr, %shl
  %conv2 = trunc i32 %or to i8
  ret i8 %conv2
}

; The left-shifted value does not need to be masked at all.
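; The final trunc to i8 discards the same high bits that an 'and' with 255
; before the shl would have cleared, so the mask on the left-shifted value is
; unnecessary (%convy below is intentionally dead).
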
define i8 @fshr_commute_8bit_unmasked_shl(i32 %x, i32 %y, i32 %shift) {
; CHECK-LABEL: @fshr_commute_8bit_unmasked_shl(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i32 [[SHIFT:%.*]] to i8
; CHECK-NEXT:    [[TMP2:%.*]] = and i8 [[TMP1]], 3
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[Y:%.*]] to i8
; CHECK-NEXT:    [[TMP4:%.*]] = trunc i32 [[X:%.*]] to i8
; CHECK-NEXT:    [[CONV2:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP3]], i8 [[TMP4]], i8 [[TMP2]])
; CHECK-NEXT:    ret i8 [[CONV2]]
;
  %and = and i32 %shift, 3
  %convx = and i32 %x, 255
  %shr = lshr i32 %convx, %and
  %sub = sub i32 8, %and
  %convy = and i32 %y, 255
  %shl = shl i32 %y, %sub
  %or = or i32 %shr, %shl
  %conv2 = trunc i32 %or to i8
  ret i8 %conv2
}

; Convert select pattern to funnel shift that ends in 'or'.
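; The select guards the source pattern because a shift by the full bitwidth
; (when %shamt == 0) is poison. The intrinsic propagates poison from all of its
; operands, but in the select form %x is only observed when %shamt != 0, so %x
; must be frozen before the fshr uses it unconditionally.
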
define i8 @fshr_select(i8 %x, i8 %y, i8 %shamt) {
; CHECK-LABEL: @fshr_select(
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i8 [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP1]], i8 [[Y:%.*]], i8 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i8 [[R]]
;
  %cmp = icmp eq i8 %shamt, 0
  %sub = sub i8 8, %shamt
  %shr = lshr i8 %y, %shamt
  %shl = shl i8 %x, %sub
  %or = or i8 %shl, %shr
  %r = select i1 %cmp, i8 %y, i8 %or
  ret i8 %r
}

; Convert select pattern to funnel shift that ends in 'or'.

define i16 @fshl_select(i16 %x, i16 %y, i16 %shamt) {
; CHECK-LABEL: @fshl_select(
; CHECK-NEXT:    [[TMP1:%.*]] = freeze i16 [[Y:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[TMP1]], i16 [[SHAMT:%.*]])
; CHECK-NEXT:    ret i16 [[R]]
;
  %cmp = icmp eq i16 %shamt, 0
  %sub = sub i16 16, %shamt
  %shr = lshr i16 %y, %sub
  %shl = shl i16 %x, %shamt
  %or = or i16 %shr, %shl
  %r = select i1 %cmp, i16 %x, i16 %or
  ret i16 %r
}

; Convert select pattern to funnel shift that ends in 'or'.

define <2 x i64> @fshl_select_vector(<2 x i64> %x, <2 x i64> %y, <2 x i64> %shamt) {
; CHECK-LABEL: @fshl_select_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = freeze <2 x i64> [[X:%.*]]
; CHECK-NEXT:    [[R:%.*]] = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> [[Y:%.*]], <2 x i64> [[TMP1]], <2 x i64> [[SHAMT:%.*]])
; CHECK-NEXT:    ret <2 x i64> [[R]]
;
  %cmp = icmp eq <2 x i64> %shamt, zeroinitializer
  %sub = sub <2 x i64> <i64 64, i64 64>, %shamt
  %shr = lshr <2 x i64> %x, %sub
  %shl = shl <2 x i64> %y, %shamt
  %or = or <2 x i64> %shl, %shr
  %r = select <2 x i1> %cmp, <2 x i64> %y, <2 x i64> %or
  ret <2 x i64> %r
}

; Negative test - an oversized shift in the narrow type would produce the wrong value.
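; For example, with %shamt == 8 the original computes %t4 == 0 and %t6 == 0, so
; %t8 == trunc %x, but an i8 funnel shift would take the amount modulo 8 and
; return trunc %y instead.
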
define i8 @unmasked_shlop_unmasked_shift_amount(i32 %x, i32 %y, i32 %shamt) {
; CHECK-LABEL: @unmasked_shlop_unmasked_shift_amount(
; CHECK-NEXT:    [[MASKY:%.*]] = and i32 [[Y:%.*]], 255
; CHECK-NEXT:    [[T4:%.*]] = sub i32 8, [[SHAMT:%.*]]
; CHECK-NEXT:    [[T5:%.*]] = shl i32 [[X:%.*]], [[T4]]
; CHECK-NEXT:    [[T6:%.*]] = lshr i32 [[MASKY]], [[SHAMT]]
; CHECK-NEXT:    [[T7:%.*]] = or i32 [[T5]], [[T6]]
; CHECK-NEXT:    [[T8:%.*]] = trunc i32 [[T7]] to i8
; CHECK-NEXT:    ret i8 [[T8]]
;
  %masky = and i32 %y, 255
  %t4 = sub i32 8, %shamt
  %t5 = shl i32 %x, %t4
  %t6 = lshr i32 %masky, %shamt
  %t7 = or i32 %t5, %t6
  %t8 = trunc i32 %t7 to i8
  ret i8 %t8
}

; Negative test - an oversized shift in the narrow type would produce the wrong value.
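; Here the mask of 15 still allows a shift amount of 8, which produces the same
; mismatch as above, so the fold is rejected.
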
define i8 @unmasked_shlop_insufficient_mask_shift_amount(i16 %x, i16 %y, i16 %shamt) {
; CHECK-LABEL: @unmasked_shlop_insufficient_mask_shift_amount(
; CHECK-NEXT:    [[SHM:%.*]] = and i16 [[SHAMT:%.*]], 15
; CHECK-NEXT:    [[MASKX:%.*]] = and i16 [[X:%.*]], 255
; CHECK-NEXT:    [[T4:%.*]] = sub nsw i16 8, [[SHM]]
; CHECK-NEXT:    [[T5:%.*]] = shl i16 [[Y:%.*]], [[T4]]
; CHECK-NEXT:    [[T6:%.*]] = lshr i16 [[MASKX]], [[SHM]]
; CHECK-NEXT:    [[T7:%.*]] = or i16 [[T5]], [[T6]]
; CHECK-NEXT:    [[T8:%.*]] = trunc i16 [[T7]] to i8
; CHECK-NEXT:    ret i8 [[T8]]
;
  %shm = and i16 %shamt, 15
  %maskx = and i16 %x, 255
  %t4 = sub i16 8, %shm
  %t5 = shl i16 %y, %t4
  %t6 = lshr i16 %maskx, %shm
  %t7 = or i16 %t5, %t6
  %t8 = trunc i16 %t7 to i8
  ret i8 %t8
}

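; Positive test - masking the shift amount to [0, 7] makes the narrowing safe.
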
define i8 @unmasked_shlop_masked_shift_amount(i16 %x, i16 %y, i16 %shamt) {
; CHECK-LABEL: @unmasked_shlop_masked_shift_amount(
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i16 [[SHAMT:%.*]] to i8
; CHECK-NEXT:    [[TMP2:%.*]] = trunc i16 [[Y:%.*]] to i8
; CHECK-NEXT:    [[TMP3:%.*]] = trunc i16 [[X:%.*]] to i8
; CHECK-NEXT:    [[T8:%.*]] = call i8 @llvm.fshr.i8(i8 [[TMP2]], i8 [[TMP3]], i8 [[TMP1]])
; CHECK-NEXT:    ret i8 [[T8]]
;
  %shm = and i16 %shamt, 7
  %maskx = and i16 %x, 255
  %t4 = sub i16 8, %shm
  %t5 = shl i16 %y, %t4
  %t6 = lshr i16 %maskx, %shm
  %t7 = or i16 %t5, %t6
  %t8 = trunc i16 %t7 to i8
  ret i8 %t8
}