1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=aarch64-- | FileCheck %s
4 declare i16 @llvm.ushl.sat.i16(i16, i16)
5 declare <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16>, <2 x i16>)
7 ; fold (shlsat undef, x) -> 0
define i16 @combine_shl_undef(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 undef, i16 %y)
  ret i16 %tmp
}
17 ; fold (shlsat x, undef) -> undef
define i16 @combine_shl_by_undef(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_undef:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 undef)
  ret i16 %tmp
}
26 ; fold (shlsat poison, x) -> 0
define i16 @combine_shl_poison(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_poison:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 poison, i16 %y)
  ret i16 %tmp
}
36 ; fold (shlsat x, poison) -> undef
define i16 @combine_shl_by_poison(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_poison:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 poison)
  ret i16 %tmp
}
45 ; fold (shlsat x, bitwidth) -> undef
define i16 @combine_shl_by_bitwidth(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_by_bitwidth:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 16)
  ret i16 %tmp
}
54 ; fold (ushlsat 0, x) -> 0
define i16 @combine_shl_zero(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shl_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, wzr
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 0, i16 %y)
  ret i16 %tmp
}
64 ; fold (ushlsat x, 0) -> x
define i16 @combine_shlsat_by_zero(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_by_zero:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x, i16 0)
  ret i16 %tmp
}
73 ; fold (ushlsat c1, c2) -> c3
define i16 @combine_shlsat_constfold(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_constfold:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, #32 // =0x20
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 8, i16 2)
  ret i16 %tmp
}
83 ; fold (ushlsat c1, c2) -> sat max
define i16 @combine_shlsat_satmax(i16 %x, i16 %y) nounwind {
; CHECK-LABEL: combine_shlsat_satmax:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w0, #65535 // =0xffff
; CHECK-NEXT:    ret
  %tmp = call i16 @llvm.ushl.sat.i16(i16 8, i16 15)
  ret i16 %tmp
}
94 declare void @sink2xi16(i16, i16)
96 ; fold (ushlsat c1, c2) -> c3 , c1/c2/c3 being vectors
define void @combine_shlsat_vector() nounwind {
; CHECK-LABEL: combine_shlsat_vector:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    mov w0, #32 // =0x20
; CHECK-NEXT:    mov w1, #65535 // =0xffff
; CHECK-NEXT:    bl sink2xi16
; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %tmp = call <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16><i16 8, i16 8>,
                                             <2 x i16><i16 2, i16 15>)
  ; Pass elements as arguments in a call to get CHECK statements that verify
  ; the constant folding.
  %e0 = extractelement <2 x i16> %tmp, i16 0
  %e1 = extractelement <2 x i16> %tmp, i16 1
  call void @sink2xi16(i16 %e0, i16 %e1)
  ret void
}
116 ; Fold shlsat -> shl, if known not to saturate.
define i16 @combine_shlsat_to_shl(i16 %x) nounwind {
; CHECK-LABEL: combine_shlsat_to_shl:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and w0, w0, #0xfffffffc
; CHECK-NEXT:    ret
  %x2 = lshr i16 %x, 2
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x2, i16 2)
  ret i16 %tmp
}
127 ; Do not fold shlsat -> shl.
define i16 @combine_shlsat_to_shl_no_fold(i16 %x) nounwind {
; CHECK-LABEL: combine_shlsat_to_shl_no_fold:
; CHECK:       // %bb.0:
; CHECK-NEXT:    lsl w8, w0, #14
; CHECK-NEXT:    and w8, w8, #0x3fff0000
; CHECK-NEXT:    lsl w9, w8, #3
; CHECK-NEXT:    cmp w8, w9, lsr #3
; CHECK-NEXT:    csinv w8, w9, wzr, eq
; CHECK-NEXT:    lsr w0, w8, #16
; CHECK-NEXT:    ret
  %x2 = lshr i16 %x, 2
  %tmp = call i16 @llvm.ushl.sat.i16(i16 %x2, i16 3)
  ret i16 %tmp
}
143 ; Fold shlsat -> shl, if known not to saturate.
144 define <2 x i16> @combine_shlsat_to_shl_vec(<2 x i8> %a) nounwind {
145 ; CHECK-LABEL: combine_shlsat_to_shl_vec:
147 ; CHECK-NEXT: shl v0.2s, v0.2s, #8
149 %ext = zext <2 x i8> %a to <2 x i16>
150 %tmp = call <2 x i16> @llvm.ushl.sat.v2i16(
152 <2 x i16> <i16 8, i16 8>)