//=- AArch64Combine.td - Define AArch64 Combine Rules ---------*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Declares the GlobalISel combine rules used by the AArch64 combiner passes.
//
//===----------------------------------------------------------------------===//

include "llvm/Target/GlobalISel/Combine.td"

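// Rewrite a G_FCONSTANT as a G_CONSTANT holding the same bit pattern when that
// is profitable, e.g. when the value is only ever stored and can therefore be
// materialized through a GPR instead of an FPR. (Summary inferred from the
// helper names; the precise check lives in matchFConstantToConstant in
// AArch64PreLegalizerCombiner.cpp.)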
def fconstant_to_constant : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCONSTANT):$root,
         [{ return matchFConstantToConstant(*${root}, MRI); }]),
  (apply [{ applyFConstantToConstant(*${root}); }])>;

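// Drop a redundant G_TRUNC feeding a G_ICMP: when known-bits analysis shows the
// truncated bits cannot affect the comparison, compare the wider source
// register directly. The Register matchdata records that wider source.
// (Behavioral summary inferred from the helper names; see
// matchICmpRedundantTrunc for the exact conditions.)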
def icmp_redundant_trunc_matchdata : GIDefMatchData<"Register">;
def icmp_redundant_trunc : GICombineRule<
  (defs root:$root, icmp_redundant_trunc_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchICmpRedundantTrunc(*${root}, MRI, Helper.getKnownBits(), ${matchinfo}); }]),
  (apply [{ applyICmpRedundantTrunc(*${root}, MRI, B, Observer, ${matchinfo}); }])>;

// AArch64-specific offset folding for G_GLOBAL_VALUE.
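// Illustrative sketch (assumed; matchFoldGlobalOffset has the real legality
// checks): a constant G_PTR_ADD off a G_GLOBAL_VALUE, e.g.
//   %g:_(p0)    = G_GLOBAL_VALUE @foo
//   %addr:_(p0) = G_PTR_ADD %g, %small_cst
// can be folded into the G_GLOBAL_VALUE itself when the offset is small enough
// for the ADRP/ADD addressing sequence to absorb it. The matchdata pair carries
// the data the apply step needs to rewrite the global reference.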
def fold_global_offset_matchdata : GIDefMatchData<"std::pair<uint64_t, uint64_t>">;
def fold_global_offset : GICombineRule<
  (defs root:$root, fold_global_offset_matchdata:$matchinfo),
  (match (wip_match_opcode G_GLOBAL_VALUE):$root,
         [{ return matchFoldGlobalOffset(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ return applyFoldGlobalOffset(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

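// Rule set run by the AArch64 pre-legalizer combiner; TableGen emits the
// AArch64GenPreLegalizerCombinerHelper class from this definition.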
def AArch64PreLegalizerCombinerHelper: GICombinerHelper<
  "AArch64GenPreLegalizerCombinerHelper", [all_combines,
                                           fconstant_to_constant,
                                           icmp_redundant_trunc,
                                           fold_global_offset]> {
  let DisableRuleOption = "aarch64prelegalizercombiner-disable-rule";
  let StateClass = "AArch64PreLegalizerCombinerHelperState";
  let AdditionalArguments = [];
}

def AArch64O0PreLegalizerCombinerHelper: GICombinerHelper<
  "AArch64GenO0PreLegalizerCombinerHelper", [optnone_combines]> {
  let DisableRuleOption = "aarch64O0prelegalizercombiner-disable-rule";
  let StateClass = "AArch64O0PreLegalizerCombinerHelperState";
  let AdditionalArguments = [];
}

// Matchdata for combines which replace a G_SHUFFLE_VECTOR with a
// target-specific opcode.
def shuffle_matchdata : GIDefMatchData<"ShuffleVectorPseudo">;

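// Illustrative example (assumed): a G_SHUFFLE_VECTOR on <4 x s32> with mask
// <1, 0, 3, 2> reverses the elements within each 64-bit chunk and can be
// matched as a REV64 pseudo; the rules below map other mask shapes onto
// ZIP/UZP/TRN/DUP/EXT pseudos in the same way.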
def rev : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchREV(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def zip : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchZip(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def uzp : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchUZP(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def dup : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDup(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def trn : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchTRN(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyShuffleVectorPseudo(*${root}, ${matchinfo}); }])
>;

def ext : GICombineRule<
  (defs root:$root, shuffle_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchEXT(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyEXT(*${root}, ${matchinfo}); }])
>;

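// Match a shuffle that amounts to inserting a single lane of one vector into
// another, so it can be selected as an INS. The tuple is assumed to record the
// source registers and lane indices involved; see matchINS/applyINS for the
// authoritative meaning.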
def shuf_to_ins_matchdata : GIDefMatchData<"std::tuple<Register, int, Register, int>">;
def shuf_to_ins : GICombineRule<
  (defs root:$root, shuf_to_ins_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchINS(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ return applyINS(*${root}, MRI, B, ${matchinfo}); }])
>;

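// Match a vector G_ASHR/G_LSHR by a constant splat amount that fits the
// immediate forms of the target shift instructions; the int64_t matchdata is
// assumed to be that shift amount (see matchVAshrLshrImm for the range check).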
def vashr_vlshr_imm_matchdata : GIDefMatchData<"int64_t">;
def vashr_vlshr_imm : GICombineRule<
  (defs root:$root, vashr_vlshr_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ASHR, G_LSHR):$root,
         [{ return matchVAshrLshrImm(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyVAshrLshrImm(*${root}, MRI, ${matchinfo}); }])
>;

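// Match a G_SHUFFLE_VECTOR in which every mask element selects the same lane of
// one input, forming a lane-duplicate (DUPLANE-style) pseudo. The pair is
// assumed to be (opcode, lane index); see matchDupLane for details.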
def form_duplane_matchdata :
  GIDefMatchData<"std::pair<unsigned, int>">;
def form_duplane : GICombineRule<
  (defs root:$root, form_duplane_matchdata:$matchinfo),
  (match (wip_match_opcode G_SHUFFLE_VECTOR):$root,
         [{ return matchDupLane(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyDupLane(*${root}, MRI, B, ${matchinfo}); }])
>;

def shuffle_vector_lowering : GICombineGroup<[dup, rev, ext, zip, uzp, trn,
                                              form_duplane,
                                              shuf_to_ins]>;

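// Adjust a G_ICMP's immediate and predicate together so the immediate becomes
// encodable, e.g. (illustrative) rewriting "x < C" as "x <= C - 1" when C - 1
// fits an arithmetic immediate but C does not. The pair holds the new immediate
// and predicate; the exact legality rules live in matchAdjustICmpImmAndPred.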
def adjust_icmp_imm_matchdata :
  GIDefMatchData<"std::pair<uint64_t, CmpInst::Predicate>">;
def adjust_icmp_imm : GICombineRule<
  (defs root:$root, adjust_icmp_imm_matchdata:$matchinfo),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return matchAdjustICmpImmAndPred(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAdjustICmpImmAndPred(*${root}, ${matchinfo}, B, Observer); }])
>;

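// Swap the operands of a G_ICMP (adjusting the predicate accordingly) when the
// swapped form is expected to be cheaper to select; the profitability logic is
// in trySwapICmpOperands.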
def swap_icmp_operands : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_ICMP):$root,
         [{ return trySwapICmpOperands(*${root}, MRI); }]),
  (apply [{ applySwapICmpOperands(*${root}, Observer); }])
>;

def icmp_lowering : GICombineGroup<[adjust_icmp_imm, swap_icmp_operands]>;

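// Match a G_EXTRACT_VECTOR_ELT of lane 0 from an add whose shape makes it a
// pairwise addition, so it can be selected as a pairwise-add instruction
// (e.g. ADDP/FADDP). The tuple is assumed to carry the target opcode, element
// type, and source register; matchExtractVecEltPairwiseAdd has the precise
// pattern.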
def extractvecelt_pairwise_add_matchdata : GIDefMatchData<"std::tuple<unsigned, LLT, Register>">;
def extractvecelt_pairwise_add : GICombineRule<
  (defs root:$root, extractvecelt_pairwise_add_matchdata:$matchinfo),
  (match (wip_match_opcode G_EXTRACT_VECTOR_ELT):$root,
         [{ return matchExtractVecEltPairwiseAdd(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyExtractVecEltPairwiseAdd(*${root}, MRI, B, ${matchinfo}); }])
>;

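// Rewrite a G_MUL by certain constants into a cheaper sequence (for example,
// shift-and-add/sub forms; assumed from the helper names). The matchdata is a
// callback that builds the replacement, which the apply step simply invokes.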
def mul_const_matchdata : GIDefMatchData<"std::function<void(MachineIRBuilder&, Register)>">;
def mul_const : GICombineRule<
  (defs root:$root, mul_const_matchdata:$matchinfo),
  (match (wip_match_opcode G_MUL):$root,
         [{ return matchAArch64MulConstCombine(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyAArch64MulConstCombine(*${root}, MRI, B, ${matchinfo}); }])
>;

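// Match a G_BUILD_VECTOR whose operands are all the same value and rewrite it
// as a broadcast (DUP) of that value. Summary assumed from the helper names;
// see matchBuildVectorToDup for the exact conditions.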
def build_vector_to_dup : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_BUILD_VECTOR):$root,
         [{ return matchBuildVectorToDup(*${root}, MRI); }]),
  (apply [{ return applyBuildVectorToDup(*${root}, MRI, B); }])
>;

def build_vector_lowering : GICombineGroup<[build_vector_to_dup]>;

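// Lower a vector G_FCMP into a target-friendly form. Note that the rewrite is
// performed inside the match step (which is why it takes the builder B); the
// apply step is intentionally empty.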
def lower_vector_fcmp : GICombineRule<
  (defs root:$root),
  (match (wip_match_opcode G_FCMP):$root,
    [{ return lowerVectorFCMP(*${root}, MRI, B); }]),
  (apply [{}])>;

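// Match a G_STORE whose stored value is a G_TRUNC and store the wider source
// register directly, forming an implicit truncating store. The Register
// matchdata is assumed to be that wider source; see matchFormTruncstore.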
def form_truncstore_matchdata : GIDefMatchData<"Register">;
def form_truncstore : GICombineRule<
  (defs root:$root, form_truncstore_matchdata:$matchinfo),
  (match (wip_match_opcode G_STORE):$root,
         [{ return matchFormTruncstore(*${root}, MRI, ${matchinfo}); }]),
  (apply [{ applyFormTruncstore(*${root}, MRI, B, Observer, ${matchinfo}); }])
>;

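// Fold a G_MERGE_VALUES whose high part is known to be zero into a G_ZEXT of
// the low part. Summary inferred from the helper names; see
// matchFoldMergeToZext for the precise pattern.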
def fold_merge_to_zext : GICombineRule<
  (defs root:$d),
  (match (wip_match_opcode G_MERGE_VALUES):$d,
         [{ return matchFoldMergeToZext(*${d}, MRI); }]),
  (apply [{ applyFoldMergeToZext(*${d}, MRI, B, Observer); }])
>;

// Post-legalization combines which should happen at all optimization levels
// (e.g. ones that facilitate matching for the selector, such as forming
// target pseudos).
def AArch64PostLegalizerLoweringHelper
    : GICombinerHelper<"AArch64GenPostLegalizerLoweringHelper",
                       [shuffle_vector_lowering, vashr_vlshr_imm,
                        icmp_lowering, build_vector_lowering,
                        lower_vector_fcmp, form_truncstore]> {
  let DisableRuleOption = "aarch64postlegalizerlowering-disable-rule";
}

// Post-legalization combines which are primarily optimizations.
def AArch64PostLegalizerCombinerHelper
    : GICombinerHelper<"AArch64GenPostLegalizerCombinerHelper",
                       [copy_prop, erase_undef_store, combines_for_extload,
                        sext_trunc_sextload,
                        hoist_logic_op_with_same_opcode_hands,
                        redundant_and, xor_of_and_with_same_reg,
                        extractvecelt_pairwise_add, redundant_or,
                        mul_const, redundant_sext_inreg,
                        form_bitfield_extract, rotate_out_of_range,
                        icmp_to_true_false_known_bits, merge_unmerge,
                        select_combines, fold_merge_to_zext,
                        constant_fold, identity_combines,
                        ptr_add_immed_chain]> {
  let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
}