//===- PatternMatchTest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"
using namespace llvm;
using namespace MIPatternMatch;
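
// These tests exercise mi_match() and the m_* matchers from MIPatternMatch
// against MachineInstrs built with MachineIRBuilder on the AArch64GISelMITest
// fixture.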

TEST_F(AArch64GISelMITest, MatchIntConstant) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  int64_t Cst;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
}
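
// m_GCst binds a ValueAndVReg, giving access to both the constant value and
// the virtual register that defines it.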
TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  std::optional<ValueAndVReg> Src0;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
}
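
// m_ICstOrSplat accepts either a scalar G_CONSTANT or a constant splat
// G_BUILD_VECTOR, including splats assembled from G_CONCAT_VECTORS of smaller
// splats.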
TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v2s64 = LLT::fixed_vector(2, s64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  int64_t Cst;
  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
  EXPECT_EQ(Cst, 42);

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));

  auto ICst = B.buildConstant(s64, 15).getReg(0);
  auto SmallSplat = B.buildBuildVector(v2s64, {ICst, ICst}).getReg(0);
  auto LargeSplat = B.buildConcatVectors(v4s64, {SmallSplat, SmallSplat});
  EXPECT_TRUE(mi_match(LargeSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
}
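
// mi_match can also be invoked on a MachineInstr pointer or reference instead
// of a Register, and m_MInstr binds the matched instruction itself.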
TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto MIBAdd = B.buildAdd(LLT::scalar(64), Copies[0], Copies[1]);
  // Test 'MachineInstr *' bind.
  MachineInstr *MIPtr = MIBAdd.getInstr();
  bool match = mi_match(MIPtr, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Specialized mi_match for MachineInstr &.
  MachineInstr &MI = *MIBAdd.getInstr();
  match = mi_match(MI, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // MachineInstrBuilder has automatic conversion to MachineInstr *.
  match = mi_match(MIBAdd, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Match instruction without def.
  auto MIBBrcond = B.buildBrCond(Copies[0], B.getMBB());
  MachineInstr *MatchedMI;
  match = mi_match(MIBBrcond, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBBrcond.getInstr() == MatchedMI);
  // Match instruction with two defs.
  auto MIBUAddO =
      B.buildUAddo(LLT::scalar(64), LLT::scalar(1), Copies[0], Copies[1]);
  match = mi_match(MIBUAddO, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBUAddO.getInstr() == MatchedMI);
}
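
// Operand matchers are applied in source order; for commutative opcodes such
// as G_MUL and G_FMUL a constant is also matched on either side, while
// non-commutative opcodes such as G_SUB and G_FSUB are not reordered.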
TEST_F(AArch64GISelMITest, MatchBinaryOp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  LLT s32 = LLT::scalar(32);
  LLT s64 = LLT::scalar(64);
  LLT p0 = LLT::pointer(0, 64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  // Test case for no bind.
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  Register Src0, Src1, Src2;
  match = mi_match(MIBAdd.getReg(0), *MRI,
                   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build MUL(ADD %0, %1), %2
  auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);

  // Try to match the MUL.
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBAdd.getReg(0));
  EXPECT_EQ(Src1, Copies[2]);

  // Try to match MUL(ADD)
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  EXPECT_EQ(Src2, Copies[2]);

  // Test Commutativity.
  auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) on src of MUL(Reg, Cst) to validate
  // commutativity.
  int64_t Cst;
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // Make sure commutative doesn't work with something like SUB.
  auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  // Match and test commutativity for FMUL.
  match = mi_match(MIBFMul.getReg(0), *MRI,
                   m_GFMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // FSUB is not commutative; just match the operands in order.
  auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  match = mi_match(MIBFSub.getReg(0), *MRI,
                   m_GFSub(m_Reg(Src0), m_Reg()));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Build AND %0, %1 and match it.
  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
  match = mi_match(MIBAnd.getReg(0), *MRI,
                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build OR %0, %1 and match it.
  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
  match = mi_match(MIBOr.getReg(0), *MRI,
                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match lshr, and make sure a different shift amount type works.
  auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
  auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
  match = mi_match(LShr.getReg(0), *MRI,
                   m_GLShr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Match shl, and make sure a different shift amount type works.
  auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
  match = mi_match(Shl.getReg(0), *MRI,
                   m_GShl(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Build a G_PTR_ADD and check that we can match it.
  auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
  match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
  EXPECT_EQ(Src1, Copies[0]);

  auto MIBCst = B.buildConstant(s64, 42);
  auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
  auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));

  // m_BinOp with opcode.
  // Match binary instruction, opcode and its non-commutative operands.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  // Opcode doesn't match.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  // Operands are swapped; m_BinOp is not commutative.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_FALSE(match);

  // Instruction is not binary.
  match = mi_match(MIBCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(MIBUnmerge, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);

  // m_CommutativeBinOp with opcode.
  match = mi_match(MIBAddCst, *MRI,
                   m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(Cst),
                                      m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  // Opcode doesn't match.
  match = mi_match(MIBAddCst, *MRI,
                   m_CommutativeBinOp(TargetOpcode::G_MUL, m_ICst(Cst),
                                      m_Reg(Src0)));
  EXPECT_FALSE(match);

  // Swapped operands still match because the matcher is commutative.
  match = mi_match(MIBAddCst, *MRI,
                   m_CommutativeBinOp(TargetOpcode::G_ADD, m_Reg(Src0),
                                      m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Instruction is not binary.
  match = mi_match(MIBCst, *MRI,
                   m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0),
                                      m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(MIBUnmerge, *MRI,
                   m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0),
                                      m_Reg(Src1)));
  EXPECT_FALSE(match);
}
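
// m_GICmp and m_GFCmp bind the compare predicate via m_Pred() along with the
// two operand registers.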
TEST_F(AArch64GISelMITest, MatchICmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFCmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}
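
// The commutative compare matchers (m_c_GICmp / m_c_GFCmp) also try the
// operands swapped; in that case the reported predicate is the swapped one.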
TEST_F(AArch64GISelMITest, MatchCommutativeICmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  for (unsigned P = CmpInst::Predicate::FIRST_ICMP_PREDICATE;
       P < CmpInst::Predicate::LAST_ICMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildICmp(CurrPred, s1, LHS, RHS);
    // The predicate and operands should match in source order.
    bool match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting operands should still match, but the predicate should be
    // swapped.
    match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GICmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}

TEST_F(AArch64GISelMITest, MatchCommutativeFCmp) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  const LLT s1 = LLT::scalar(1);
  Register LHS = Copies[0];
  Register RHS = Copies[1];
  CmpInst::Predicate MatchedPred;
  for (unsigned P = CmpInst::Predicate::FIRST_FCMP_PREDICATE;
       P < CmpInst::Predicate::LAST_FCMP_PREDICATE; ++P) {
    auto CurrPred = static_cast<CmpInst::Predicate>(P);
    auto Cmp = B.buildFCmp(CurrPred, s1, LHS, RHS);
    // The predicate and operands should match in source order.
    bool match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(LHS), m_SpecificReg(RHS)));
    EXPECT_TRUE(match);
    EXPECT_EQ(MatchedPred, CurrPred);
    // Commuting operands should still match, but the predicate should be
    // swapped.
    match = mi_match(
        Cmp.getReg(0), *MRI,
        m_c_GFCmp(m_Pred(MatchedPred), m_SpecificReg(RHS), m_SpecificReg(LHS)));
    EXPECT_TRUE(match);
    EXPECT_EQ(MatchedPred, CmpInst::getSwappedPredicate(CurrPred));
  }
}

TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Truncate s64 to s32.
  LLT s32 = LLT::scalar(32);
  auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);

  // Match G_FABS.
  auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
  bool match =
      mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
  EXPECT_TRUE(match);

  Register Src;
  auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
  match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  // Build and match FConstant.
  auto MIBFCst = B.buildFConstant(s32, .5);
  const ConstantFP *TmpFP{};
  match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP);
  APFloat APF((float).5);
  auto *CFP = ConstantFP::get(Context, APF);
  EXPECT_EQ(CFP, TmpFP);

  // Build double float.
  LLT s64 = LLT::scalar(64);
  auto MIBFCst64 = B.buildFConstant(s64, .5);
  const ConstantFP *TmpFP64{};
  match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP64);
  APFloat APF64(.5);
  auto CFP64 = ConstantFP::get(Context, APF64);
  EXPECT_EQ(CFP64, TmpFP64);
  EXPECT_NE(TmpFP64, TmpFP);

  // Build half float.
  LLT s16 = LLT::scalar(16);
  auto MIBFCst16 = B.buildFConstant(s16, .5);
  const ConstantFP *TmpFP16{};
  match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP16);
  bool Ignored;
  APFloat APF16(.5);
  APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  auto CFP16 = ConstantFP::get(Context, APF16);
  EXPECT_EQ(TmpFP16, CFP16);
  EXPECT_NE(TmpFP16, TmpFP);
}

TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);

  auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
  auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
  auto MIBZExt = B.buildZExt(s64, MIBTrunc);
  auto MIBSExt = B.buildSExt(s64, MIBTrunc);
  Register Src0;
  bool match =
      mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  match =
      mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  // Match ext(trunc src)
  match = mi_match(MIBAExt.getReg(0), *MRI,
                   m_GAnyExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBSExt.getReg(0), *MRI,
                   m_GSExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBZExt.getReg(0), *MRI,
                   m_GZExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}
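
// m_SpecificType checks the LLT of the matched value and can be nested inside
// other matchers.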
TEST_F(AArch64GISelMITest, MatchSpecificType) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Try to match a 64bit add.
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
                        m_GAdd(m_SpecificType(s32), m_Reg())));
  EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
                       m_GAdd(m_SpecificType(s64), m_Reg())));

  // Try to match the destination type of a bitcast.
  LLT v2s32 = LLT::fixed_vector(2, 32);
  auto MIBCast = B.buildCast(v2s32, Copies[0]);
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));

  // Build a PTRToInt and INTTOPTR and match and test them.
  LLT PtrTy = LLT::pointer(0, 64);
  auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
  auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
  Register Src0;

  // Match the ptrtoint(inttoptr reg).
  bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
                        m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}
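
// m_all_of succeeds only if every sub-pattern matches the same value;
// m_any_of succeeds if at least one of them does.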
TEST_F(AArch64GISelMITest, MatchCombinators) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Src0, Src1;
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  // Check for s32 (which should fail).
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_FALSE(match);
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match a case where none of the predicates hold true.
  match = mi_match(
      MIBAdd.getReg(0), *MRI,
      m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Reg = MIBAdd.getReg(0);

  // Only one use of Reg.
  B.buildCast(LLT::pointer(0, 32), MIBAdd);
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Add multiple debug uses of Reg.
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});

  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Multiple non-debug uses of Reg.
  B.buildCast(LLT::pointer(1, 32), MIBAdd);
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
}
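
// m_SpecificICst matches a G_CONSTANT with an exact value, including when the
// constant is reached through another instruction's use operand.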
TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  // Basic case: Can we match a G_CONSTANT with a specific value?
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));

  // Test that this works inside of a more complex pattern.
  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));

  // Wrong constant.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));

  // No constant on the LHS.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICstSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatBuildVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchZeroInt) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
}

TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
}

TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
  Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
  std::optional<ValueAndVReg> ValReg;
  std::optional<FPValueAndVReg> FValReg;

  EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
  EXPECT_EQ(IntOne, ValReg->VReg);
  EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));

  EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
  EXPECT_EQ(FPOne, FValReg->VReg);
}

TEST_F(AArch64GISelMITest, MatchConstantSplat) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  LLT v2s64 = LLT::fixed_vector(2, 64);
  LLT v4s64 = LLT::fixed_vector(4, 64);

  Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
  Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
  Register Undef = B.buildUndef(s64).getReg(0);
  std::optional<FPValueAndVReg> FValReg;

  // GFCstOrSplatGFCstMatch allows undef elements as part of a splat. Undefs
  // often come from padding a vector to a legal type (e.g. v3s64 to v4s64),
  // where the added elements are then ignored.

  EXPECT_TRUE(mi_match(FPZero, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  EXPECT_FALSE(mi_match(Undef, *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroSplat = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPZero});
  EXPECT_TRUE(
      mi_match(ZeroSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  auto ZeroUndef = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Undef});
  EXPECT_TRUE(
      mi_match(ZeroUndef.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  // All undefs are not constant splat.
  auto UndefSplat = B.buildBuildVector(v4s64, {Undef, Undef, Undef, Undef});
  EXPECT_FALSE(
      mi_match(UndefSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroOne = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPOne});
  EXPECT_FALSE(
      mi_match(ZeroOne.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto Mixed = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Copies[0]});
  EXPECT_FALSE(
      mi_match(Mixed.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  // Look through G_CONCAT_VECTORS.
  auto SmallZeroSplat = B.buildBuildVector(v2s64, {FPZero, FPZero}).getReg(0);
  auto LargeZeroSplat =
      B.buildConcatVectors(v4s64, {SmallZeroSplat, SmallZeroSplat});
  EXPECT_TRUE(mi_match(LargeZeroSplat.getReg(0), *MRI,
                       GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallZeroSplat2 = B.buildBuildVector(v2s64, {FPZero, FPZero}).getReg(0);
  auto SmallZeroSplat3 = B.buildCopy(v2s64, SmallZeroSplat).getReg(0);
  auto LargeZeroSplat2 =
      B.buildConcatVectors(v4s64, {SmallZeroSplat2, SmallZeroSplat3});
  EXPECT_TRUE(mi_match(LargeZeroSplat2.getReg(0), *MRI,
                       GFCstOrSplatGFCstMatch(FValReg)));

  // Not all G_CONCAT_VECTORS are splats.
  auto SmallOneSplat = B.buildBuildVector(v2s64, {FPOne, FPOne}).getReg(0);
  auto LargeMixedSplat =
      B.buildConcatVectors(v4s64, {SmallZeroSplat, SmallOneSplat});
  EXPECT_FALSE(mi_match(LargeMixedSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallMixedSplat = B.buildBuildVector(v2s64, {FPOne, FPZero}).getReg(0);
  auto LargeSplat =
      B.buildConcatVectors(v4s64, {SmallMixedSplat, SmallMixedSplat});
  EXPECT_FALSE(
      mi_match(LargeSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto SmallUndefSplat = B.buildBuildVector(v2s64, {Undef, Undef}).getReg(0);
  auto LargeUndefSplat =
      B.buildConcatVectors(v4s64, {SmallUndefSplat, SmallUndefSplat});
  EXPECT_FALSE(mi_match(LargeUndefSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto UndefVec = B.buildUndef(v2s64).getReg(0);
  auto LargeUndefSplat2 = B.buildConcatVectors(v4s64, {UndefVec, UndefVec});
  EXPECT_FALSE(mi_match(LargeUndefSplat2.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));
}
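
// m_Neg matches an integer negation of the form G_SUB 0, %x.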
TEST_F(AArch64GISelMITest, MatchNeg) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  auto NegInst = B.buildSub(s64, Zero, Copies[0]);
  Register NegatedReg;

  // Match: G_SUB = 0, %Reg
  EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);

  // Don't match: G_SUB = %Reg, 0
  auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
  EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Don't match: G_SUB = 42, %Reg
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
  EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Match the G_SUB when it appears as a use operand:
  // %sub = G_SUB = 0, %negated_reg
  // %add = G_ADD = %x, %sub
  auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
  NegatedReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);
}
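
// m_Not matches a bitwise not of the form G_XOR %x, -1, with the all-ones
// constant accepted on either side.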
TEST_F(AArch64GISelMITest, MatchNot) {
  setUp();
  if (!TM)
    GTEST_SKIP();

  LLT s64 = LLT::scalar(64);
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
  Register NotReg;

  // Match: G_XOR %NotReg, -1
  EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);

  // Match: G_XOR -1, %NotReg
  auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
  EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[1]);

  // Don't match: G_XOR %NotReg, 42
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));

  // Match the G_XOR when it appears as a use operand:
  // %xor = G_XOR %NotReg, -1
  // %add = G_ADD %x, %xor
  auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
  NotReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificReg) {
  setUp();
  if (!TM)
    GTEST_SKIP();
  auto Cst1 = B.buildConstant(LLT::scalar(64), 42);
  auto Cst2 = B.buildConstant(LLT::scalar(64), 314);
  Register Reg = Cst1.getReg(0);
  // Basic case: Same register twice.
  EXPECT_TRUE(mi_match(Reg, *MRI, m_SpecificReg(Reg)));
  // Basic case: Two explicitly different registers.
  EXPECT_FALSE(mi_match(Reg, *MRI, m_SpecificReg(Cst2.getReg(0))));
  // Check that we can tell that an instruction uses a specific register.
  auto Add = B.buildAdd(LLT::scalar(64), Cst1, Cst2);
  EXPECT_TRUE(mi_match(Add.getReg(0), *MRI, m_GAdd(m_SpecificReg(Reg), m_Reg())));
}

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  initLLVM();
  return RUN_ALL_TESTS();
}