//===- PatternMatchTest.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "GISelMITest.h"
#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "gtest/gtest.h"

using namespace llvm;
using namespace MIPatternMatch;

namespace {

TEST_F(AArch64GISelMITest, MatchIntConstant) {
  setUp();
  if (!TM)
    return;
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  int64_t Cst;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
}
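
// m_GCst binds a ValueAndVReg, i.e. the constant's value together with the
// virtual register that defines it (checked against getReg(0) below).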
TEST_F(AArch64GISelMITest, MatchIntConstantRegister) {
  setUp();
  if (!TM)
    return;
  auto MIBCst = B.buildConstant(LLT::scalar(64), 42);
  Optional<ValueAndVReg> Src0;
  bool match = mi_match(MIBCst.getReg(0), *MRI, m_GCst(Src0));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0->VReg, MIBCst.getReg(0));
}
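
// m_ICstOrSplat matches either a scalar G_CONSTANT or a constant splat
// build vector, binding the splat value.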
TEST_F(AArch64GISelMITest, MatchIntConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  int64_t Cst;
  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
  EXPECT_EQ(Cst, 42);

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI, m_ICstOrSplat(Cst)));
}

TEST_F(AArch64GISelMITest, MachineInstrPtrBind) {
  setUp();
  if (!TM)
    return;
  auto MIBAdd = B.buildAdd(LLT::scalar(64), Copies[0], Copies[1]);
  // Test 'MachineInstr *' bind.
  // Default mi_match.
  MachineInstr *MIPtr = MIBAdd.getInstr();
  bool match = mi_match(MIPtr, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Specialized mi_match for MachineInstr &.
  MachineInstr &MI = *MIBAdd.getInstr();
  match = mi_match(MI, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // MachineInstrBuilder has automatic conversion to MachineInstr *.
  match = mi_match(MIBAdd, *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  // Match instruction without def.
  auto MIBBrcond = B.buildBrCond(Copies[0], B.getMBB());
  MachineInstr *MatchedMI;
  match = mi_match(MIBBrcond, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBBrcond.getInstr() == MatchedMI);
  // Match instruction with two defs.
  auto MIBUAddO =
      B.buildUAddo(LLT::scalar(64), LLT::scalar(1), Copies[0], Copies[1]);
  match = mi_match(MIBUAddO, *MRI, m_MInstr(MatchedMI));
  EXPECT_TRUE(match);
  EXPECT_TRUE(MIBUAddO.getInstr() == MatchedMI);
}

TEST_F(AArch64GISelMITest, MatchBinaryOp) {
  setUp();
  if (!TM)
    return;
  LLT s32 = LLT::scalar(32);
  LLT s64 = LLT::scalar(64);
  LLT p0 = LLT::pointer(0, 64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  // Test case for no bind.
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));
  EXPECT_TRUE(match);
  Register Src0, Src1, Src2;
  match = mi_match(MIBAdd.getReg(0), *MRI,
                   m_GAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build MUL(ADD %0, %1), %2
  auto MIBMul = B.buildMul(s64, MIBAdd, Copies[2]);

  // Try to match MUL.
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBAdd.getReg(0));
  EXPECT_EQ(Src1, Copies[2]);

  // Try to match MUL(ADD)
  match = mi_match(MIBMul.getReg(0), *MRI,
                   m_GMul(m_GAdd(m_Reg(Src0), m_Reg(Src1)), m_Reg(Src2)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  EXPECT_EQ(Src2, Copies[2]);
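
  // Nested matchers walk the use-def chain in *MRI: m_GMul(m_GAdd(...), ...)
  // above reached the G_ADD through the G_MUL's first source operand.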

  // Test Commutativity.
  auto MIBMul2 = B.buildMul(s64, Copies[0], B.buildConstant(s64, 42));
  // Try to match MUL(Cst, Reg) on src of MUL(Reg, Cst) to validate
  // commutativity.
  int64_t Cst;
  match = mi_match(MIBMul2.getReg(0), *MRI,
                   m_GMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // Make sure commutative doesn't work with something like SUB.
  auto MIBSub = B.buildSub(s64, Copies[0], B.buildConstant(s64, 42));
  match = mi_match(MIBSub.getReg(0), *MRI,
                   m_GSub(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  auto MIBFMul = B.buildInstr(TargetOpcode::G_FMUL, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  // Match and test commutativity for FMUL.
  match = mi_match(MIBFMul.getReg(0), *MRI,
                   m_GFMul(m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Cst, 42);
  EXPECT_EQ(Src0, Copies[0]);

  // FSUB does not commute, so test commutativity doesn't hold.
  auto MIBFSub = B.buildInstr(TargetOpcode::G_FSUB, {s64},
                              {Copies[0], B.buildConstant(s64, 42)});
  match = mi_match(MIBFSub.getReg(0), *MRI,
                   m_GFSub(m_Reg(Src0), m_Reg()));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  // Build AND %0, %1
  auto MIBAnd = B.buildAnd(s64, Copies[0], Copies[1]);
  // Try to match AND.
  match = mi_match(MIBAnd.getReg(0), *MRI,
                   m_GAnd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Build OR %0, %1
  auto MIBOr = B.buildOr(s64, Copies[0], Copies[1]);
  // Try to match OR.
  match = mi_match(MIBOr.getReg(0), *MRI,
                   m_GOr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match lshr, and make sure a different shift amount type works.
  auto TruncCopy1 = B.buildTrunc(s32, Copies[1]);
  auto LShr = B.buildLShr(s64, Copies[0], TruncCopy1);
  match = mi_match(LShr.getReg(0), *MRI,
                   m_GLShr(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Match shl, and make sure a different shift amount type works.
  auto Shl = B.buildShl(s64, Copies[0], TruncCopy1);
  match = mi_match(Shl.getReg(0), *MRI,
                   m_GShl(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, TruncCopy1.getReg(0));

  // Build a G_PTR_ADD and check that we can match it.
  auto PtrAdd = B.buildPtrAdd(p0, {B.buildUndef(p0)}, Copies[0]);
  match = mi_match(PtrAdd.getReg(0), *MRI, m_GPtrAdd(m_Reg(Src0), m_Reg(Src1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, PtrAdd->getOperand(1).getReg());
  EXPECT_EQ(Src1, Copies[0]);

  auto MIBCst = B.buildConstant(s64, 42);
  auto MIBAddCst = B.buildAdd(s64, MIBCst, Copies[0]);
  auto MIBUnmerge = B.buildUnmerge({s32, s32}, B.buildConstant(s64, 42));

  // m_BinOp with opcode.
  // Match binary instruction, opcode and its non-commutative operands.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  // Opcode doesn't match.
  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(MIBAddCst, *MRI,
                   m_BinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_FALSE(match);

  // Instruction is not binary.
  match = mi_match(MIBCst, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(MIBUnmerge, *MRI,
                   m_BinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
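
  // Unlike m_BinOp, m_CommutativeBinOp also tries the two source operands in
  // swapped order, so both (m_ICst, m_Reg) and (m_Reg, m_ICst) match below.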
  // m_CommutativeBinOp with opcode.
  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_ICst(Cst), m_Reg(Src0)));
  EXPECT_FALSE(match);

  match = mi_match(
      MIBAddCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_ADD, m_Reg(Src0), m_ICst(Cst)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Cst, 42);

  match = mi_match(
      MIBCst, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
  match = mi_match(
      MIBUnmerge, *MRI,
      m_CommutativeBinOp(TargetOpcode::G_MUL, m_Reg(Src0), m_Reg(Src1)));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchICmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildICmp(CmpInst::ICMP_EQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GICmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GICmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::ICMP_EQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFCmp) {
  setUp();
  if (!TM)
    return;

  const LLT s1 = LLT::scalar(1);
  auto CmpEq = B.buildFCmp(CmpInst::FCMP_OEQ, s1, Copies[0], Copies[1]);

  // Check match any predicate.
  bool match =
      mi_match(CmpEq.getReg(0), *MRI, m_GFCmp(m_Pred(), m_Reg(), m_Reg()));
  EXPECT_TRUE(match);

  // Check we get the predicate and registers.
  CmpInst::Predicate Pred;
  Register Reg0;
  Register Reg1;
  match = mi_match(CmpEq.getReg(0), *MRI,
                   m_GFCmp(m_Pred(Pred), m_Reg(Reg0), m_Reg(Reg1)));
  EXPECT_TRUE(match);
  EXPECT_EQ(CmpInst::FCMP_OEQ, Pred);
  EXPECT_EQ(Copies[0], Reg0);
  EXPECT_EQ(Copies[1], Reg1);
}

TEST_F(AArch64GISelMITest, MatchFPUnaryOp) {
  setUp();
  if (!TM)
    return;

  // Truncate s64 to s32.
  LLT s32 = LLT::scalar(32);
  auto Copy0s32 = B.buildFPTrunc(s32, Copies[0]);

  // Match G_FABS.
  auto MIBFabs = B.buildInstr(TargetOpcode::G_FABS, {s32}, {Copy0s32});
  bool match =
      mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg()));
  EXPECT_TRUE(match);

  Register Src;
  auto MIBFNeg = B.buildInstr(TargetOpcode::G_FNEG, {s32}, {Copy0s32});
  match = mi_match(MIBFNeg.getReg(0), *MRI, m_GFNeg(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  match = mi_match(MIBFabs.getReg(0), *MRI, m_GFabs(m_Reg(Src)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src, Copy0s32.getReg(0));

  // Build and match FConstant.
  auto MIBFCst = B.buildFConstant(s32, .5);
  const ConstantFP *TmpFP{};
  match = mi_match(MIBFCst.getReg(0), *MRI, m_GFCst(TmpFP));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP);
  APFloat APF((float).5);
  auto *CFP = ConstantFP::get(Context, APF);
  EXPECT_EQ(CFP, TmpFP);
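
  // ConstantFP values are uniqued per LLVMContext, so comparing the matched
  // pointer against ConstantFP::get is sufficient here and below.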

  // Build double float.
  LLT s64 = LLT::scalar(64);
  auto MIBFCst64 = B.buildFConstant(s64, .5);
  const ConstantFP *TmpFP64{};
  match = mi_match(MIBFCst64.getReg(0), *MRI, m_GFCst(TmpFP64));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP64);
  APFloat APF64(.5);
  auto CFP64 = ConstantFP::get(Context, APF64);
  EXPECT_EQ(CFP64, TmpFP64);
  EXPECT_NE(TmpFP64, TmpFP);

  // Build half float.
  LLT s16 = LLT::scalar(16);
  auto MIBFCst16 = B.buildFConstant(s16, .5);
  const ConstantFP *TmpFP16{};
  match = mi_match(MIBFCst16.getReg(0), *MRI, m_GFCst(TmpFP16));
  EXPECT_TRUE(match);
  EXPECT_TRUE(TmpFP16);
  bool Ignored;
  APFloat APF16(.5);
  APF16.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  auto CFP16 = ConstantFP::get(Context, APF16);
  EXPECT_EQ(TmpFP16, CFP16);
  EXPECT_NE(TmpFP16, TmpFP);
}

TEST_F(AArch64GISelMITest, MatchExtendsTrunc) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);

  auto MIBTrunc = B.buildTrunc(s32, Copies[0]);
  auto MIBAExt = B.buildAnyExt(s64, MIBTrunc);
  auto MIBZExt = B.buildZExt(s64, MIBTrunc);
  auto MIBSExt = B.buildSExt(s64, MIBTrunc);
  Register Src0;
  bool match =
      mi_match(MIBTrunc.getReg(0), *MRI, m_GTrunc(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  match =
      mi_match(MIBAExt.getReg(0), *MRI, m_GAnyExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBSExt.getReg(0), *MRI, m_GSExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));

  match = mi_match(MIBZExt.getReg(0), *MRI, m_GZExt(m_Reg(Src0)));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, MIBTrunc.getReg(0));
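
  // Nested matchers look through the intermediate instruction, so
  // ext(trunc x) binds x directly.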
  // Match ext(trunc src)
  match = mi_match(MIBAExt.getReg(0), *MRI,
                   m_GAnyExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBSExt.getReg(0), *MRI,
                   m_GSExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);

  match = mi_match(MIBZExt.getReg(0), *MRI,
                   m_GZExt(m_GTrunc(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchSpecificType) {
  setUp();
  if (!TM)
    return;

  // Try to match a 64bit add.
  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  EXPECT_FALSE(mi_match(MIBAdd.getReg(0), *MRI,
                        m_GAdd(m_SpecificType(s32), m_Reg())));
  EXPECT_TRUE(mi_match(MIBAdd.getReg(0), *MRI,
                       m_GAdd(m_SpecificType(s64), m_Reg())));

  // Try to match the destination type of a bitcast.
  LLT v2s32 = LLT::fixed_vector(2, 32);
  auto MIBCast = B.buildCast(v2s32, Copies[0]);
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_GBitcast(m_Reg())));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(0), *MRI, m_SpecificType(v2s32)));
  EXPECT_TRUE(
      mi_match(MIBCast.getReg(1), *MRI, m_SpecificType(s64)));

  // Build a PTRToInt and INTTOPTR and match and test them.
  LLT PtrTy = LLT::pointer(0, 64);
  auto MIBIntToPtr = B.buildCast(PtrTy, Copies[0]);
  auto MIBPtrToInt = B.buildCast(s64, MIBIntToPtr);
  Register Src0;

  // match the ptrtoint(inttoptr reg)
  bool match = mi_match(MIBPtrToInt.getReg(0), *MRI,
                        m_GPtrToInt(m_GIntToPtr(m_Reg(Src0))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchCombinators) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT s32 = LLT::scalar(32);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
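
  // m_all_of succeeds only if every sub-pattern matches the operand;
  // m_any_of succeeds if at least one of them does.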
  Register Src0, Src1;
  bool match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s64), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);
  // Check for s32 (which should fail).
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_all_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_FALSE(match);
  match =
      mi_match(MIBAdd.getReg(0), *MRI,
               m_any_of(m_SpecificType(s32), m_GAdd(m_Reg(Src0), m_Reg(Src1))));
  EXPECT_TRUE(match);
  EXPECT_EQ(Src0, Copies[0]);
  EXPECT_EQ(Src1, Copies[1]);

  // Match a case where none of the predicates hold true.
  match = mi_match(
      MIBAdd.getReg(0), *MRI,
      m_any_of(m_SpecificType(LLT::scalar(16)), m_GSub(m_Reg(), m_Reg())));
  EXPECT_FALSE(match);
}

TEST_F(AArch64GISelMITest, MatchMiscellaneous) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], Copies[1]);
  Register Reg = MIBAdd.getReg(0);

  // Only one use of Reg.
  B.buildCast(LLT::pointer(0, 32), MIBAdd);
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
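
  // m_OneUse counts DBG_VALUE uses as well; m_OneNonDBGUse ignores them,
  // which the checks below exercise.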
  // Add multiple debug uses of Reg.
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});
  B.buildInstr(TargetOpcode::DBG_VALUE, {}, {Reg});

  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_TRUE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));

  // Multiple non-debug uses of Reg.
  B.buildCast(LLT::pointer(1, 32), MIBAdd);
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneUse(m_GAdd(m_Reg(), m_Reg()))));
  EXPECT_FALSE(mi_match(Reg, *MRI, m_OneNonDBGUse(m_GAdd(m_Reg(), m_Reg()))));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstant) {
  setUp();
  if (!TM)
    return;

  // Basic case: Can we match a G_CONSTANT with a specific value?
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(42)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICst(123)));

  // Test that this works inside of a more complex pattern.
  LLT s64 = LLT::scalar(64);
  auto MIBAdd = B.buildAdd(s64, Copies[0], FortyTwo);
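
  // Operand 0 of the G_ADD is the def; getReg(1) and getReg(2) are the two
  // source operands, so getReg(2) is the constant FortyTwo.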
  EXPECT_TRUE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(42)));

  // Wrong constant.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(2), *MRI, m_SpecificICst(123)));

  // No constant on the LHS.
  EXPECT_FALSE(mi_match(MIBAdd.getReg(1), *MRI, m_SpecificICst(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(Add.getReg(2), *MRI, m_SpecificICstSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchSpecificConstantOrSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, s64);

  MachineInstrBuilder FortyTwoSplat =
      B.buildSplatVector(v4s64, B.buildConstant(s64, 42));
  MachineInstrBuilder FortyTwo = B.buildConstant(s64, 42);

  EXPECT_TRUE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(
      mi_match(FortyTwoSplat.getReg(0), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_TRUE(mi_match(FortyTwo.getReg(0), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});

  MachineInstrBuilder AddSplat =
      B.buildAdd(v4s64, NonConstantSplat, FortyTwoSplat);
  EXPECT_TRUE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(2), *MRI, m_SpecificICstOrSplat(43)));
  EXPECT_FALSE(mi_match(AddSplat.getReg(1), *MRI, m_SpecificICstOrSplat(42)));

  MachineInstrBuilder Add = B.buildAdd(s64, Copies[0], FortyTwo);
  EXPECT_TRUE(mi_match(Add.getReg(2), *MRI, m_SpecificICstOrSplat(42)));
}

TEST_F(AArch64GISelMITest, MatchZeroInt) {
  setUp();
  if (!TM)
    return;
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  EXPECT_TRUE(mi_match(Zero.getReg(0), *MRI, m_ZeroInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_ZeroInt()));
}

TEST_F(AArch64GISelMITest, MatchAllOnesInt) {
  setUp();
  if (!TM)
    return;
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  EXPECT_TRUE(mi_match(AllOnes.getReg(0), *MRI, m_AllOnesInt()));

  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  EXPECT_FALSE(mi_match(FortyTwo.getReg(0), *MRI, m_AllOnesInt()));
}

TEST_F(AArch64GISelMITest, MatchFPOrIntConst) {
  setUp();
  if (!TM)
    return;

  Register IntOne = B.buildConstant(LLT::scalar(64), 1).getReg(0);
  Register FPOne = B.buildFConstant(LLT::scalar(64), 1.0).getReg(0);
  Optional<ValueAndVReg> ValReg;
  Optional<FPValueAndVReg> FValReg;

  EXPECT_TRUE(mi_match(IntOne, *MRI, m_GCst(ValReg)));
  EXPECT_EQ(IntOne, ValReg->VReg);
  EXPECT_FALSE(mi_match(IntOne, *MRI, m_GFCst(FValReg)));

  EXPECT_FALSE(mi_match(FPOne, *MRI, m_GCst(ValReg)));
  EXPECT_TRUE(mi_match(FPOne, *MRI, m_GFCst(FValReg)));
  EXPECT_EQ(FPOne, FValReg->VReg);
}

TEST_F(AArch64GISelMITest, MatchConstantSplat) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  LLT v4s64 = LLT::fixed_vector(4, 64);

  Register FPOne = B.buildFConstant(s64, 1.0).getReg(0);
  Register FPZero = B.buildFConstant(s64, 0.0).getReg(0);
  Register Undef = B.buildUndef(s64).getReg(0);
  Optional<FPValueAndVReg> FValReg;

  // GFCstOrSplatGFCstMatch allows undef as part of splat. Undef often comes
  // from padding to legalize into available operation and then ignore added
  // elements e.g. v3s64 to v4s64.

  EXPECT_TRUE(mi_match(FPZero, *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);
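
  // A plain scalar G_FCONSTANT also satisfies GFCstOrSplatGFCstMatch (checked
  // above); a lone undef does not.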
  EXPECT_FALSE(mi_match(Undef, *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroSplat = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPZero});
  EXPECT_TRUE(
      mi_match(ZeroSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  auto ZeroUndef = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Undef});
  EXPECT_TRUE(
      mi_match(ZeroUndef.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
  EXPECT_EQ(FPZero, FValReg->VReg);

  // All undefs are not constant splat.
  auto UndefSplat = B.buildBuildVector(v4s64, {Undef, Undef, Undef, Undef});
  EXPECT_FALSE(
      mi_match(UndefSplat.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto ZeroOne = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, FPOne});
  EXPECT_FALSE(
      mi_match(ZeroOne.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));

  auto NonConstantSplat =
      B.buildBuildVector(v4s64, {Copies[0], Copies[0], Copies[0], Copies[0]});
  EXPECT_FALSE(mi_match(NonConstantSplat.getReg(0), *MRI,
                        GFCstOrSplatGFCstMatch(FValReg)));

  auto Mixed = B.buildBuildVector(v4s64, {FPZero, FPZero, FPZero, Copies[0]});
  EXPECT_FALSE(
      mi_match(Mixed.getReg(0), *MRI, GFCstOrSplatGFCstMatch(FValReg)));
}

TEST_F(AArch64GISelMITest, MatchNeg) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto Zero = B.buildConstant(LLT::scalar(64), 0);
  auto NegInst = B.buildSub(s64, Zero, Copies[0]);
  Register NegatedReg;

  // Match: G_SUB = 0, %Reg
  EXPECT_TRUE(mi_match(NegInst.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);

  // Don't match: G_SUB = %Reg, 0
  auto NotNegInst1 = B.buildSub(s64, Copies[0], Zero);
  EXPECT_FALSE(mi_match(NotNegInst1.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Don't match: G_SUB = 42, %Reg
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto NotNegInst2 = B.buildSub(s64, FortyTwo, Copies[0]);
  EXPECT_FALSE(mi_match(NotNegInst2.getReg(0), *MRI, m_Neg(m_Reg(NegatedReg))));

  // Complex testcase.
  // %sub = G_SUB = 0, %negated_reg
  // %add = G_ADD = %x, %sub
  auto AddInst = B.buildAdd(s64, Copies[1], NegInst);
  NegatedReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Neg(m_Reg(NegatedReg))));
  EXPECT_EQ(NegatedReg, Copies[0]);
}

TEST_F(AArch64GISelMITest, MatchNot) {
  setUp();
  if (!TM)
    return;

  LLT s64 = LLT::scalar(64);
  auto AllOnes = B.buildConstant(LLT::scalar(64), -1);
  auto NotInst1 = B.buildXor(s64, Copies[0], AllOnes);
  Register NotReg;

  // Match: G_XOR %NotReg, -1
  EXPECT_TRUE(mi_match(NotInst1.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);

  // Match: G_XOR -1, %NotReg
  auto NotInst2 = B.buildXor(s64, AllOnes, Copies[1]);
  EXPECT_TRUE(mi_match(NotInst2.getReg(0), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[1]);

  // Don't match: G_XOR %NotReg, 42
  auto FortyTwo = B.buildConstant(LLT::scalar(64), 42);
  auto WrongCst = B.buildXor(s64, Copies[0], FortyTwo);
  EXPECT_FALSE(mi_match(WrongCst.getReg(0), *MRI, m_Not(m_Reg(NotReg))));

  // Complex testcase.
  // %xor = G_XOR %NotReg, -1
  // %add = G_ADD %x, %xor
  auto AddInst = B.buildAdd(s64, Copies[1], NotInst1);
  NotReg = Register();
  EXPECT_TRUE(mi_match(AddInst.getReg(2), *MRI, m_Not(m_Reg(NotReg))));
  EXPECT_EQ(NotReg, Copies[0]);
}

} // namespace

int main(int argc, char **argv) {
  ::testing::InitGoogleTest(&argc, argv);
  initLLVM();
  return RUN_ALL_TESTS();
}