1 // RUN: llvm-tblgen -gen-compress-inst-emitter -I %p/../../include %s | \
2 // RUN: FileCheck --check-prefix=COMPRESS %s
4 // Check that combining conditions in AssemblerPredicate generates the correct
5 // output when using both the (all_of) AND operator, and the (any_of) OR
// operator in the RISC-V-specific instruction compressor.
8 include "llvm/Target/Target.td"
// Minimal InstrInfo record: scaffolding the compress-inst emitter needs to
// treat this file as a complete target description.
def archInstrInfo : InstrInfo { }
11 def archAsmWriter : AsmWriter {
12 int PassSubtarget = 1;
16 let InstructionSet = archInstrInfo;
17 let AssemblyWriters = [archAsmWriter];
20 let Namespace = "arch" in {
21 def R0 : Register<"r0">;
23 def Regs : RegisterClass<"Regs", [i32], 32, (add R0)>;
25 class RVInst<int Opc, list<Predicate> Preds> : Instruction {
27 let OutOperandList = (outs);
28 let InOperandList = (ins Regs:$r);
31 let AsmString = NAME # " $r";
32 field bits<32> SoftFail = 0;
33 let Predicates = Preds;
35 class RVInst16<int Opc, list<Predicate> Preds> : Instruction {
37 let OutOperandList = (outs);
38 let InOperandList = (ins Regs:$r);
41 let AsmString = NAME # " $r";
42 field bits<16> SoftFail = 0;
43 let Predicates = Preds;
// Leaf subtarget features referenced by the assembler predicates below:
// AsmCond1 stands alone, AsmCond2a/AsmCond2b are combined with all_of (AND),
// and AsmCond3a/AsmCond3b are combined with any_of (OR).
def AsmCond1 : SubtargetFeature<"cond1", "cond1", "true", "">;
def AsmCond2a: SubtargetFeature<"cond2a", "cond2a", "true", "">;
def AsmCond2b: SubtargetFeature<"cond2b", "cond2b", "true", "">;
def AsmCond3a: SubtargetFeature<"cond3a", "cond3a", "true", "">;
def AsmCond3b: SubtargetFeature<"cond3b", "cond3b", "true", "">;
// AsmPred1 requires its single feature; AsmPred2 requires both of its
// features (all_of = logical AND); AsmPred3 is satisfied by either of its
// features (any_of = logical OR).
def AsmPred1 : Predicate<"Pred1">, AssemblerPredicate<(all_of AsmCond1)>;
def AsmPred2 : Predicate<"Pred2">, AssemblerPredicate<(all_of AsmCond2a, AsmCond2b)>;
def AsmPred3 : Predicate<"Pred3">, AssemblerPredicate<(any_of AsmCond3a, AsmCond3b)>;
// The 32-bit source instruction that every CompressPat below rewrites to a
// 16-bit form; each pattern exercises a different predicate combination.
def BigInst : RVInst<1, [AsmPred1]>;

// COMPRESS-LABEL: static bool compressInst
// COMPRESS: case arch::BigInst
// A single all_of condition: the emitter tests exactly one feature bit.
def SmallInst1 : RVInst16<1, []>;
def : CompressPat<(BigInst Regs:$r), (SmallInst1 Regs:$r), [AsmPred1]>;
// COMPRESS: if (STI.getFeatureBits()[arch::AsmCond1] &&
// COMPRESS-NEXT: (MI.getOperand(0).isReg()) &&
// COMPRESS-NEXT: (MRI.getRegClass(arch::RegsRegClassID).contains(MI.getOperand(0).getReg()))) {
// COMPRESS-NEXT: // SmallInst1 $r
// all_of with two features: both feature bits must be set (joined with &&).
def SmallInst2 : RVInst16<2, []>;
def : CompressPat<(BigInst Regs:$r), (SmallInst2 Regs:$r), [AsmPred2]>;
// COMPRESS: if (STI.getFeatureBits()[arch::AsmCond2a] &&
// COMPRESS-NEXT: STI.getFeatureBits()[arch::AsmCond2b] &&
// COMPRESS-NEXT: (MI.getOperand(0).isReg()) &&
// COMPRESS-NEXT: (MRI.getRegClass(arch::RegsRegClassID).contains(MI.getOperand(0).getReg()))) {
// COMPRESS-NEXT: // SmallInst2 $r
// any_of with two features: either feature bit suffices (joined with ||,
// parenthesized so the whole predicate still ANDs with the operand checks).
def SmallInst3 : RVInst16<2, []>;
def : CompressPat<(BigInst Regs:$r), (SmallInst3 Regs:$r), [AsmPred3]>;
// COMPRESS: if ((STI.getFeatureBits()[arch::AsmCond3a] || STI.getFeatureBits()[arch::AsmCond3b]) &&
// COMPRESS-NEXT: (MI.getOperand(0).isReg()) &&
// COMPRESS-NEXT: (MRI.getRegClass(arch::RegsRegClassID).contains(MI.getOperand(0).getReg()))) {
// COMPRESS-NEXT: // SmallInst3 $r
// Multiple predicates in the CompressPat list are themselves ANDed: here the
// single-feature AsmPred1 combines with the two-feature all_of AsmPred2.
def SmallInst4 : RVInst16<2, []>;
def : CompressPat<(BigInst Regs:$r), (SmallInst4 Regs:$r), [AsmPred1, AsmPred2]>;
// COMPRESS: if (STI.getFeatureBits()[arch::AsmCond1] &&
// COMPRESS-NEXT: STI.getFeatureBits()[arch::AsmCond2a] &&
// COMPRESS-NEXT: STI.getFeatureBits()[arch::AsmCond2b] &&
// COMPRESS-NEXT: (MI.getOperand(0).isReg()) &&
// COMPRESS-NEXT: (MRI.getRegClass(arch::RegsRegClassID).contains(MI.getOperand(0).getReg()))) {
// COMPRESS-NEXT: // SmallInst4 $r
// Mixed combination: an all_of predicate ANDed with an any_of predicate — the
// OR clause stays parenthesized inside the outer AND chain.
def SmallInst5 : RVInst16<2, []>;
def : CompressPat<(BigInst Regs:$r), (SmallInst5 Regs:$r), [AsmPred1, AsmPred3]>;
// COMPRESS: if (STI.getFeatureBits()[arch::AsmCond1] &&
// COMPRESS-NEXT: (STI.getFeatureBits()[arch::AsmCond3a] || STI.getFeatureBits()[arch::AsmCond3b]) &&
// COMPRESS-NEXT: (MI.getOperand(0).isReg()) &&
// COMPRESS-NEXT: (MRI.getRegClass(arch::RegsRegClassID).contains(MI.getOperand(0).getReg()))) {
// COMPRESS-NEXT: // SmallInst5 $r
99 // COMPRESS-LABEL: static bool uncompressInst