[InstCombine] Signed saturation patterns
[llvm-core.git] / lib / Target / X86 / X86EvexToVex.cpp
blob24c8e6d6f6eb5bcc05007d5bcffe2376cdbc275c
1 //===- X86EvexToVex.cpp ---------------------------------------------------===//
2 // Compress EVEX instructions to VEX encoding when possible to reduce code size
3 //
4 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
5 // See https://llvm.org/LICENSE.txt for license information.
6 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 /// \file
11 /// This file defines the pass that goes over all AVX-512 instructions which
12 /// are encoded using the EVEX prefix and if possible replaces them by their
13 /// corresponding VEX encoding which is usually shorter by 2 bytes.
14 /// EVEX instructions may be encoded via the VEX prefix when the AVX-512
15 /// instruction has a corresponding AVX/AVX2 opcode, when vector length
16 /// accessed by instruction is less than 512 bits and when it does not use
/// the mask registers, nor xmm/ymm registers with indexes higher than 15.
18 /// The pass applies code reduction on the generated code for AVX-512 instrs.
20 //===----------------------------------------------------------------------===//
#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86InstComments.h"
#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Pass.h"
#include <algorithm>
#include <atomic>
#include <cassert>
#include <cstdint>
37 using namespace llvm;
39 // Including the generated EVEX2VEX tables.
// One row of the generated EVEX->VEX translation tables. Rows are sorted by
// EVEX opcode so the tables can be binary-searched with lower_bound.
struct X86EvexToVexCompressTableEntry {
  uint16_t EvexOpcode; // AVX-512 (EVEX-encoded) opcode.
  uint16_t VexOpcode;  // Equivalent AVX/AVX2 (VEX-encoded) opcode.

  // Entries are ordered solely by their EVEX opcode.
  friend bool operator<(const X86EvexToVexCompressTableEntry &LHS,
                        const X86EvexToVexCompressTableEntry &RHS) {
    return LHS.EvexOpcode < RHS.EvexOpcode;
  }

  // Lets lower_bound compare a table entry directly against a raw opcode.
  friend bool operator<(const X86EvexToVexCompressTableEntry &TE,
                        unsigned Opc) {
    return TE.EvexOpcode < Opc;
  }
};
53 #include "X86GenEVEX2VEXTables.inc"
55 #define EVEX2VEX_DESC "Compressing EVEX instrs to VEX encoding when possible"
56 #define EVEX2VEX_NAME "x86-evex-to-vex-compress"
58 #define DEBUG_TYPE EVEX2VEX_NAME
60 namespace {
62 class EvexToVexInstPass : public MachineFunctionPass {
64 /// For EVEX instructions that can be encoded using VEX encoding, replace
65 /// them by the VEX encoding in order to reduce size.
66 bool CompressEvexToVexImpl(MachineInstr &MI) const;
68 public:
69 static char ID;
71 EvexToVexInstPass() : MachineFunctionPass(ID) { }
73 StringRef getPassName() const override { return EVEX2VEX_DESC; }
75 /// Loop over all of the basic blocks, replacing EVEX instructions
76 /// by equivalent VEX instructions when possible for reducing code size.
77 bool runOnMachineFunction(MachineFunction &MF) override;
79 // This pass runs after regalloc and doesn't support VReg operands.
80 MachineFunctionProperties getRequiredProperties() const override {
81 return MachineFunctionProperties().set(
82 MachineFunctionProperties::Property::NoVRegs);
85 private:
86 /// Machine instruction info used throughout the class.
87 const X86InstrInfo *TII;
90 } // end anonymous namespace
92 char EvexToVexInstPass::ID = 0;
94 bool EvexToVexInstPass::runOnMachineFunction(MachineFunction &MF) {
95 TII = MF.getSubtarget<X86Subtarget>().getInstrInfo();
97 const X86Subtarget &ST = MF.getSubtarget<X86Subtarget>();
98 if (!ST.hasAVX512())
99 return false;
101 bool Changed = false;
103 /// Go over all basic blocks in function and replace
104 /// EVEX encoded instrs by VEX encoding when possible.
105 for (MachineBasicBlock &MBB : MF) {
107 // Traverse the basic block.
108 for (MachineInstr &MI : MBB)
109 Changed |= CompressEvexToVexImpl(MI);
112 return Changed;
115 static bool usesExtendedRegister(const MachineInstr &MI) {
116 auto isHiRegIdx = [](unsigned Reg) {
117 // Check for XMM register with indexes between 16 - 31.
118 if (Reg >= X86::XMM16 && Reg <= X86::XMM31)
119 return true;
121 // Check for YMM register with indexes between 16 - 31.
122 if (Reg >= X86::YMM16 && Reg <= X86::YMM31)
123 return true;
125 return false;
128 // Check that operands are not ZMM regs or
129 // XMM/YMM regs with hi indexes between 16 - 31.
130 for (const MachineOperand &MO : MI.explicit_operands()) {
131 if (!MO.isReg())
132 continue;
134 Register Reg = MO.getReg();
136 assert(!(Reg >= X86::ZMM0 && Reg <= X86::ZMM31) &&
137 "ZMM instructions should not be in the EVEX->VEX tables");
139 if (isHiRegIdx(Reg))
140 return true;
143 return false;
146 // Do any custom cleanup needed to finalize the conversion.
147 static bool performCustomAdjustments(MachineInstr &MI, unsigned NewOpc) {
148 (void)NewOpc;
149 unsigned Opc = MI.getOpcode();
150 switch (Opc) {
151 case X86::VALIGNDZ128rri:
152 case X86::VALIGNDZ128rmi:
153 case X86::VALIGNQZ128rri:
154 case X86::VALIGNQZ128rmi: {
155 assert((NewOpc == X86::VPALIGNRrri || NewOpc == X86::VPALIGNRrmi) &&
156 "Unexpected new opcode!");
157 unsigned Scale = (Opc == X86::VALIGNQZ128rri ||
158 Opc == X86::VALIGNQZ128rmi) ? 8 : 4;
159 MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
160 Imm.setImm(Imm.getImm() * Scale);
161 break;
163 case X86::VSHUFF32X4Z256rmi:
164 case X86::VSHUFF32X4Z256rri:
165 case X86::VSHUFF64X2Z256rmi:
166 case X86::VSHUFF64X2Z256rri:
167 case X86::VSHUFI32X4Z256rmi:
168 case X86::VSHUFI32X4Z256rri:
169 case X86::VSHUFI64X2Z256rmi:
170 case X86::VSHUFI64X2Z256rri: {
171 assert((NewOpc == X86::VPERM2F128rr || NewOpc == X86::VPERM2I128rr ||
172 NewOpc == X86::VPERM2F128rm || NewOpc == X86::VPERM2I128rm) &&
173 "Unexpected new opcode!");
174 MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
175 int64_t ImmVal = Imm.getImm();
176 // Set bit 5, move bit 1 to bit 4, copy bit 0.
177 Imm.setImm(0x20 | ((ImmVal & 2) << 3) | (ImmVal & 1));
178 break;
180 case X86::VRNDSCALEPDZ128rri:
181 case X86::VRNDSCALEPDZ128rmi:
182 case X86::VRNDSCALEPSZ128rri:
183 case X86::VRNDSCALEPSZ128rmi:
184 case X86::VRNDSCALEPDZ256rri:
185 case X86::VRNDSCALEPDZ256rmi:
186 case X86::VRNDSCALEPSZ256rri:
187 case X86::VRNDSCALEPSZ256rmi:
188 case X86::VRNDSCALESDZr:
189 case X86::VRNDSCALESDZm:
190 case X86::VRNDSCALESSZr:
191 case X86::VRNDSCALESSZm:
192 case X86::VRNDSCALESDZr_Int:
193 case X86::VRNDSCALESDZm_Int:
194 case X86::VRNDSCALESSZr_Int:
195 case X86::VRNDSCALESSZm_Int:
196 const MachineOperand &Imm = MI.getOperand(MI.getNumExplicitOperands()-1);
197 int64_t ImmVal = Imm.getImm();
198 // Ensure that only bits 3:0 of the immediate are used.
199 if ((ImmVal & 0xf) != ImmVal)
200 return false;
201 break;
204 return true;
208 // For EVEX instructions that can be encoded using VEX encoding
209 // replace them by the VEX encoding in order to reduce size.
210 bool EvexToVexInstPass::CompressEvexToVexImpl(MachineInstr &MI) const {
211 // VEX format.
212 // # of bytes: 0,2,3 1 1 0,1 0,1,2,4 0,1
213 // [Prefixes] [VEX] OPCODE ModR/M [SIB] [DISP] [IMM]
215 // EVEX format.
216 // # of bytes: 4 1 1 1 4 / 1 1
217 // [Prefixes] EVEX Opcode ModR/M [SIB] [Disp32] / [Disp8*N] [Immediate]
219 const MCInstrDesc &Desc = MI.getDesc();
221 // Check for EVEX instructions only.
222 if ((Desc.TSFlags & X86II::EncodingMask) != X86II::EVEX)
223 return false;
225 // Check for EVEX instructions with mask or broadcast as in these cases
226 // the EVEX prefix is needed in order to carry this information
227 // thus preventing the transformation to VEX encoding.
228 if (Desc.TSFlags & (X86II::EVEX_K | X86II::EVEX_B))
229 return false;
231 // Check for EVEX instructions with L2 set. These instructions are 512-bits
232 // and can't be converted to VEX.
233 if (Desc.TSFlags & X86II::EVEX_L2)
234 return false;
236 #ifndef NDEBUG
237 // Make sure the tables are sorted.
238 static std::atomic<bool> TableChecked(false);
239 if (!TableChecked.load(std::memory_order_relaxed)) {
240 assert(std::is_sorted(std::begin(X86EvexToVex128CompressTable),
241 std::end(X86EvexToVex128CompressTable)) &&
242 "X86EvexToVex128CompressTable is not sorted!");
243 assert(std::is_sorted(std::begin(X86EvexToVex256CompressTable),
244 std::end(X86EvexToVex256CompressTable)) &&
245 "X86EvexToVex256CompressTable is not sorted!");
246 TableChecked.store(true, std::memory_order_relaxed);
248 #endif
250 // Use the VEX.L bit to select the 128 or 256-bit table.
251 ArrayRef<X86EvexToVexCompressTableEntry> Table =
252 (Desc.TSFlags & X86II::VEX_L) ? makeArrayRef(X86EvexToVex256CompressTable)
253 : makeArrayRef(X86EvexToVex128CompressTable);
255 auto I = llvm::lower_bound(Table, MI.getOpcode());
256 if (I == Table.end() || I->EvexOpcode != MI.getOpcode())
257 return false;
259 unsigned NewOpc = I->VexOpcode;
261 if (usesExtendedRegister(MI))
262 return false;
264 if (!performCustomAdjustments(MI, NewOpc))
265 return false;
267 MI.setDesc(TII->get(NewOpc));
268 MI.setAsmPrinterFlag(X86::AC_EVEX_2_VEX);
269 return true;
272 INITIALIZE_PASS(EvexToVexInstPass, EVEX2VEX_NAME, EVEX2VEX_DESC, false, false)
274 FunctionPass *llvm::createX86EvexToVexInsts() {
275 return new EvexToVexInstPass();