[Alignment][NFC] Use Align with TargetLowering::setMinFunctionAlignment
[llvm-core.git] / lib/Target/WebAssembly/WebAssemblyISelLowering.cpp
//=- WebAssemblyISelLowering.cpp - WebAssembly DAG Lowering Implementation -==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the WebAssemblyTargetLowering class.
///
//===----------------------------------------------------------------------===//

#include "WebAssemblyISelLowering.h"
#include "MCTargetDesc/WebAssemblyMCTargetDesc.h"
#include "WebAssemblyMachineFunctionInfo.h"
#include "WebAssemblySubtarget.h"
#include "WebAssemblyTargetMachine.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

#define DEBUG_TYPE "wasm-lower"
WebAssemblyTargetLowering::WebAssemblyTargetLowering(
    const TargetMachine &TM, const WebAssemblySubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  auto MVTPtr = Subtarget->hasAddr64() ? MVT::i64 : MVT::i32;

  // Booleans always contain 0 or 1.
  setBooleanContents(ZeroOrOneBooleanContent);
  // Except in SIMD vectors
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
  // We don't know the microarchitecture here, so just reduce register pressure.
  setSchedulingPreference(Sched::RegPressure);
  // Tell ISel that we have a stack pointer.
  setStackPointerRegisterToSaveRestore(
      Subtarget->hasAddr64() ? WebAssembly::SP64 : WebAssembly::SP32);
  // Set up the register classes.
  addRegisterClass(MVT::i32, &WebAssembly::I32RegClass);
  addRegisterClass(MVT::i64, &WebAssembly::I64RegClass);
  addRegisterClass(MVT::f32, &WebAssembly::F32RegClass);
  addRegisterClass(MVT::f64, &WebAssembly::F64RegClass);
  if (Subtarget->hasSIMD128()) {
    addRegisterClass(MVT::v16i8, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v8i16, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4i32, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v4f32, &WebAssembly::V128RegClass);
  }
  if (Subtarget->hasUnimplementedSIMD128()) {
    addRegisterClass(MVT::v2i64, &WebAssembly::V128RegClass);
    addRegisterClass(MVT::v2f64, &WebAssembly::V128RegClass);
  }
  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  setOperationAction(ISD::GlobalAddress, MVTPtr, Custom);
  setOperationAction(ISD::ExternalSymbol, MVTPtr, Custom);
  setOperationAction(ISD::JumpTable, MVTPtr, Custom);
  setOperationAction(ISD::BlockAddress, MVTPtr, Custom);
  setOperationAction(ISD::BRIND, MVT::Other, Custom);

  // Take the default expansion for va_arg, va_copy, and va_end. There is no
  // default action for va_start, so we do that custom.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto T : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
    // Don't expand the floating-point types to constant pools.
    setOperationAction(ISD::ConstantFP, T, Legal);
    // Expand floating-point comparisons.
    for (auto CC : {ISD::SETO, ISD::SETUO, ISD::SETUEQ, ISD::SETONE,
                    ISD::SETULT, ISD::SETULE, ISD::SETUGT, ISD::SETUGE})
      setCondCodeAction(CC, T, Expand);
    // Expand floating-point library function operators.
    for (auto Op :
         {ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FMA})
      setOperationAction(Op, T, Expand);
    // Note supported floating-point library function operators that otherwise
    // default to expand.
    for (auto Op :
         {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT, ISD::FRINT})
      setOperationAction(Op, T, Legal);
    // Support minimum and maximum, which otherwise default to expand.
    setOperationAction(ISD::FMINIMUM, T, Legal);
    setOperationAction(ISD::FMAXIMUM, T, Legal);
    // WebAssembly currently has no builtin f16 support.
    setOperationAction(ISD::FP16_TO_FP, T, Expand);
    setOperationAction(ISD::FP_TO_FP16, T, Expand);
    setLoadExtAction(ISD::EXTLOAD, T, MVT::f16, Expand);
    setTruncStoreAction(T, MVT::f16, Expand);
  }

  // Expand unavailable integer operations.
  for (auto Op :
       {ISD::BSWAP, ISD::SMUL_LOHI, ISD::UMUL_LOHI, ISD::MULHS, ISD::MULHU,
        ISD::SDIVREM, ISD::UDIVREM, ISD::SHL_PARTS, ISD::SRA_PARTS,
        ISD::SRL_PARTS, ISD::ADDC, ISD::ADDE, ISD::SUBC, ISD::SUBE}) {
    for (auto T : {MVT::i32, MVT::i64})
      setOperationAction(Op, T, Expand);
    if (Subtarget->hasSIMD128())
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Expand);
    if (Subtarget->hasUnimplementedSIMD128())
      setOperationAction(Op, MVT::v2i64, Expand);
  }

  // SIMD-specific configuration
  if (Subtarget->hasSIMD128()) {
    // Support saturating add for i8x16 and i16x8
    for (auto Op : {ISD::SADDSAT, ISD::UADDSAT})
      for (auto T : {MVT::v16i8, MVT::v8i16})
        setOperationAction(Op, T, Legal);

    // Custom lower BUILD_VECTORs to minimize number of replace_lanes
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::BUILD_VECTOR, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::BUILD_VECTOR, T, Custom);

    // We have custom shuffle lowering to expose the shuffle mask
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
      setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);
    if (Subtarget->hasUnimplementedSIMD128())
      for (auto T : {MVT::v2i64, MVT::v2f64})
        setOperationAction(ISD::VECTOR_SHUFFLE, T, Custom);

    // Custom lowering since wasm shifts must have a scalar shift amount
    for (auto Op : {ISD::SHL, ISD::SRA, ISD::SRL}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2i64, Custom);
    }

    // Custom lower lane accesses to expand out variable indices
    for (auto Op : {ISD::EXTRACT_VECTOR_ELT, ISD::INSERT_VECTOR_ELT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Custom);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Custom);
    }

    // There is no i64x2.mul instruction
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);

    // There are no vector select instructions
    for (auto Op : {ISD::VSELECT, ISD::SELECT_CC, ISD::SELECT}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v4f32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        for (auto T : {MVT::v2i64, MVT::v2f64})
          setOperationAction(Op, T, Expand);
    }

    // Expand integer operations supported for scalars but not SIMD
    for (auto Op : {ISD::CTLZ, ISD::CTTZ, ISD::CTPOP, ISD::SDIV, ISD::UDIV,
                    ISD::SREM, ISD::UREM, ISD::ROTL, ISD::ROTR}) {
      for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32})
        setOperationAction(Op, T, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2i64, Expand);
    }

    // Expand float operations supported for scalars but not SIMD
    for (auto Op : {ISD::FCEIL, ISD::FFLOOR, ISD::FTRUNC, ISD::FNEARBYINT,
                    ISD::FCOPYSIGN, ISD::FLOG, ISD::FLOG2, ISD::FLOG10,
                    ISD::FEXP, ISD::FEXP2, ISD::FRINT}) {
      setOperationAction(Op, MVT::v4f32, Expand);
      if (Subtarget->hasUnimplementedSIMD128())
        setOperationAction(Op, MVT::v2f64, Expand);
    }

    // Expand additional SIMD ops that V8 hasn't implemented yet
    if (!Subtarget->hasUnimplementedSIMD128()) {
      setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
      setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
    }
  }

  // As a special case, these operators use the type to mean the type to
  // sign-extend from.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget->hasSignExt()) {
    // Sign extends are legal only when extending a vector extract
    auto Action = Subtarget->hasSIMD128() ? Custom : Expand;
    for (auto T : {MVT::i8, MVT::i16, MVT::i32})
      setOperationAction(ISD::SIGN_EXTEND_INREG, T, Action);
  }
  for (auto T : MVT::integer_vector_valuetypes())
    setOperationAction(ISD::SIGN_EXTEND_INREG, T, Expand);

  // Dynamic stack allocation: use the default expansion.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVTPtr, Expand);

  setOperationAction(ISD::FrameIndex, MVT::i32, Custom);
  setOperationAction(ISD::CopyToReg, MVT::Other, Custom);

  // Expand these forms; we pattern-match the forms that we can handle in isel.
  for (auto T : {MVT::i32, MVT::i64, MVT::f32, MVT::f64})
    for (auto Op : {ISD::BR_CC, ISD::SELECT_CC})
      setOperationAction(Op, T, Expand);

  // We have custom switch handling.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  // WebAssembly doesn't have:
  //  - Floating-point extending loads.
  //  - Floating-point truncating stores.
  //  - i1 extending loads.
  //  - extending/truncating SIMD loads/stores
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  for (auto T : MVT::integer_valuetypes())
    for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
      setLoadExtAction(Ext, T, MVT::i1, Promote);
  if (Subtarget->hasSIMD128()) {
    for (auto T : {MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64, MVT::v4f32,
                   MVT::v2f64}) {
      for (auto MemT : MVT::vector_valuetypes()) {
        if (MVT(T) != MemT) {
          setTruncStoreAction(T, MemT, Expand);
          for (auto Ext : {ISD::EXTLOAD, ISD::ZEXTLOAD, ISD::SEXTLOAD})
            setLoadExtAction(Ext, T, MemT, Expand);
        }
      }
    }
  }

  // Don't do anything clever with build_pairs
  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Trap lowers to wasm unreachable
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Exception handling intrinsics
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setMaxAtomicSizeInBitsSupported(64);

  if (Subtarget->hasBulkMemory()) {
    // Use memory.copy and friends over multiple loads and stores
    MaxStoresPerMemcpy = 1;
    MaxStoresPerMemcpyOptSize = 1;
    MaxStoresPerMemmove = 1;
    MaxStoresPerMemmoveOptSize = 1;
    MaxStoresPerMemset = 1;
    MaxStoresPerMemsetOptSize = 1;
  }

  // Override the __gnu_f2h_ieee/__gnu_h2f_ieee names so that the f32 name is
  // consistent with the f64 and f128 names.
  setLibcallName(RTLIB::FPEXT_F16_F32, "__extendhfsf2");
  setLibcallName(RTLIB::FPROUND_F32_F16, "__truncsfhf2");

  // Define the emscripten name for return address helper.
  // TODO: when implementing other WASM backends, make this generic or only do
  // this on emscripten depending on what they end up doing.
  setLibcallName(RTLIB::RETURN_ADDRESS, "emscripten_return_address");

  // Always convert switches to br_tables unless there is only one case, which
  // is equivalent to a simple branch. This reduces code size for wasm, and we
  // defer possible jump table optimizations to the VM.
  setMinimumJumpTableEntries(2);
}
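// Atomic read-modify-write operations that map directly onto a wasm
// instruction are kept as-is; everything else is expanded by AtomicExpandPass
// into a cmpxchg loop.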
TargetLowering::AtomicExpansionKind
WebAssemblyTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // We have wasm instructions for these
  switch (AI->getOperation()) {
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Or:
  case AtomicRMWInst::Xor:
  case AtomicRMWInst::Xchg:
    return AtomicExpansionKind::None;
  default:
    break;
  }
  return AtomicExpansionKind::CmpXChg;
}

FastISel *WebAssemblyTargetLowering::createFastISel(
    FunctionLoweringInfo &FuncInfo, const TargetLibraryInfo *LibInfo) const {
  return WebAssembly::createFastISel(FuncInfo, LibInfo);
}
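// Return the type to use for a scalar shift amount. WebAssembly shift
// instructions take the count in the same type as the shifted value, but
// shifts wider than 64 bits become libcalls whose count argument is an i32.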
MVT WebAssemblyTargetLowering::getScalarShiftAmountTy(const DataLayout & /*DL*/,
                                                      EVT VT) const {
  unsigned BitWidth = NextPowerOf2(VT.getSizeInBits() - 1);
  if (BitWidth > 1 && BitWidth < 8)
    BitWidth = 8;

  if (BitWidth > 64) {
    // The shift will be lowered to a libcall, and compiler-rt libcalls expect
    // the count to be an i32.
    BitWidth = 32;
    assert(BitWidth >= Log2_32_Ceil(VT.getSizeInBits()) &&
           "32-bit shift counts ought to be enough for anyone");
  }

  MVT Result = MVT::getIntegerVT(BitWidth);
  assert(Result != MVT::INVALID_SIMPLE_VALUE_TYPE &&
         "Unable to represent scalar shift amount type");
  return Result;
}
// Lower an fp-to-int conversion operator from the LLVM opcode, which has an
// undefined result on invalid/overflow, to the WebAssembly opcode, which
// traps on invalid/overflow.
static MachineBasicBlock *LowerFPToInt(MachineInstr &MI, DebugLoc DL,
                                       MachineBasicBlock *BB,
                                       const TargetInstrInfo &TII,
                                       bool IsUnsigned, bool Int64,
                                       bool Float64, unsigned LoweredOpcode) {
  MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();

  Register OutReg = MI.getOperand(0).getReg();
  Register InReg = MI.getOperand(1).getReg();

  unsigned Abs = Float64 ? WebAssembly::ABS_F64 : WebAssembly::ABS_F32;
  unsigned FConst = Float64 ? WebAssembly::CONST_F64 : WebAssembly::CONST_F32;
  unsigned LT = Float64 ? WebAssembly::LT_F64 : WebAssembly::LT_F32;
  unsigned GE = Float64 ? WebAssembly::GE_F64 : WebAssembly::GE_F32;
  unsigned IConst = Int64 ? WebAssembly::CONST_I64 : WebAssembly::CONST_I32;
  unsigned Eqz = WebAssembly::EQZ_I32;
  unsigned And = WebAssembly::AND_I32;
  int64_t Limit = Int64 ? INT64_MIN : INT32_MIN;
  int64_t Substitute = IsUnsigned ? 0 : Limit;
  double CmpVal = IsUnsigned ? -(double)Limit * 2.0 : -(double)Limit;
  auto &Context = BB->getParent()->getFunction().getContext();
  Type *Ty = Float64 ? Type::getDoubleTy(Context) : Type::getFloatTy(Context);

  const BasicBlock *LLVMBB = BB->getBasicBlock();
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TrueMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVMBB);
  MachineBasicBlock *DoneMBB = F->CreateMachineBasicBlock(LLVMBB);

  MachineFunction::iterator It = ++BB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, TrueMBB);
  F->insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB, std::next(MI.getIterator()), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(TrueMBB);
  BB->addSuccessor(FalseMBB);
  TrueMBB->addSuccessor(DoneMBB);
  FalseMBB->addSuccessor(DoneMBB);

  unsigned Tmp0, Tmp1, CmpReg, EqzReg, FalseReg, TrueReg;
  Tmp0 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
  CmpReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  EqzReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
  FalseReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));
  TrueReg = MRI.createVirtualRegister(MRI.getRegClass(OutReg));

  MI.eraseFromParent();
  // For signed numbers, we can do a single comparison to determine whether
  // fabs(x) is within range.
  if (IsUnsigned) {
    Tmp0 = InReg;
  } else {
    BuildMI(BB, DL, TII.get(Abs), Tmp0).addReg(InReg);
  }
  BuildMI(BB, DL, TII.get(FConst), Tmp1)
      .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, CmpVal)));
  BuildMI(BB, DL, TII.get(LT), CmpReg).addReg(Tmp0).addReg(Tmp1);

  // For unsigned numbers, we have to do a separate comparison with zero.
  if (IsUnsigned) {
    Tmp1 = MRI.createVirtualRegister(MRI.getRegClass(InReg));
    Register SecondCmpReg =
        MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    Register AndReg = MRI.createVirtualRegister(&WebAssembly::I32RegClass);
    BuildMI(BB, DL, TII.get(FConst), Tmp1)
        .addFPImm(cast<ConstantFP>(ConstantFP::get(Ty, 0.0)));
    BuildMI(BB, DL, TII.get(GE), SecondCmpReg).addReg(Tmp0).addReg(Tmp1);
    BuildMI(BB, DL, TII.get(And), AndReg).addReg(CmpReg).addReg(SecondCmpReg);
    CmpReg = AndReg;
  }

  BuildMI(BB, DL, TII.get(Eqz), EqzReg).addReg(CmpReg);

  // Create the CFG diamond to select between doing the conversion or using
  // the substitute value.
  BuildMI(BB, DL, TII.get(WebAssembly::BR_IF)).addMBB(TrueMBB).addReg(EqzReg);
  BuildMI(FalseMBB, DL, TII.get(LoweredOpcode), FalseReg).addReg(InReg);
  BuildMI(FalseMBB, DL, TII.get(WebAssembly::BR)).addMBB(DoneMBB);
  BuildMI(TrueMBB, DL, TII.get(IConst), TrueReg).addImm(Substitute);
  BuildMI(*DoneMBB, DoneMBB->begin(), DL, TII.get(TargetOpcode::PHI), OutReg)
      .addReg(FalseReg)
      .addMBB(FalseMBB)
      .addReg(TrueReg)
      .addMBB(TrueMBB);

  return DoneMBB;
}
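// Expand pseudo-instructions that were marked as needing a custom inserter;
// for WebAssembly these are the fp-to-int conversions, which use the explicit
// range-check diamond emitted by LowerFPToInt above.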
MachineBasicBlock *WebAssemblyTargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case WebAssembly::FP_TO_SINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, false, false,
                        WebAssembly::I32_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I32_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, false, false,
                        WebAssembly::I32_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, false, true, false,
                        WebAssembly::I64_TRUNC_S_F32);
  case WebAssembly::FP_TO_UINT_I64_F32:
    return LowerFPToInt(MI, DL, BB, TII, true, true, false,
                        WebAssembly::I64_TRUNC_U_F32);
  case WebAssembly::FP_TO_SINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, false, true,
                        WebAssembly::I32_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I32_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, false, true,
                        WebAssembly::I32_TRUNC_U_F64);
  case WebAssembly::FP_TO_SINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, false, true, true,
                        WebAssembly::I64_TRUNC_S_F64);
  case WebAssembly::FP_TO_UINT_I64_F64:
    return LowerFPToInt(MI, DL, BB, TII, true, true, true,
                        WebAssembly::I64_TRUNC_U_F64);
    llvm_unreachable("Unexpected instruction to emit with custom inserter");
  }
}
const char *
WebAssemblyTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (static_cast<WebAssemblyISD::NodeType>(Opcode)) {
  case WebAssemblyISD::FIRST_NUMBER:
    break;
#define HANDLE_NODETYPE(NODE)                                                  \
  case WebAssemblyISD::NODE:                                                   \
    return "WebAssemblyISD::" #NODE;
#include "WebAssemblyISD.def"
#undef HANDLE_NODETYPE
  }
  return nullptr;
}

std::pair<unsigned, const TargetRegisterClass *>
WebAssemblyTargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // WebAssembly register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      assert(VT != MVT::iPTR && "Pointer MVT not expected here");
      if (Subtarget->hasSIMD128() && VT.isVector()) {
        if (VT.getSizeInBits() == 128)
          return std::make_pair(0U, &WebAssembly::V128RegClass);
      }
      if (VT.isInteger() && !VT.isVector()) {
        if (VT.getSizeInBits() <= 32)
          return std::make_pair(0U, &WebAssembly::I32RegClass);
        if (VT.getSizeInBits() <= 64)
          return std::make_pair(0U, &WebAssembly::I64RegClass);
      }
      break;
    default:
      break;
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCttz() const {
  // Assume ctz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isCheapToSpeculateCtlz() const {
  // Assume clz is a relatively cheap operation.
  return true;
}

bool WebAssemblyTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                      const AddrMode &AM,
                                                      Type *Ty, unsigned AS,
                                                      Instruction *I) const {
  // WebAssembly offsets are added as unsigned without wrapping. The
  // isLegalAddressingMode gives us no way to determine if wrapping could be
  // happening, so we approximate this by accepting only non-negative offsets.
  if (AM.BaseOffs < 0)
    return false;

  // WebAssembly has no scale register operands.
  if (AM.Scale != 0)
    return false;

  // Everything else is legal.
  return true;
}

bool WebAssemblyTargetLowering::allowsMisalignedMemoryAccesses(
    EVT /*VT*/, unsigned /*AddrSpace*/, unsigned /*Align*/,
    MachineMemOperand::Flags /*Flags*/, bool *Fast) const {
  // WebAssembly supports unaligned accesses, though it should be declared
  // with the p2align attribute on loads and stores which do so, and there
  // may be a performance impact. We tell LLVM they're "fast" because
  // for the kinds of things that LLVM uses this for (merging adjacent stores
  // of constants, etc.), WebAssembly implementations will either want the
  // unaligned access or they'll split anyway.
  if (Fast)
    *Fast = true;
  return true;
}

bool WebAssemblyTargetLowering::isIntDivCheap(EVT VT,
                                              AttributeList Attr) const {
  // The current thinking is that wasm engines will perform this optimization,
  // so we can save on code size.
  return true;
}

EVT WebAssemblyTargetLowering::getSetCCResultType(const DataLayout &DL,
                                                  LLVMContext &C,
                                                  EVT VT) const {
  if (VT.isVector())
    return VT.changeVectorElementTypeToInteger();

  return TargetLowering::getSetCCResultType(DL, C, VT);
}

bool WebAssemblyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                   const CallInst &I,
                                                   MachineFunction &MF,
                                                   unsigned Intrinsic) const {
  switch (Intrinsic) {
  case Intrinsic::wasm_atomic_notify:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    // atomic.notify instruction does not really load the memory specified with
    // this argument, but MachineMemOperand should either be load or store, so
    // we set this to a load.
    // FIXME Volatile isn't really correct, but currently all LLVM atomic
    // instructions are treated as volatiles in the backend, so we should be
    // consistent. The same applies for wasm_atomic_wait intrinsics too.
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i32:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i32;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::wasm_atomic_wait_i64:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::i64;
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(8);
    Info.flags = MachineMemOperand::MOVolatile | MachineMemOperand::MOLoad;
    return true;
  default:
    return false;
  }
}

//===----------------------------------------------------------------------===//
// WebAssembly Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//
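// Report an unsupported construct through the LLVMContext's diagnostic
// handler, attaching the source location of the offending node when one is
// available.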
static void fail(const SDLoc &DL, SelectionDAG &DAG, const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, DL.getDebugLoc()));
}

// Test whether the given calling convention is supported.
static bool callingConvSupported(CallingConv::ID CallConv) {
  // We currently support the language-independent target-independent
  // conventions. We don't yet have a way to annotate calls with properties like
  // "cold", and we don't have any call-clobbered registers, so these are mostly
  // all handled the same.
  return CallConv == CallingConv::C || CallConv == CallingConv::Fast ||
         CallConv == CallingConv::Cold ||
         CallConv == CallingConv::PreserveMost ||
         CallConv == CallingConv::PreserveAll ||
         CallConv == CallingConv::CXX_FAST_TLS ||
         CallConv == CallingConv::WASM_EmscriptenInvoke;
}
SDValue
WebAssemblyTargetLowering::LowerCall(CallLoweringInfo &CLI,
                                     SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc DL = CLI.DL;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  MachineFunction &MF = DAG.getMachineFunction();
  auto Layout = MF.getDataLayout();

  CallingConv::ID CallConv = CLI.CallConv;
  if (!callingConvSupported(CallConv))
    fail(DL, DAG,
         "WebAssembly doesn't support language-specific or target-specific "
         "calling conventions yet");
  if (CLI.IsPatchPoint)
    fail(DL, DAG, "WebAssembly doesn't support patch point yet");

  if (CLI.IsTailCall) {
    bool MustTail = CLI.CS && CLI.CS.isMustTailCall();
    if (Subtarget->hasTailCall() && !CLI.IsVarArg) {
      // Do not tail call unless caller and callee return types match
      const Function &F = MF.getFunction();
      const TargetMachine &TM = getTargetMachine();
      Type *RetTy = F.getReturnType();
      SmallVector<MVT, 4> CallerRetTys;
      SmallVector<MVT, 4> CalleeRetTys;
      computeLegalValueVTs(F, TM, RetTy, CallerRetTys);
      computeLegalValueVTs(F, TM, CLI.RetTy, CalleeRetTys);
      bool TypesMatch = CallerRetTys.size() == CalleeRetTys.size() &&
                        std::equal(CallerRetTys.begin(), CallerRetTys.end(),
                                   CalleeRetTys.begin());
      if (!TypesMatch) {
        // musttail in this case would be an LLVM IR validation failure
        assert(!MustTail);
        CLI.IsTailCall = false;
      }
    } else {
      CLI.IsTailCall = false;
      if (MustTail) {
        if (CLI.IsVarArg) {
          // The return would pop the argument buffer
          fail(DL, DAG, "WebAssembly does not support varargs tail calls");
        } else {
          fail(DL, DAG, "WebAssembly 'tail-call' feature not enabled");
        }
      }
    }
  }

  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  if (Ins.size() > 1)
    fail(DL, DAG, "WebAssembly doesn't support more than 1 returned value yet");

  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;

  // The generic code may have added an sret argument. If we're lowering an
  // invoke function, the ABI requires that the function pointer be the first
  // argument, so we may have to swap the arguments.
  if (CallConv == CallingConv::WASM_EmscriptenInvoke && Outs.size() >= 2 &&
      Outs[0].Flags.isSRet()) {
    std::swap(Outs[0], Outs[1]);
    std::swap(OutVals[0], OutVals[1]);
  }

  unsigned NumFixedArgs = 0;
  for (unsigned I = 0; I < Outs.size(); ++I) {
    const ISD::OutputArg &Out = Outs[I];
    SDValue &OutVal = OutVals[I];
    if (Out.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    if (Out.Flags.isByVal() && Out.Flags.getByValSize() != 0) {
      auto &MFI = MF.getFrameInfo();
      int FI = MFI.CreateStackObject(Out.Flags.getByValSize(),
                                     Out.Flags.getByValAlign(),
                                     /*isSS=*/false);
      SDValue SizeNode =
          DAG.getConstant(Out.Flags.getByValSize(), DL, MVT::i32);
      SDValue FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      Chain = DAG.getMemcpy(
          Chain, DL, FINode, OutVal, SizeNode, Out.Flags.getByValAlign(),
          /*isVolatile*/ false, /*AlwaysInline=*/false,
          /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
      OutVal = FINode;
    }
    // Count the number of fixed args *after* legalization.
    NumFixedArgs += Out.IsFixed;
  }

  bool IsVarArg = CLI.IsVarArg;
  auto PtrVT = getPointerTy(Layout);

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());

  if (IsVarArg) {
    // Outgoing non-fixed arguments are placed in a buffer. First
    // compute their offsets and the total amount of buffer space needed.
    for (unsigned I = NumFixedArgs; I < Outs.size(); ++I) {
      const ISD::OutputArg &Out = Outs[I];
      SDValue &Arg = OutVals[I];
      EVT VT = Arg.getValueType();
      assert(VT != MVT::iPTR && "Legalized args should be concrete");
      Type *Ty = VT.getTypeForEVT(*DAG.getContext());
      unsigned Align = std::max(Out.Flags.getOrigAlign(),
                                Layout.getABITypeAlignment(Ty));
      unsigned Offset = CCInfo.AllocateStack(Layout.getTypeAllocSize(Ty),
                                             Align);
      CCInfo.addLoc(CCValAssign::getMem(ArgLocs.size(), VT.getSimpleVT(),
                                        Offset, VT.getSimpleVT(),
                                        CCValAssign::Full));
    }
  }

  unsigned NumBytes = CCInfo.getAlignedCallFrameSize();

  SDValue FINode;
  if (IsVarArg && NumBytes) {
    // For non-fixed arguments, next emit stores to store the argument values
    // to the stack buffer at the offsets computed above.
    int FI = MF.getFrameInfo().CreateStackObject(NumBytes,
                                                 Layout.getStackAlignment(),
                                                 /*isSS=*/false);
    unsigned ValNo = 0;
    SmallVector<SDValue, 8> Chains;
    for (SDValue Arg :
         make_range(OutVals.begin() + NumFixedArgs, OutVals.end())) {
      assert(ArgLocs[ValNo].getValNo() == ValNo &&
             "ArgLocs should remain in order and only hold varargs args");
      unsigned Offset = ArgLocs[ValNo++].getLocMemOffset();
      FINode = DAG.getFrameIndex(FI, getPointerTy(Layout));
      SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, FINode,
                                DAG.getConstant(Offset, DL, PtrVT));
      Chains.push_back(
          DAG.getStore(Chain, DL, Arg, Add,
                       MachinePointerInfo::getFixedStack(MF, FI, Offset), 0));
    }
    if (!Chains.empty())
      Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
  } else if (IsVarArg) {
    FINode = DAG.getIntPtrConstant(0, DL);
  }
  if (Callee->getOpcode() == ISD::GlobalAddress) {
    // If the callee is a GlobalAddress node (quite common, every direct call
    // is), turn it into a TargetGlobalAddress node so that LowerGlobalAddress
    // doesn't add MO_GOT, which is not needed for direct calls.
    GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Callee);
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                        getPointerTy(DAG.getDataLayout()),
                                        GA->getOffset());
    Callee = DAG.getNode(WebAssemblyISD::Wrapper, DL,
                         getPointerTy(DAG.getDataLayout()), Callee);
  }

  // Compute the operands for the CALLn node.
  SmallVector<SDValue, 16> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add all fixed arguments. Note that for non-varargs calls, NumFixedArgs
  // isn't reliable.
  Ops.append(OutVals.begin(),
             IsVarArg ? OutVals.begin() + NumFixedArgs : OutVals.end());
  // Add a pointer to the vararg buffer.
  if (IsVarArg)
    Ops.push_back(FINode);

  SmallVector<EVT, 8> InTys;
  for (const auto &In : Ins) {
    assert(!In.Flags.isByVal() && "byval is not valid for return values");
    assert(!In.Flags.isNest() && "nest is not valid for return values");
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca return values");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs return values");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG,
           "WebAssembly hasn't implemented cons regs last return values");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InTys.push_back(In.VT);
  }

  if (CLI.IsTailCall) {
    // ret_calls do not return values to the current frame
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    return DAG.getNode(WebAssemblyISD::RET_CALL, DL, NodeTys, Ops);
  }

  InTys.push_back(MVT::Other);
  SDVTList InTyList = DAG.getVTList(InTys);
  SDValue Res =
      DAG.getNode(Ins.empty() ? WebAssemblyISD::CALL0 : WebAssemblyISD::CALL1,
                  DL, InTyList, Ops);
  if (Ins.empty()) {
    Chain = Res;
  } else {
    InVals.push_back(Res);
    Chain = Res.getValue(1);
  }

  return Chain;
}
bool WebAssemblyTargetLowering::CanLowerReturn(
    CallingConv::ID /*CallConv*/, MachineFunction & /*MF*/, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    LLVMContext & /*Context*/) const {
  // WebAssembly can't currently handle returning tuples.
  return Outs.size() <= 1;
}

SDValue WebAssemblyTargetLowering::LowerReturn(
    SDValue Chain, CallingConv::ID CallConv, bool /*IsVarArg*/,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
    SelectionDAG &DAG) const {
  assert(Outs.size() <= 1 && "WebAssembly can only return up to one value");
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  SmallVector<SDValue, 4> RetOps(1, Chain);
  RetOps.append(OutVals.begin(), OutVals.end());
  Chain = DAG.getNode(WebAssemblyISD::RETURN, DL, MVT::Other, RetOps);

  // Record the number and types of the return values.
  for (const ISD::OutputArg &Out : Outs) {
    assert(!Out.Flags.isByVal() && "byval is not valid for return values");
    assert(!Out.Flags.isNest() && "nest is not valid for return values");
    assert(Out.IsFixed && "non-fixed return value is not valid");
    if (Out.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca results");
    if (Out.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs results");
    if (Out.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last results");
  }

  return Chain;
}

SDValue WebAssemblyTargetLowering::LowerFormalArguments(
    SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
  if (!callingConvSupported(CallConv))
    fail(DL, DAG, "WebAssembly doesn't support non-C calling conventions");

  MachineFunction &MF = DAG.getMachineFunction();
  auto *MFI = MF.getInfo<WebAssemblyFunctionInfo>();

  // Set up the incoming ARGUMENTS value, which serves to represent the liveness
  // of the incoming values before they're represented by virtual registers.
  MF.getRegInfo().addLiveIn(WebAssembly::ARGUMENTS);

  for (const ISD::InputArg &In : Ins) {
    if (In.Flags.isInAlloca())
      fail(DL, DAG, "WebAssembly hasn't implemented inalloca arguments");
    if (In.Flags.isNest())
      fail(DL, DAG, "WebAssembly hasn't implemented nest arguments");
    if (In.Flags.isInConsecutiveRegs())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs arguments");
    if (In.Flags.isInConsecutiveRegsLast())
      fail(DL, DAG, "WebAssembly hasn't implemented cons regs last arguments");
    // Ignore In.getOrigAlign() because all our arguments are passed in
    // registers.
    InVals.push_back(In.Used ? DAG.getNode(WebAssemblyISD::ARGUMENT, DL, In.VT,
                                           DAG.getTargetConstant(InVals.size(),
                                                                 DL, MVT::i32))
                             : DAG.getUNDEF(In.VT));

    // Record the number and types of arguments.
    MFI->addParam(In.VT);
  }

  // Varargs are copied into a buffer allocated by the caller, and a pointer to
  // the buffer is passed as an argument.
  if (IsVarArg) {
    MVT PtrVT = getPointerTy(MF.getDataLayout());
    Register VarargVreg =
        MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrVT));
    MFI->setVarargBufferVreg(VarargVreg);
    Chain = DAG.getCopyToReg(
        Chain, DL, VarargVreg,
        DAG.getNode(WebAssemblyISD::ARGUMENT, DL, PtrVT,
                    DAG.getTargetConstant(Ins.size(), DL, MVT::i32)));
    MFI->addParam(PtrVT);
  }

  // Record the number and types of arguments and results.
  SmallVector<MVT, 4> Params;
  SmallVector<MVT, 4> Results;
  computeSignatureVTs(MF.getFunction().getFunctionType(), MF.getFunction(),
                      DAG.getTarget(), Params, Results);
  for (MVT VT : Results)
    MFI->addResult(VT);
  // TODO: Use signatures in WebAssemblyMachineFunctionInfo too and unify
  // the param logic here with ComputeSignatureVTs
  assert(MFI->getParams().size() == Params.size() &&
         std::equal(MFI->getParams().begin(), MFI->getParams().end(),
                    Params.begin()));

  return Chain;
}

void WebAssemblyTargetLowering::ReplaceNodeResults(
    SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  case ISD::SIGN_EXTEND_INREG:
    // Do not add any results, signifying that N should not be custom lowered
    // after all. This happens because simd128 turns on custom lowering for
    // SIGN_EXTEND_INREG, but for non-vector sign extends the result might be an
    // illegal type.
    break;
  default:
    llvm_unreachable(
        "ReplaceNodeResults not implemented for this op for WebAssembly!");
  }
}
//===----------------------------------------------------------------------===//
// Custom lowering hooks.
//===----------------------------------------------------------------------===//

SDValue WebAssemblyTargetLowering::LowerOperation(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default:
    llvm_unreachable("unimplemented operation lowering");
    return SDValue();
  case ISD::FrameIndex:
    return LowerFrameIndex(Op, DAG);
  case ISD::GlobalAddress:
    return LowerGlobalAddress(Op, DAG);
  case ISD::ExternalSymbol:
    return LowerExternalSymbol(Op, DAG);
  case ISD::JumpTable:
    return LowerJumpTable(Op, DAG);
  case ISD::BR_JT:
    return LowerBR_JT(Op, DAG);
  case ISD::VASTART:
    return LowerVASTART(Op, DAG);
  case ISD::BlockAddress:
  case ISD::BRIND:
    fail(DL, DAG, "WebAssembly hasn't implemented computed gotos");
    return SDValue();
  case ISD::RETURNADDR:
    return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR:
    return LowerFRAMEADDR(Op, DAG);
  case ISD::CopyToReg:
    return LowerCopyToReg(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
  case ISD::INSERT_VECTOR_ELT:
    return LowerAccessVectorElement(Op, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_W_CHAIN:
    return LowerIntrinsic(Op, DAG);
  case ISD::SIGN_EXTEND_INREG:
    return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::BUILD_VECTOR:
    return LowerBUILD_VECTOR(Op, DAG);
  case ISD::VECTOR_SHUFFLE:
    return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    return LowerShift(Op, DAG);
  }
}

SDValue WebAssemblyTargetLowering::LowerCopyToReg(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Src = Op.getOperand(2);
  if (isa<FrameIndexSDNode>(Src.getNode())) {
    // CopyToReg nodes don't support FrameIndex operands. Other targets select
    // the FI to some LEA-like instruction, but since we don't have that, we
    // need to insert some kind of instruction that can take an FI operand and
    // produces a value usable by CopyToReg (i.e. in a vreg). So insert a dummy
    // local.copy between Op and its FI operand.
    SDValue Chain = Op.getOperand(0);
    SDLoc DL(Op);
    unsigned Reg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();
    EVT VT = Src.getValueType();
    SDValue Copy(DAG.getMachineNode(VT == MVT::i32 ? WebAssembly::COPY_I32
                                                   : WebAssembly::COPY_I64,
                                    DL, VT, Src),
                 0);
    return Op.getNode()->getNumValues() == 1
               ? DAG.getCopyToReg(Chain, DL, Reg, Copy)
               : DAG.getCopyToReg(Chain, DL, Reg, Copy,
                                  Op.getNumOperands() == 4 ? Op.getOperand(3)
                                                           : SDValue());
  }
  return SDValue();
}

SDValue WebAssemblyTargetLowering::LowerFrameIndex(SDValue Op,
                                                   SelectionDAG &DAG) const {
  int FI = cast<FrameIndexSDNode>(Op)->getIndex();
  return DAG.getTargetFrameIndex(FI, Op.getValueType());
}

SDValue WebAssemblyTargetLowering::LowerRETURNADDR(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);

  if (!Subtarget->getTargetTriple().isOSEmscripten()) {
    fail(DL, DAG,
         "Non-Emscripten WebAssembly hasn't implemented "
         "__builtin_return_address");
    return SDValue();
  }

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  MakeLibCallOptions CallOptions;
  return makeLibCall(DAG, RTLIB::RETURN_ADDRESS, Op.getValueType(),
                     {DAG.getConstant(Depth, DL, MVT::i32)}, CallOptions, DL)
      .first;
}

SDValue WebAssemblyTargetLowering::LowerFRAMEADDR(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // Non-zero depths are not supported by WebAssembly currently. Use the
  // legalizer's default expansion, which is to return 0 (what this function is
  // documented to do).
  if (Op.getConstantOperandVal(0) > 0)
    return SDValue();

  DAG.getMachineFunction().getFrameInfo().setFrameAddressIsTaken(true);
  EVT VT = Op.getValueType();
  Register FP =
      Subtarget->getRegisterInfo()->getFrameRegister(DAG.getMachineFunction());
  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), FP, VT);
}

SDValue WebAssemblyTargetLowering::LowerGlobalAddress(SDValue Op,
                                                      SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *GA = cast<GlobalAddressSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(GA->getTargetFlags() == 0 &&
         "Unexpected target flags on generic GlobalAddressSDNode");
  if (GA->getAddressSpace() != 0)
    fail(DL, DAG, "WebAssembly only expects the 0 address space");

  unsigned OperandFlags = 0;
  if (isPositionIndependent()) {
    const GlobalValue *GV = GA->getGlobal();
    if (getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV)) {
      MachineFunction &MF = DAG.getMachineFunction();
      MVT PtrVT = getPointerTy(MF.getDataLayout());
      const char *BaseName;
      if (GV->getValueType()->isFunctionTy()) {
        BaseName = MF.createExternalSymbolName("__table_base");
        OperandFlags = WebAssemblyII::MO_TABLE_BASE_REL;
      } else {
        BaseName = MF.createExternalSymbolName("__memory_base");
        OperandFlags = WebAssemblyII::MO_MEMORY_BASE_REL;
      }
      SDValue BaseAddr =
          DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                      DAG.getTargetExternalSymbol(BaseName, PtrVT));

      SDValue SymAddr = DAG.getNode(
          WebAssemblyISD::WrapperPIC, DL, VT,
          DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT, GA->getOffset(),
                                     OperandFlags));

      return DAG.getNode(ISD::ADD, DL, VT, BaseAddr, SymAddr);
    } else {
      OperandFlags = WebAssemblyII::MO_GOT;
    }
  }

  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetGlobalAddress(GA->getGlobal(), DL, VT,
                                                GA->getOffset(), OperandFlags));
}

SDValue
WebAssemblyTargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const auto *ES = cast<ExternalSymbolSDNode>(Op);
  EVT VT = Op.getValueType();
  assert(ES->getTargetFlags() == 0 &&
         "Unexpected target flags on generic ExternalSymbolSDNode");
  return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                     DAG.getTargetExternalSymbol(ES->getSymbol(), VT));
}

SDValue WebAssemblyTargetLowering::LowerJumpTable(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // There's no need for a Wrapper node because we always incorporate a jump
  // table operand into a BR_TABLE instruction, rather than ever
  // materializing it in a register.
  const JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
  return DAG.getTargetJumpTable(JT->getIndex(), Op.getValueType(),
                                JT->getTargetFlags());
}
SDValue WebAssemblyTargetLowering::LowerBR_JT(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  const auto *JT = cast<JumpTableSDNode>(Op.getOperand(1));
  SDValue Index = Op.getOperand(2);
  assert(JT->getTargetFlags() == 0 && "WebAssembly doesn't set target flags");

  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Index);

  MachineJumpTableInfo *MJTI = DAG.getMachineFunction().getJumpTableInfo();
  const auto &MBBs = MJTI->getJumpTables()[JT->getIndex()].MBBs;

  // Add an operand for each case.
  for (auto MBB : MBBs)
    Ops.push_back(DAG.getBasicBlock(MBB));

  // TODO: For now, we just pick something arbitrary for a default case. We
  // really want to sniff out the guard and put in the real default case (and
  // delete the guard).
  Ops.push_back(DAG.getBasicBlock(MBBs[0]));

  return DAG.getNode(WebAssemblyISD::BR_TABLE, DL, MVT::Other, Ops);
}
SDValue WebAssemblyTargetLowering::LowerVASTART(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT PtrVT = getPointerTy(DAG.getMachineFunction().getDataLayout());

  auto *MFI = DAG.getMachineFunction().getInfo<WebAssemblyFunctionInfo>();
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();

  SDValue ArgN = DAG.getCopyFromReg(DAG.getEntryNode(), DL,
                                    MFI->getVarargBufferVreg(), PtrVT);
  return DAG.getStore(Op.getOperand(0), DL, ArgN, Op.getOperand(1),
                      MachinePointerInfo(SV), 0);
}

SDValue WebAssemblyTargetLowering::LowerIntrinsic(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  unsigned IntNo;
  switch (Op.getOpcode()) {
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    break;
  case ISD::INTRINSIC_WO_CHAIN:
    IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    break;
  default:
    llvm_unreachable("Invalid intrinsic");
  }
  SDLoc DL(Op);

  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.

  case Intrinsic::wasm_lsda: {
    EVT VT = Op.getValueType();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(WebAssemblyISD::Wrapper, DL, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::wasm_throw: {
    // We only support C++ exceptions for now
    int Tag = cast<ConstantSDNode>(Op.getOperand(2).getNode())->getZExtValue();
    if (Tag != CPP_EXCEPTION)
      llvm_unreachable("Invalid tag!");
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    const char *SymName = MF.createExternalSymbolName("__cpp_exception");
    SDValue SymNode = DAG.getNode(WebAssemblyISD::Wrapper, DL, PtrVT,
                                  DAG.getTargetExternalSymbol(SymName, PtrVT));
    return DAG.getNode(WebAssemblyISD::THROW, DL,
                       MVT::Other, // outchain type
                       {
                           Op.getOperand(0), // inchain
                           SymNode,          // exception symbol
                           Op.getOperand(3)  // thrown value
                       });
  }
  }
}

SDValue
WebAssemblyTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If sign extension operations are disabled, allow sext_inreg only if operand
  // is a vector extract. SIMD does not depend on sign extension operations, but
  // allowing sext_inreg in this context lets us have simple patterns to select
  // extract_lane_s instructions. Expanding sext_inreg everywhere would be
  // simpler in this file, but would necessitate large and brittle patterns to
  // undo the expansion and select extract_lane_s instructions.
  assert(!Subtarget->hasSignExt() && Subtarget->hasSIMD128());
  if (Op.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    const SDValue &Extract = Op.getOperand(0);
    MVT VecT = Extract.getOperand(0).getSimpleValueType();
    MVT ExtractedLaneT = static_cast<VTSDNode *>(Op.getOperand(1).getNode())
                             ->getVT()
                             .getSimpleVT();
    MVT ExtractedVecT =
        MVT::getVectorVT(ExtractedLaneT, 128 / ExtractedLaneT.getSizeInBits());
    if (ExtractedVecT == VecT)
      return Op;
    // Bitcast vector to appropriate type to ensure ISel pattern coverage
    const SDValue &Index = Extract.getOperand(1);
    unsigned IndexVal =
        static_cast<ConstantSDNode *>(Index.getNode())->getZExtValue();
    unsigned Scale =
        ExtractedVecT.getVectorNumElements() / VecT.getVectorNumElements();
    assert(Scale > 1);
    SDValue NewIndex =
        DAG.getConstant(IndexVal * Scale, DL, Index.getValueType());
    SDValue NewExtract = DAG.getNode(
        ISD::EXTRACT_VECTOR_ELT, DL, Extract.getValueType(),
        DAG.getBitcast(ExtractedVecT, Extract.getOperand(0)), NewIndex);
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, Op.getValueType(),
                       NewExtract, Op.getOperand(1));
  }
  // Otherwise expand
  return SDValue();
}
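// Lower a BUILD_VECTOR either as a splat of its most frequently occurring
// operand followed by replace_lanes for the remaining lanes, or as a
// v128.const plus replace_lanes when that is estimated to encode smaller.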
SDValue WebAssemblyTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  const EVT VecT = Op.getValueType();
  const EVT LaneT = Op.getOperand(0).getValueType();
  const size_t Lanes = Op.getNumOperands();
  auto IsConstant = [](const SDValue &V) {
    return V.getOpcode() == ISD::Constant || V.getOpcode() == ISD::ConstantFP;
  };

  // Find the most common operand, which is approximately the best to splat
  using Entry = std::pair<SDValue, size_t>;
  SmallVector<Entry, 16> ValueCounts;
  size_t NumConst = 0, NumDynamic = 0;
  for (const SDValue &Lane : Op->op_values()) {
    if (Lane.isUndef()) {
      continue;
    } else if (IsConstant(Lane)) {
      NumConst++;
    } else {
      NumDynamic++;
    }
    auto CountIt = std::find_if(ValueCounts.begin(), ValueCounts.end(),
                                [&Lane](Entry A) { return A.first == Lane; });
    if (CountIt == ValueCounts.end()) {
      ValueCounts.emplace_back(Lane, 1);
    } else {
      CountIt->second++;
    }
  }
  auto CommonIt =
      std::max_element(ValueCounts.begin(), ValueCounts.end(),
                       [](Entry A, Entry B) { return A.second < B.second; });
  assert(CommonIt != ValueCounts.end() && "Unexpected all-undef build_vector");
  SDValue SplatValue = CommonIt->first;
  size_t NumCommon = CommonIt->second;

  // If v128.const is available, consider using it instead of a splat
  if (Subtarget->hasUnimplementedSIMD128()) {
    // {i32,i64,f32,f64}.const opcode, and value
    const size_t ConstBytes = 1 + std::max(size_t(4), 16 / Lanes);
    // SIMD prefix and opcode
    const size_t SplatBytes = 2;
    const size_t SplatConstBytes = SplatBytes + ConstBytes;
    // SIMD prefix, opcode, and lane index
    const size_t ReplaceBytes = 3;
    const size_t ReplaceConstBytes = ReplaceBytes + ConstBytes;
    // SIMD prefix, v128.const opcode, and 128-bit value
    const size_t VecConstBytes = 18;
    // Initial v128.const and a replace_lane for each non-const operand
    const size_t ConstInitBytes = VecConstBytes + NumDynamic * ReplaceBytes;
    // Initial splat and all necessary replace_lanes
    const size_t SplatInitBytes =
        IsConstant(SplatValue)
            // Initial constant splat
            ? (SplatConstBytes +
               // Constant replace_lanes
               (NumConst - NumCommon) * ReplaceConstBytes +
               // Dynamic replace_lanes
               (NumDynamic * ReplaceBytes))
            // Initial dynamic splat
            : (SplatBytes +
               // Constant replace_lanes
               (NumConst * ReplaceConstBytes) +
               // Dynamic replace_lanes
               (NumDynamic - NumCommon) * ReplaceBytes);
    if (ConstInitBytes < SplatInitBytes) {
      // Create build_vector that will lower to initial v128.const
      SmallVector<SDValue, 16> ConstLanes;
      for (const SDValue &Lane : Op->op_values()) {
        if (IsConstant(Lane)) {
          ConstLanes.push_back(Lane);
        } else if (LaneT.isFloatingPoint()) {
          ConstLanes.push_back(DAG.getConstantFP(0, DL, LaneT));
        } else {
          ConstLanes.push_back(DAG.getConstant(0, DL, LaneT));
        }
      }
      SDValue Result = DAG.getBuildVector(VecT, DL, ConstLanes);
      // Add replace_lane instructions for non-const lanes
      for (size_t I = 0; I < Lanes; ++I) {
        const SDValue &Lane = Op->getOperand(I);
        if (!Lane.isUndef() && !IsConstant(Lane))
          Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                               DAG.getConstant(I, DL, MVT::i32));
      }
      return Result;
    }
  }
  // Use a splat for the initial vector
  SDValue Result = DAG.getSplatBuildVector(VecT, DL, SplatValue);
  // Add replace_lane instructions for other values
  for (size_t I = 0; I < Lanes; ++I) {
    const SDValue &Lane = Op->getOperand(I);
    if (Lane != SplatValue)
      Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VecT, Result, Lane,
                           DAG.getConstant(I, DL, MVT::i32));
  }
  return Result;
}
SDValue
WebAssemblyTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op.getNode())->getMask();
  MVT VecType = Op.getOperand(0).getSimpleValueType();
  assert(VecType.is128BitVector() && "Unexpected shuffle vector type");
  size_t LaneBytes = VecType.getVectorElementType().getSizeInBits() / 8;

  // Space for two vector args and sixteen mask indices
  SDValue Ops[18];
  size_t OpIdx = 0;
  Ops[OpIdx++] = Op.getOperand(0);
  Ops[OpIdx++] = Op.getOperand(1);

  // Expand mask indices to byte indices and materialize them as operands
  for (int M : Mask) {
    for (size_t J = 0; J < LaneBytes; ++J) {
      // Lower undefs (represented by -1 in mask) to zero
      uint64_t ByteIndex = M == -1 ? 0 : (uint64_t)M * LaneBytes + J;
      Ops[OpIdx++] = DAG.getConstant(ByteIndex, DL, MVT::i32);
    }
  }

  return DAG.getNode(WebAssemblyISD::SHUFFLE, DL, Op.getValueType(), Ops);
}

SDValue
WebAssemblyTargetLowering::LowerAccessVectorElement(SDValue Op,
                                                    SelectionDAG &DAG) const {
  // Allow constant lane indices, expand variable lane indices
  SDNode *IdxNode = Op.getOperand(Op.getNumOperands() - 1).getNode();
  if (isa<ConstantSDNode>(IdxNode) || IdxNode->isUndef())
    return Op;
  else
    // Perform default expansion
    return SDValue();
}
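// Unroll a vector shift into scalar shifts. For lanes narrower than 32 bits,
// the shift amount is first masked to the lane width so that the unrolled
// 32-bit shifts produce the same per-lane results as the vector shift.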
static SDValue unrollVectorShift(SDValue Op, SelectionDAG &DAG) {
  EVT LaneT = Op.getSimpleValueType().getVectorElementType();
  // 32-bit and 64-bit unrolled shifts will have proper semantics
  if (LaneT.bitsGE(MVT::i32))
    return DAG.UnrollVectorOp(Op.getNode());
  // Otherwise mask the shift value to get proper semantics from 32-bit shift
  SDLoc DL(Op);
  SDValue ShiftVal = Op.getOperand(1);
  uint64_t MaskVal = LaneT.getSizeInBits() - 1;
  SDValue MaskedShiftVal = DAG.getNode(
      ISD::AND,                    // mask opcode
      DL, ShiftVal.getValueType(), // masked value type
      ShiftVal,                    // original shift value operand
      DAG.getConstant(MaskVal, DL, ShiftVal.getValueType()) // mask operand
  );

  return DAG.UnrollVectorOp(
      DAG.getNode(Op.getOpcode(),        // original shift opcode
                  DL, Op.getValueType(), // original return type
                  Op.getOperand(0),      // original vector operand,
                  MaskedShiftVal         // new masked shift value operand
                  )
          .getNode());
}

SDValue WebAssemblyTargetLowering::LowerShift(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDLoc DL(Op);

  // Only manually lower vector shifts
  assert(Op.getSimpleValueType().isVector());

  // Unroll non-splat vector shifts
  BuildVectorSDNode *ShiftVec;
  SDValue SplatVal;
  if (!(ShiftVec = dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode())) ||
      !(SplatVal = ShiftVec->getSplatValue()))
    return unrollVectorShift(Op, DAG);

  // All splats except i64x2 const splats are handled by patterns
  auto *SplatConst = dyn_cast<ConstantSDNode>(SplatVal);
  if (!SplatConst || Op.getSimpleValueType() != MVT::v2i64)
    return Op;

  // i64x2 const splats are custom lowered to avoid unnecessary wraps
  unsigned Opcode;
  switch (Op.getOpcode()) {
  case ISD::SHL:
    Opcode = WebAssemblyISD::VEC_SHL;
    break;
  case ISD::SRA:
    Opcode = WebAssemblyISD::VEC_SHR_S;
    break;
  case ISD::SRL:
    Opcode = WebAssemblyISD::VEC_SHR_U;
    break;
  default:
    llvm_unreachable("unexpected opcode");
  }
  APInt Shift = SplatConst->getAPIntValue().zextOrTrunc(32);
  return DAG.getNode(Opcode, DL, Op.getValueType(), Op.getOperand(0),
                     DAG.getConstant(Shift, DL, MVT::i32));
}

//===----------------------------------------------------------------------===//
// WebAssembly Optimization Hooks
//===----------------------------------------------------------------------===//